/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

       dlm_lock          = request_lock
       dlm_lock+CONVERT  = convert_lock
       dlm_unlock        = unlock_lock
       dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
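
/* Illustrative trace (not additional code in this file): a new request
   for a locally mastered resource, assuming no errors and no blocking,
   would flow through the stages above roughly as:

   dlm_lock(ls, mode, ...)           stage 1: validate args, create lkb
     request_lock(ls, lkb, ...)      stage 2: find_rsb(), lock_rsb()
       _request_lock(r, lkb)         stage 3: master is local, no send
         do_request(r, lkb)          stage 4: grant or queue, queue_cast()

   For a remote master, stage 3 instead calls send_request(), and
   do_request() runs on the master when receive_request() arrives there. */
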
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);

/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD*/
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
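
/* Example reading of the table above: converting from granted mode PR
   to requested mode EX looks up
   dlm_lvb_operations[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1], which is 1, so
   the LVB is returned to the caller; converting down from PW to NL
   gives 0, so the caller's LVB is written to the resource. */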

#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
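
/* Example: two read locks coexist, a writer excludes a reader.
   dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) is 1, while
   dlm_modes_compat(DLM_LOCK_EX, DLM_LOCK_PR) is 0.  DLM_LOCK_IV (-1)
   indexes the UN row/column, which is why the matrix indexing is
   offset by one. */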

/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}

static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
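
/* Note on the two helpers above: PR and CW are adjacent in numeric mode
   order but mutually incompatible in the compatibility matrix, so a
   PR<->CW conversion is neither a pure up- nor down-conversion
   ("middle").  For example, gr=PR rq=CW is middle_conversion();
   gr=EX rq=NL is down_conversion(); gr=NL rq=EX is neither (a normal
   up-conversion). */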

static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}

/*
 * Basic operations on rsb's and lkb's
 */

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}

/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */

static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count, name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}
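
/* The pre-allocation pattern above is used by the find_rsb variants
   below; a minimal sketch of a caller (illustrative only, not a
   function in this file):

	retry:
		error = pre_rsb_struct(ls);	// refill pool, may sleep
		if (error < 0)
			return error;
		spin_lock(&ls->ls_rsbtbl[b].lock);
		...
		error = get_rsb_struct(ls, name, len, &r);  // never sleeps
		if (error == -EAGAIN) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			goto retry;	// pool drained by another thread
		}

   Allocation that can sleep happens outside the spinlock; inside the
   lock only the non-blocking pool removal is done. */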

static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}

int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}

static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}

/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name to master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */
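
/* Rough rsb lifecycle implied by the comment above (illustrative
   summary, not normative):

   - find_rsb() creates an rsb on the keep list, or revives one from
     the toss list (ref count back to 1).
   - When the last local reference is put, toss_rsb() moves it from
     keep to toss; it still serves name->master lookups there.
   - dlm_master_lookup() may create a directory-record rsb directly
     on the toss list.
   - After sitting on the toss list longer than toss_secs,
     shrink_bucket() frees it, telling the dir node via send_remove()
     when we are the master but not the dir node. */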

static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
			uint32_t hash, uint32_t b,
			int dir_nodeid, int from_nodeid,
			unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int from_local = 0;
	int from_other = 0;
	int from_dir = 0;
	int create = 0;
	int error;

	if (flags & R_RECEIVE_REQUEST) {
		if (from_nodeid == dir_nodeid)
			from_dir = 1;
		else
			from_other = 1;
	} else if (flags & R_REQUEST) {
		from_local = 1;
	}

	/*
	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
	 * we're the new master.  Our local recovery may not have set
	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
	 * create the rsb; dlm_recover_process_copy() will handle EBADR
	 * by resending.
	 *
	 * If someone sends us a request, we are the dir node, and we do
	 * not find the rsb anywhere, then recreate it.  This happens if
	 * someone sends us a request after we have removed/freed an rsb
	 * from our toss list.  (They sent a request instead of lookup
	 * because they are using an rsb from their toss list.)
	 */

	if (from_local || from_dir ||
	    (from_other && (dir_nodeid == our_nodeid))) {
		create = 1;
	}

 retry:
	if (create) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	error = 0;
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive (master_nodeid may be out of date unless
	 * we are the dir_nodeid or were the master)  No other thread
	 * is using this rsb because it's on the toss list, so we can
	 * look at or update res_master_nodeid without lock_rsb.
	 */

	if ((r->res_master_nodeid != our_nodeid) && from_other) {
		/* our rsb was not master, and another node (not the dir node)
		   has sent us a request */
		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
			  from_nodeid, r->res_master_nodeid, dir_nodeid,
			  r->res_name);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
		/* don't think this should ever happen */
		log_error(ls, "find_rsb toss from_dir %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		/* fix it and go on */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	if (from_local && (r->res_master_nodeid != our_nodeid)) {
		/* Because we have held no locks on this rsb,
		   res_master_nodeid could have become stale. */
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	if (error == -EBADR && !create)
		goto out_unlock;

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	kref_init(&r->res_ref);

	if (from_dir) {
		/* want to see how often this happens */
		log_debug(ls, "find_rsb new from_dir %d recreate %s",
			  from_nodeid, r->res_name);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		goto out_add;
	}

	if (from_other && (dir_nodeid != our_nodeid)) {
		/* should never happen */
		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
		dlm_free_rsb(r);
		r = NULL;
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (from_other) {
		log_debug(ls, "find_rsb new from_other %d dir %d %s",
			  from_nodeid, dir_nodeid, r->res_name);
	}

	if (dir_nodeid == our_nodeid) {
		/* When we are the dir nodeid, we can set the master
		   node immediately */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	} else {
		/* set_master will send_lookup to dir_nodeid */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	}

 out_add:
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive. No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}

/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}

/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */

int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int from_master = (flags & DLM_LU_RECOVER_DIR);
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error, toss_list = 0;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		   checking/changing res_master_nodeid */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);
		goto found;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	   and lock_rsb is not used, but is protected by the rsbtbl lock */

	toss_list = 1;
 found:
	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		   the previous master failed.  Setting NEW_MASTER will
		   force dlm_recover_masters to call recover_master on this
		   rsb even though the res_nodeid is no longer removed. */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on toss list. */
			log_error(ls, "dlm_master_lookup fix_master on toss");
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		   a previous recovery cycle, and we aborted the previous
		   cycle before recovering this master value */

		log_limit(ls, "dlm_master_lookup from_master %d "
			  "master_nodeid %d res_nodeid %d first %x %s",
			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
			  r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			dlm_send_rcom_lookup_dump(r, from_nodeid);
			goto out_found;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		   up the master for this rsb */

		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		   finds the rsb on the keep list and ignores the remove,
		   and the former master sends a lookup */

		log_limit(ls, "dlm_master_lookup from master %d flags %x "
			  "first %x %s", from_nodeid, flags,
			  r->res_first_lkid, r->res_name);
	}

 out_found:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;

	if (toss_list) {
		r->res_toss_time = jiffies;
		/* the rsb was inactive (on toss list) */
		spin_unlock(&ls->ls_rsbtbl[b].lock);
	} else {
		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);
	}
	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
	error = 0;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}

static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	int rv;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
	INIT_LIST_HEAD(&lkb->lkb_time_list);
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	mutex_init(&lkb->lkb_cb_mutex);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		dlm_free_lkb(lkb);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;

	spin_lock(&ls->ls_lkbidr_spin);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
		return 1;
	} else {
		spin_unlock(&ls->ls_lkbidr_spin);
		return 0;
	}
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
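
/* The walk above stops at the first entry with a lower rqmode, so the
   queue stays sorted in descending mode order.  Illustrative example:
   inserting a CW (mode 2) entry into a queue holding EX (5), PR (3),
   NL (0) yields EX, PR, CW, NL.  If no entry has a lower mode, the
   loop runs off the end and __list_add() against the list head places
   the new entry at the tail. */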

/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

static int nodeid_warned(int nodeid, int num_nodes, int *warned)
{
	int i;

	for (i = 0; i < num_nodes; i++) {
		if (!warned[i]) {
			warned[i] = nodeid;
			return 0;
		}
		if (warned[i] == nodeid)
			return 1;
	}
	return 0;
}

void dlm_scan_waiters(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	ktime_t zero = ktime_set(0, 0);
	s64 us;
	s64 debug_maxus = 0;
	u32 debug_scanned = 0;
	u32 debug_expired = 0;
	int num_nodes = 0;
	int *warned = NULL;

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_equal(lkb->lkb_wait_time, zero))
			continue;

		debug_scanned++;

		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));

		if (us < dlm_config.ci_waitwarn_us)
			continue;

		lkb->lkb_wait_time = zero;

		debug_expired++;
		if (us > debug_maxus)
			debug_maxus = us;

		if (!num_nodes) {
			num_nodes = ls->ls_num_nodes;
			warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
		}
		if (!warned)
			continue;
		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
			continue;

		log_error(ls, "waitwarn %x %lld %d us check connection to "
			  "node %d", lkb->lkb_id, (long long)us,
			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(warned);

	if (debug_expired)
		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
			  debug_scanned, debug_expired,
			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
}
1451 /* add/remove lkb from global waiters list of lkb's waiting for
1452    a reply from a remote node */
1453 
add_to_waiters(struct dlm_lkb * lkb,int mstype,int to_nodeid)1454 static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
1455 {
1456 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1457 	int error = 0;
1458 
1459 	mutex_lock(&ls->ls_waiters_mutex);
1460 
1461 	if (is_overlap_unlock(lkb) ||
1462 	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
1463 		error = -EINVAL;
1464 		goto out;
1465 	}
1466 
1467 	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
1468 		switch (mstype) {
1469 		case DLM_MSG_UNLOCK:
1470 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
1471 			break;
1472 		case DLM_MSG_CANCEL:
1473 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
1474 			break;
1475 		default:
1476 			error = -EBUSY;
1477 			goto out;
1478 		}
1479 		lkb->lkb_wait_count++;
1480 		hold_lkb(lkb);
1481 
1482 		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
1483 			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
1484 			  lkb->lkb_wait_count, lkb->lkb_flags);
1485 		goto out;
1486 	}
1487 
1488 	DLM_ASSERT(!lkb->lkb_wait_count,
1489 		   dlm_print_lkb(lkb);
1490 		   printk("wait_count %d\n", lkb->lkb_wait_count););
1491 
1492 	lkb->lkb_wait_count++;
1493 	lkb->lkb_wait_type = mstype;
1494 	lkb->lkb_wait_time = ktime_get();
1495 	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
1496 	hold_lkb(lkb);
1497 	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
1498  out:
1499 	if (error)
1500 		log_error(ls, "addwait error %x %d flags %x %d %d %s",
1501 			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
1502 			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
1503 	mutex_unlock(&ls->ls_waiters_mutex);
1504 	return error;
1505 }
1506 
1507 /* We clear the RESEND flag because we might be taking an lkb off the waiters
1508    list as part of process_requestqueue (e.g. a lookup that has an optimized
1509    request reply on the requestqueue) between dlm_recover_waiters_pre() which
1510    set RESEND and dlm_recover_waiters_post() */
1511 
_remove_from_waiters(struct dlm_lkb * lkb,int mstype,struct dlm_message * ms)1512 static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
1513 				struct dlm_message *ms)
1514 {
1515 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1516 	int overlap_done = 0;
1517 
1518 	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
1519 		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
1520 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
1521 		overlap_done = 1;
1522 		goto out_del;
1523 	}
1524 
1525 	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
1526 		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
1527 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1528 		overlap_done = 1;
1529 		goto out_del;
1530 	}
1531 
1532 	/* Cancel state was preemptively cleared by a successful convert,
1533 	   see next comment, nothing to do. */
1534 
1535 	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
1536 	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
1537 		log_debug(ls, "remwait %x cancel_reply wait_type %d",
1538 			  lkb->lkb_id, lkb->lkb_wait_type);
1539 		return -1;
1540 	}
1541 
1542 	/* Remove for the convert reply, and premptively remove for the
1543 	   cancel reply.  A convert has been granted while there's still
1544 	   an outstanding cancel on it (the cancel is moot and the result
1545 	   in the cancel reply should be 0).  We preempt the cancel reply
1546 	   because the app gets the convert result and then can follow up
1547 	   with another op, like convert.  This subsequent op would see the
1548 	   lingering state of the cancel and fail with -EBUSY. */
1549 
1550 	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
1551 	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
1552 	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
1553 		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1554 			  lkb->lkb_id);
1555 		lkb->lkb_wait_type = 0;
1556 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1557 		lkb->lkb_wait_count--;
1558 		goto out_del;
1559 	}
1560 
1561 	/* N.B. type of reply may not always correspond to type of original
1562 	   msg due to lookup->request optimization, verify others? */
1563 
1564 	if (lkb->lkb_wait_type) {
1565 		lkb->lkb_wait_type = 0;
1566 		goto out_del;
1567 	}
1568 
1569 	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
1570 		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
1571 		  mstype, lkb->lkb_flags);
1572 	return -1;
1573 
1574  out_del:
1575 	/* the force-unlock/cancel has completed and we haven't recvd a reply
1576 	   to the op that was in progress prior to the unlock/cancel; we
1577 	   give up on any reply to the earlier op.  FIXME: not sure when/how
1578 	   this would happen */
1579 
1580 	if (overlap_done && lkb->lkb_wait_type) {
1581 		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1582 			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
1583 		lkb->lkb_wait_count--;
1584 		lkb->lkb_wait_type = 0;
1585 	}
1586 
1587 	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
1588 
1589 	lkb->lkb_flags &= ~DLM_IFL_RESEND;
1590 	lkb->lkb_wait_count--;
1591 	if (!lkb->lkb_wait_count)
1592 		list_del_init(&lkb->lkb_wait_reply);
1593 	unhold_lkb(lkb);
1594 	return 0;
1595 }
1596 
remove_from_waiters(struct dlm_lkb * lkb,int mstype)1597 static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1598 {
1599 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1600 	int error;
1601 
1602 	mutex_lock(&ls->ls_waiters_mutex);
1603 	error = _remove_from_waiters(lkb, mstype, NULL);
1604 	mutex_unlock(&ls->ls_waiters_mutex);
1605 	return error;
1606 }
1607 
1608 /* Handles situations where we might be processing a "fake" or "stub" reply in
1609    which we can't try to take waiters_mutex again. */
1610 
remove_from_waiters_ms(struct dlm_lkb * lkb,struct dlm_message * ms)1611 static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1612 {
1613 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1614 	int error;
1615 
1616 	if (ms->m_flags != DLM_IFL_STUB_MS)
1617 		mutex_lock(&ls->ls_waiters_mutex);
1618 	error = _remove_from_waiters(lkb, ms->m_type, ms);
1619 	if (ms->m_flags != DLM_IFL_STUB_MS)
1620 		mutex_unlock(&ls->ls_waiters_mutex);
1621 	return error;
1622 }
1623 
1624 /* If there's an rsb for the same resource being removed, ensure
1625    that the remove message is sent before the new lookup message.
1626    It should be rare to need a delay here, but if not, then it may
1627    be worthwhile to add a proper wait mechanism rather than a delay. */
1628 
wait_pending_remove(struct dlm_rsb * r)1629 static void wait_pending_remove(struct dlm_rsb *r)
1630 {
1631 	struct dlm_ls *ls = r->res_ls;
1632  restart:
1633 	spin_lock(&ls->ls_remove_spin);
1634 	if (ls->ls_remove_len &&
1635 	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
1636 		log_debug(ls, "delay lookup for remove dir %d %s",
1637 		  	  r->res_dir_nodeid, r->res_name);
1638 		spin_unlock(&ls->ls_remove_spin);
1639 		msleep(1);
1640 		goto restart;
1641 	}
1642 	spin_unlock(&ls->ls_remove_spin);
1643 }
1644 
1645 /*
1646  * ls_remove_spin protects ls_remove_name and ls_remove_len which are
1647  * read by other threads in wait_pending_remove.  ls_remove_names
1648  * and ls_remove_lens are only used by the scan thread, so they do
1649  * not need protection.
1650  */
1651 
shrink_bucket(struct dlm_ls * ls,int b)1652 static void shrink_bucket(struct dlm_ls *ls, int b)
1653 {
1654 	struct rb_node *n, *next;
1655 	struct dlm_rsb *r;
1656 	char *name;
1657 	int our_nodeid = dlm_our_nodeid();
1658 	int remote_count = 0;
1659 	int need_shrink = 0;
1660 	int i, len, rv;
1661 
1662 	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
1663 
1664 	spin_lock(&ls->ls_rsbtbl[b].lock);
1665 
1666 	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
1667 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1668 		return;
1669 	}
1670 
1671 	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
1672 		next = rb_next(n);
1673 		r = rb_entry(n, struct dlm_rsb, res_hashnode);
1674 
1675 		/* If we're the directory record for this rsb, and
1676 		   we're not the master of it, then we need to wait
1677 		   for the master node to send us a dir remove
1678 		   before removing the dir record. */
1679 
1680 		if (!dlm_no_directory(ls) &&
1681 		    (r->res_master_nodeid != our_nodeid) &&
1682 		    (dlm_dir_nodeid(r) == our_nodeid)) {
1683 			continue;
1684 		}
1685 
1686 		need_shrink = 1;
1687 
1688 		if (!time_after_eq(jiffies, r->res_toss_time +
1689 				   dlm_config.ci_toss_secs * HZ)) {
1690 			continue;
1691 		}
1692 
1693 		if (!dlm_no_directory(ls) &&
1694 		    (r->res_master_nodeid == our_nodeid) &&
1695 		    (dlm_dir_nodeid(r) != our_nodeid)) {
1696 
1697 			/* We're the master of this rsb but we're not
1698 			   the directory record, so we need to tell the
1699 			   dir node to remove the dir record. */
1700 
1701 			ls->ls_remove_lens[remote_count] = r->res_length;
1702 			memcpy(ls->ls_remove_names[remote_count], r->res_name,
1703 			       DLM_RESNAME_MAXLEN);
1704 			remote_count++;
1705 
1706 			if (remote_count >= DLM_REMOVE_NAMES_MAX)
1707 				break;
1708 			continue;
1709 		}
1710 
1711 		if (!kref_put(&r->res_ref, kill_rsb)) {
1712 			log_error(ls, "tossed rsb in use %s", r->res_name);
1713 			continue;
1714 		}
1715 
1716 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1717 		dlm_free_rsb(r);
1718 	}
1719 
1720 	if (need_shrink)
1721 		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
1722 	else
1723 		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
1724 	spin_unlock(&ls->ls_rsbtbl[b].lock);
1725 
1726 	/*
1727 	 * While searching for rsb's to free, we found some that require
1728 	 * remote removal.  We leave them in place and find them again here
1729 	 * so there is a very small gap between removing them from the toss
1730 	 * list and sending the removal.  Keeping this gap small is
1731 	 * important to keep us (the master node) from being out of sync
1732 	 * with the remote dir node for very long.
1733 	 *
1734 	 * From the time the rsb is removed from toss until just after
1735 	 * send_remove, the rsb name is saved in ls_remove_name.  A new
1736 	 * lookup checks this to ensure that a new lookup message for the
1737 	 * same resource name is not sent just before the remove message.
1738 	 */
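	/* Illustrative timeline for one name (master M, directory node D),
	 * matching the loop below:
	 *   M: rb_erase() from toss  -> publish name in ls_remove_name
	 *   M: send_remove()         -> D: removes its directory record
	 *   M: clear ls_remove_name  -> blocked lookups of the name resume
	 */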
1739 
1740 	for (i = 0; i < remote_count; i++) {
1741 		name = ls->ls_remove_names[i];
1742 		len = ls->ls_remove_lens[i];
1743 
1744 		spin_lock(&ls->ls_rsbtbl[b].lock);
1745 		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1746 		if (rv) {
1747 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1748 			log_debug(ls, "remove_name not toss %s", name);
1749 			continue;
1750 		}
1751 
1752 		if (r->res_master_nodeid != our_nodeid) {
1753 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1754 			log_debug(ls, "remove_name master %d dir %d our %d %s",
1755 				  r->res_master_nodeid, r->res_dir_nodeid,
1756 				  our_nodeid, name);
1757 			continue;
1758 		}
1759 
1760 		if (r->res_dir_nodeid == our_nodeid) {
1761 			/* should never happen */
1762 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1763 			log_error(ls, "remove_name dir %d master %d our %d %s",
1764 				  r->res_dir_nodeid, r->res_master_nodeid,
1765 				  our_nodeid, name);
1766 			continue;
1767 		}
1768 
1769 		if (!time_after_eq(jiffies, r->res_toss_time +
1770 				   dlm_config.ci_toss_secs * HZ)) {
1771 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1772 			log_debug(ls, "remove_name toss_time %lu now %lu %s",
1773 				  r->res_toss_time, jiffies, name);
1774 			continue;
1775 		}
1776 
1777 		if (!kref_put(&r->res_ref, kill_rsb)) {
1778 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1779 			log_error(ls, "remove_name in use %s", name);
1780 			continue;
1781 		}
1782 
1783 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1784 
1785 		/* block lookup of same name until we've sent remove */
1786 		spin_lock(&ls->ls_remove_spin);
1787 		ls->ls_remove_len = len;
1788 		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
1789 		spin_unlock(&ls->ls_remove_spin);
1790 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1791 
1792 		send_remove(r);
1793 
1794 		/* allow lookup of name again */
1795 		spin_lock(&ls->ls_remove_spin);
1796 		ls->ls_remove_len = 0;
1797 		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
1798 		spin_unlock(&ls->ls_remove_spin);
1799 
1800 		dlm_free_rsb(r);
1801 	}
1802 }
1803 
1804 void dlm_scan_rsbs(struct dlm_ls *ls)
1805 {
1806 	int i;
1807 
1808 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1809 		shrink_bucket(ls, i);
1810 		if (dlm_locking_stopped(ls))
1811 			break;
1812 		cond_resched();
1813 	}
1814 }
1815 
1816 static void add_timeout(struct dlm_lkb *lkb)
1817 {
1818 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1819 
1820 	if (is_master_copy(lkb))
1821 		return;
1822 
1823 	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1824 	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1825 		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1826 		goto add_it;
1827 	}
1828 	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1829 		goto add_it;
1830 	return;
1831 
1832  add_it:
1833 	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1834 	mutex_lock(&ls->ls_timeout_mutex);
1835 	hold_lkb(lkb);
1836 	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1837 	mutex_unlock(&ls->ls_timeout_mutex);
1838 }
1839 
1840 static void del_timeout(struct dlm_lkb *lkb)
1841 {
1842 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1843 
1844 	mutex_lock(&ls->ls_timeout_mutex);
1845 	if (!list_empty(&lkb->lkb_time_list)) {
1846 		list_del_init(&lkb->lkb_time_list);
1847 		unhold_lkb(lkb);
1848 	}
1849 	mutex_unlock(&ls->ls_timeout_mutex);
1850 }
1851 
1852 /* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1853    lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
1854    and then lock rsb because of lock ordering in add_timeout.  We may need
1855    to specify some special timeout-related bits in the lkb that are just to
1856    be accessed under the timeout_mutex. */
1857 
1858 void dlm_scan_timeout(struct dlm_ls *ls)
1859 {
1860 	struct dlm_rsb *r;
1861 	struct dlm_lkb *lkb;
1862 	int do_cancel, do_warn;
1863 	s64 wait_us;
1864 
1865 	for (;;) {
1866 		if (dlm_locking_stopped(ls))
1867 			break;
1868 
1869 		do_cancel = 0;
1870 		do_warn = 0;
1871 		mutex_lock(&ls->ls_timeout_mutex);
1872 		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1873 
1874 			wait_us = ktime_to_us(ktime_sub(ktime_get(),
1875 							lkb->lkb_timestamp));
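			/* lkb_timeout_cs and ci_timewarn_cs are centiseconds;
			   multiplying by 10000 converts cs to microseconds
			   for comparison with wait_us */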
1876 
1877 			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1878 			    wait_us >= (lkb->lkb_timeout_cs * 10000))
1879 				do_cancel = 1;
1880 
1881 			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1882 			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
1883 				do_warn = 1;
1884 
1885 			if (!do_cancel && !do_warn)
1886 				continue;
1887 			hold_lkb(lkb);
1888 			break;
1889 		}
1890 		mutex_unlock(&ls->ls_timeout_mutex);
1891 
1892 		if (!do_cancel && !do_warn)
1893 			break;
1894 
1895 		r = lkb->lkb_resource;
1896 		hold_rsb(r);
1897 		lock_rsb(r);
1898 
1899 		if (do_warn) {
1900 			/* clear flag so we only warn once */
1901 			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1902 			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1903 				del_timeout(lkb);
1904 			dlm_timeout_warn(lkb);
1905 		}
1906 
1907 		if (do_cancel) {
1908 			log_debug(ls, "timeout cancel %x node %d %s",
1909 				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1910 			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1911 			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1912 			del_timeout(lkb);
1913 			_cancel_lock(r, lkb);
1914 		}
1915 
1916 		unlock_rsb(r);
1917 		unhold_rsb(r);
1918 		dlm_put_lkb(lkb);
1919 	}
1920 }
1921 
1922 /* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1923    dlm_recoverd before checking/setting ls_recover_begin. */
1924 
1925 void dlm_adjust_timeouts(struct dlm_ls *ls)
1926 {
1927 	struct dlm_lkb *lkb;
1928 	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1929 
1930 	ls->ls_recover_begin = 0;
1931 	mutex_lock(&ls->ls_timeout_mutex);
1932 	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1933 		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1934 	mutex_unlock(&ls->ls_timeout_mutex);
1935 
1936 	if (!dlm_config.ci_waitwarn_us)
1937 		return;
1938 
1939 	mutex_lock(&ls->ls_waiters_mutex);
1940 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1941 		if (ktime_to_us(lkb->lkb_wait_time))
1942 			lkb->lkb_wait_time = ktime_get();
1943 	}
1944 	mutex_unlock(&ls->ls_waiters_mutex);
1945 }
1946 
1947 /* lkb is master or local copy */
1948 
1949 static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1950 {
1951 	int b, len = r->res_ls->ls_lvblen;
1952 
1953 	/* b=1 lvb returned to caller
1954 	   b=0 lvb written to rsb or invalidated
1955 	   b=-1 do nothing */
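	/* dlm_lvb_operations is indexed with grmode+1/rqmode+1 because
	   DLM_LOCK_IV is -1; indices 0..6 then cover modes IV through EX */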
1956 
1957 	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1958 
1959 	if (b == 1) {
1960 		if (!lkb->lkb_lvbptr)
1961 			return;
1962 
1963 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1964 			return;
1965 
1966 		if (!r->res_lvbptr)
1967 			return;
1968 
1969 		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1970 		lkb->lkb_lvbseq = r->res_lvbseq;
1971 
1972 	} else if (b == 0) {
1973 		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1974 			rsb_set_flag(r, RSB_VALNOTVALID);
1975 			return;
1976 		}
1977 
1978 		if (!lkb->lkb_lvbptr)
1979 			return;
1980 
1981 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1982 			return;
1983 
1984 		if (!r->res_lvbptr)
1985 			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1986 
1987 		if (!r->res_lvbptr)
1988 			return;
1989 
1990 		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1991 		r->res_lvbseq++;
1992 		lkb->lkb_lvbseq = r->res_lvbseq;
1993 		rsb_clear_flag(r, RSB_VALNOTVALID);
1994 	}
1995 
1996 	if (rsb_flag(r, RSB_VALNOTVALID))
1997 		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1998 }
1999 
2000 static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2001 {
2002 	if (lkb->lkb_grmode < DLM_LOCK_PW)
2003 		return;
2004 
2005 	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
2006 		rsb_set_flag(r, RSB_VALNOTVALID);
2007 		return;
2008 	}
2009 
2010 	if (!lkb->lkb_lvbptr)
2011 		return;
2012 
2013 	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2014 		return;
2015 
2016 	if (!r->res_lvbptr)
2017 		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
2018 
2019 	if (!r->res_lvbptr)
2020 		return;
2021 
2022 	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2023 	r->res_lvbseq++;
2024 	rsb_clear_flag(r, RSB_VALNOTVALID);
2025 }
2026 
2027 /* lkb is process copy (pc) */
2028 
2029 static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2030 			    struct dlm_message *ms)
2031 {
2032 	int b;
2033 
2034 	if (!lkb->lkb_lvbptr)
2035 		return;
2036 
2037 	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2038 		return;
2039 
2040 	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
2041 	if (b == 1) {
2042 		int len = receive_extralen(ms);
2043 		if (len > r->res_ls->ls_lvblen)
2044 			len = r->res_ls->ls_lvblen;
2045 		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2046 		lkb->lkb_lvbseq = ms->m_lvbseq;
2047 	}
2048 }
2049 
2050 /* Manipulate lkb's on rsb's convert/granted/waiting queues
2051    remove_lock -- used for unlock, removes lkb from granted
2052    revert_lock -- used for cancel, moves lkb from convert to granted
2053    grant_lock  -- used for request and convert, adds lkb to granted or
2054                   moves lkb from convert or waiting to granted
2055 
2056    Each of these is used for master or local copy lkb's.  There is
2057    also a _pc() variation used to make the corresponding change on
2058    a process copy (pc) lkb. */
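
/* For example: an unlock on the master applies remove_lock() to the master
   copy in do_unlock(); when the unlock reply reaches the node holding the
   process copy, receive_unlock_reply() applies remove_lock_pc() to mirror
   the change there. */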
2059 
2060 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2061 {
2062 	del_lkb(r, lkb);
2063 	lkb->lkb_grmode = DLM_LOCK_IV;
2064 	/* this unhold undoes the original ref from create_lkb()
2065 	   so this leads to the lkb being freed */
2066 	unhold_lkb(lkb);
2067 }
2068 
2069 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2070 {
2071 	set_lvb_unlock(r, lkb);
2072 	_remove_lock(r, lkb);
2073 }
2074 
2075 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2076 {
2077 	_remove_lock(r, lkb);
2078 }
2079 
2080 /* returns: 0 did nothing
2081 	    1 moved lock to granted
2082 	   -1 removed lock */
2083 
2084 static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2085 {
2086 	int rv = 0;
2087 
2088 	lkb->lkb_rqmode = DLM_LOCK_IV;
2089 
2090 	switch (lkb->lkb_status) {
2091 	case DLM_LKSTS_GRANTED:
2092 		break;
2093 	case DLM_LKSTS_CONVERT:
2094 		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2095 		rv = 1;
2096 		break;
2097 	case DLM_LKSTS_WAITING:
2098 		del_lkb(r, lkb);
2099 		lkb->lkb_grmode = DLM_LOCK_IV;
2100 		/* this unhold undoes the original ref from create_lkb()
2101 		   so this leads to the lkb being freed */
2102 		unhold_lkb(lkb);
2103 		rv = -1;
2104 		break;
2105 	default:
2106 		log_print("invalid status for revert %d", lkb->lkb_status);
2107 	}
2108 	return rv;
2109 }
2110 
2111 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2112 {
2113 	return revert_lock(r, lkb);
2114 }
2115 
2116 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2117 {
2118 	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2119 		lkb->lkb_grmode = lkb->lkb_rqmode;
2120 		if (lkb->lkb_status)
2121 			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2122 		else
2123 			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2124 	}
2125 
2126 	lkb->lkb_rqmode = DLM_LOCK_IV;
2127 	lkb->lkb_highbast = 0;
2128 }
2129 
2130 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2131 {
2132 	set_lvb_lock(r, lkb);
2133 	_grant_lock(r, lkb);
2134 }
2135 
2136 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2137 			  struct dlm_message *ms)
2138 {
2139 	set_lvb_lock_pc(r, lkb, ms);
2140 	_grant_lock(r, lkb);
2141 }
2142 
2143 /* called by grant_pending_locks() which means an async grant message must
2144    be sent to the requesting node in addition to granting the lock if the
2145    lkb belongs to a remote node. */
2146 
2147 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2148 {
2149 	grant_lock(r, lkb);
2150 	if (is_master_copy(lkb))
2151 		send_grant(r, lkb);
2152 	else
2153 		queue_cast(r, lkb, 0);
2154 }
2155 
2156 /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2157    change the granted/requested modes.  We're munging things accordingly in
2158    the process copy.
2159    CONVDEADLK: our grmode may have been forced down to NL to resolve a
2160    conversion deadlock
2161    ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2162    compatible with other granted locks */
2163 
2164 static void munge_demoted(struct dlm_lkb *lkb)
2165 {
2166 	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2167 		log_print("munge_demoted %x invalid modes gr %d rq %d",
2168 			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2169 		return;
2170 	}
2171 
2172 	lkb->lkb_grmode = DLM_LOCK_NL;
2173 }
2174 
2175 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2176 {
2177 	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
2178 	    ms->m_type != DLM_MSG_GRANT) {
2179 		log_print("munge_altmode %x invalid reply type %d",
2180 			  lkb->lkb_id, ms->m_type);
2181 		return;
2182 	}
2183 
2184 	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2185 		lkb->lkb_rqmode = DLM_LOCK_PR;
2186 	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2187 		lkb->lkb_rqmode = DLM_LOCK_CW;
2188 	else {
2189 		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2190 		dlm_print_lkb(lkb);
2191 	}
2192 }
2193 
2194 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2195 {
2196 	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2197 					   lkb_statequeue);
2198 	if (lkb->lkb_id == first->lkb_id)
2199 		return 1;
2200 
2201 	return 0;
2202 }
2203 
2204 /* Check if the given lkb conflicts with another lkb on the queue. */
2205 
2206 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2207 {
2208 	struct dlm_lkb *this;
2209 
2210 	list_for_each_entry(this, head, lkb_statequeue) {
2211 		if (this == lkb)
2212 			continue;
2213 		if (!modes_compat(this, lkb))
2214 			return 1;
2215 	}
2216 	return 0;
2217 }
2218 
2219 /*
2220  * "A conversion deadlock arises with a pair of lock requests in the converting
2221  * queue for one resource.  The granted mode of each lock blocks the requested
2222  * mode of the other lock."
2223  *
2224  * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2225  * convert queue from being granted, then deadlk/demote lkb.
2226  *
2227  * Example:
2228  * Granted Queue: empty
2229  * Convert Queue: NL->EX (first lock)
2230  *                PR->EX (second lock)
2231  *
2232  * The first lock can't be granted because of the granted mode of the second
2233  * lock and the second lock can't be granted because it's not first in the
2234  * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2235  * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2236  * flag set and return DEMOTED in the lksb flags.
2237  *
2238  * Originally, this function detected conv-deadlk in a more limited scope:
2239  * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2240  * - if lkb1 was the first entry in the queue (not just earlier), and was
2241  *   blocked by the granted mode of lkb2, and there was nothing on the
2242  *   granted queue preventing lkb1 from being granted immediately, i.e.
2243  *   lkb2 was the only thing preventing lkb1 from being granted.
2244  *
2245  * That second condition meant we'd only say there was conv-deadlk if
2246  * resolving it (by demotion) would lead to the first lock on the convert
2247  * queue being granted right away.  It allowed conversion deadlocks to exist
2248  * between locks on the convert queue while they couldn't be granted anyway.
2249  *
2250  * Now, we detect and take action on conversion deadlocks immediately when
2251  * they're created, even if they may not be immediately consequential.  If
2252  * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2253  * mode that would prevent lkb1's conversion from being granted, we do a
2254  * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2255  * I think this means that the lkb_is_ahead condition below should always
2256  * be zero, i.e. there will never be conv-deadlk between two locks that are
2257  * both already on the convert queue.
2258  */
2259 
2260 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2261 {
2262 	struct dlm_lkb *lkb1;
2263 	int lkb_is_ahead = 0;
2264 
2265 	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2266 		if (lkb1 == lkb2) {
2267 			lkb_is_ahead = 1;
2268 			continue;
2269 		}
2270 
2271 		if (!lkb_is_ahead) {
2272 			if (!modes_compat(lkb2, lkb1))
2273 				return 1;
2274 		} else {
2275 			if (!modes_compat(lkb2, lkb1) &&
2276 			    !modes_compat(lkb1, lkb2))
2277 				return 1;
2278 		}
2279 	}
2280 	return 0;
2281 }
2282 
2283 /*
2284  * Return 1 if the lock can be granted, 0 otherwise.
2285  * Also detect and resolve conversion deadlocks.
2286  *
2287  * lkb is the lock to be granted
2288  *
2289  * now is 1 if the function is being called in the context of the
2290  * immediate request, it is 0 if called later, after the lock has been
2291  * queued.
2292  *
2293  * recover is 1 if dlm_recover_grant() is trying to grant conversions
2294  * after recovery.
2295  *
2296  * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2297  */
2298 
2299 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2300 			   int recover)
2301 {
2302 	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2303 
2304 	/*
2305 	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2306 	 * a new request for a NL mode lock being blocked.
2307 	 *
2308 	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2309 	 * request, then it would be granted.  In essence, the use of this flag
2310 	 * tells the Lock Manager to expedite this request by not considering
2311 	 * what may be in the CONVERTING or WAITING queues...  As of this
2312 	 * writing, the EXPEDITE flag can be used only with new requests for NL
2313 	 * mode locks.  This flag is not valid for conversion requests.
2314 	 *
2315 	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
2316 	 * conversion or used with a non-NL requested mode.  We also know an
2317 	 * EXPEDITE request is always granted immediately, so now must always
2318 	 * be 1.  The full condition to grant an expedite request: (now &&
2319 	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2320 	 * therefore be shortened to just checking the flag.
2321 	 */
2322 
2323 	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2324 		return 1;
2325 
2326 	/*
2327 	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2328 	 * added to the remaining conditions.
2329 	 */
2330 
2331 	if (queue_conflict(&r->res_grantqueue, lkb))
2332 		return 0;
2333 
2334 	/*
2335 	 * 6-3: By default, a conversion request is immediately granted if the
2336 	 * requested mode is compatible with the modes of all other granted
2337 	 * locks
2338 	 */
2339 
2340 	if (queue_conflict(&r->res_convertqueue, lkb))
2341 		return 0;
2342 
2343 	/*
2344 	 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2345 	 * locks for a recovered rsb, on which lkb's have been rebuilt.
2346 	 * The lkb's may have been rebuilt on the queues in a different
2347 	 * order than they were in on the previous master.  So, granting
2348 	 * queued conversions in order after recovery doesn't make sense
2349 	 * since the order hasn't been preserved anyway.  The new order
2350 	 * could also have created a new "in place" conversion deadlock.
2351 	 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2352 	 * After recovery, there would be no granted locks, and possibly
2353 	 * NL->EX, PR->EX, an in-place conversion deadlock.)  So, after
2354 	 * recovery, grant conversions without considering order.
2355 	 */
2356 
2357 	if (conv && recover)
2358 		return 1;
2359 
2360 	/*
2361 	 * 6-5: But the default algorithm for deciding whether to grant or
2362 	 * queue conversion requests does not by itself guarantee that such
2363 	 * requests are serviced on a "first come first serve" basis.  This, in
2364 	 * turn, can lead to a phenomenon known as "indefinite postponement".
2365 	 *
2366 	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2367 	 * the system service employed to request a lock conversion.  This flag
2368 	 * forces certain conversion requests to be queued, even if they are
2369 	 * compatible with the granted modes of other locks on the same
2370 	 * resource.  Thus, the use of this flag results in conversion requests
2371 	 * being ordered on a "first come first serve" basis.
2372 	 *
2373 	 * DCT: This condition is all about new conversions being able to occur
2374 	 * "in place" while the lock remains on the granted queue (assuming
2375 	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
2376 	 * doesn't _have_ to go onto the convert queue where it's processed in
2377 	 * order.  The "now" variable is necessary to distinguish converts
2378 	 * being received and processed for the first time now, because once a
2379 	 * convert is moved to the conversion queue the condition below applies
2380 	 * requiring fifo granting.
2381 	 */
2382 
2383 	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2384 		return 1;
2385 
2386 	/*
2387 	 * Even if the convert is compat with all granted locks,
2388 	 * QUECVT forces it behind other locks on the convert queue.
2389 	 */
2390 
2391 	if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2392 		if (list_empty(&r->res_convertqueue))
2393 			return 1;
2394 		else
2395 			return 0;
2396 	}
2397 
2398 	/*
2399 	 * The NOORDER flag is set to avoid the standard vms rules on grant
2400 	 * order.
2401 	 */
2402 
2403 	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2404 		return 1;
2405 
2406 	/*
2407 	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2408 	 * granted until all other conversion requests ahead of it are granted
2409 	 * and/or canceled.
2410 	 */
2411 
2412 	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2413 		return 1;
2414 
2415 	/*
2416 	 * 6-4: By default, a new request is immediately granted only if all
2417 	 * three of the following conditions are satisfied when the request is
2418 	 * issued:
2419 	 * - The queue of ungranted conversion requests for the resource is
2420 	 *   empty.
2421 	 * - The queue of ungranted new requests for the resource is empty.
2422 	 * - The mode of the new request is compatible with the most
2423 	 *   restrictive mode of all granted locks on the resource.
2424 	 */
2425 
2426 	if (now && !conv && list_empty(&r->res_convertqueue) &&
2427 	    list_empty(&r->res_waitqueue))
2428 		return 1;
2429 
2430 	/*
2431 	 * 6-4: Once a lock request is in the queue of ungranted new requests,
2432 	 * it cannot be granted until the queue of ungranted conversion
2433 	 * requests is empty, all ungranted new requests ahead of it are
2434 	 * granted and/or canceled, and it is compatible with the granted mode
2435 	 * of the most restrictive lock granted on the resource.
2436 	 */
2437 
2438 	if (!now && !conv && list_empty(&r->res_convertqueue) &&
2439 	    first_in_list(lkb, &r->res_waitqueue))
2440 		return 1;
2441 
2442 	return 0;
2443 }
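
/* Worked example of the 6-4 rules above: with EX granted and both queues
   empty, a new PR request fails on queue_conflict() against the grant queue,
   so do_request() queues it (or fails it with -EAGAIN if it can't be
   queued).  Once the EX lock is unlocked, grant_pending_wait() grants the
   PR via the final !now condition: the convert queue is empty and the lkb
   is first_in_list() on the wait queue. */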
2444 
2445 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2446 			  int recover, int *err)
2447 {
2448 	int rv;
2449 	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2450 	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2451 
2452 	if (err)
2453 		*err = 0;
2454 
2455 	rv = _can_be_granted(r, lkb, now, recover);
2456 	if (rv)
2457 		goto out;
2458 
2459 	/*
2460 	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2461 	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2462 	 * cancels one of the locks.
2463 	 */
2464 
2465 	if (is_convert && can_be_queued(lkb) &&
2466 	    conversion_deadlock_detect(r, lkb)) {
2467 		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2468 			lkb->lkb_grmode = DLM_LOCK_NL;
2469 			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2470 		} else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
2471 			if (err)
2472 				*err = -EDEADLK;
2473 			else {
2474 				log_print("can_be_granted deadlock %x now %d",
2475 					  lkb->lkb_id, now);
2476 				dlm_dump_rsb(r);
2477 			}
2478 		}
2479 		goto out;
2480 	}
2481 
2482 	/*
2483 	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2484 	 * to grant a request in a mode other than the normal rqmode.  It's a
2485 	 * simple way to provide a big optimization to applications that can
2486 	 * use them.
2487 	 */
2488 
2489 	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2490 		alt = DLM_LOCK_PR;
2491 	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2492 		alt = DLM_LOCK_CW;
2493 
2494 	if (alt) {
2495 		lkb->lkb_rqmode = alt;
2496 		rv = _can_be_granted(r, lkb, now, 0);
2497 		if (rv)
2498 			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2499 		else
2500 			lkb->lkb_rqmode = rqmode;
2501 	}
2502  out:
2503 	return rv;
2504 }
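
/* Illustrative sketch (not part of this file): how a hypothetical caller
 * might use the non-standard ALTPR flag handled above.  The request asks
 * for CW but allows the dlm to grant PR instead; DLM_SBF_ALTMODE in the
 * lksb flags reports that the alternate mode was granted.  The example_*
 * names are assumptions for illustration only.
 */
static void example_alt_ast(void *astarg)
{
	struct dlm_lksb *lksb = astarg;

	if (!lksb->sb_status && (lksb->sb_flags & DLM_SBF_ALTMODE))
		pr_debug("granted alternate mode PR, not requested CW\n");
}

static int example_request_altpr(dlm_lockspace_t *ls, struct dlm_lksb *lksb,
				 void *name, unsigned int namelen)
{
	return dlm_lock(ls, DLM_LOCK_CW, lksb, DLM_LKF_ALTPR, name, namelen,
			0, example_alt_ast, lksb, NULL);
}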
2505 
2506 /* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
2507    for locks pending on the convert list.  Once verified (watch for these
2508    log_prints), we should be able to just call _can_be_granted() and not
2509    bother with the demote/deadlk cases here (and there's no easy way to deal
2510    with a deadlk here, we'd have to generate something like grant_lock with
2511    the deadlk error.) */
2512 
2513 /* Returns the highest requested mode of all blocked conversions; sets
2514    cw if there's a blocked conversion to DLM_LOCK_CW. */
2515 
2516 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2517 				 unsigned int *count)
2518 {
2519 	struct dlm_lkb *lkb, *s;
2520 	int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2521 	int hi, demoted, quit, grant_restart, demote_restart;
2522 	int deadlk;
2523 
2524 	quit = 0;
2525  restart:
2526 	grant_restart = 0;
2527 	demote_restart = 0;
2528 	hi = DLM_LOCK_IV;
2529 
2530 	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2531 		demoted = is_demoted(lkb);
2532 		deadlk = 0;
2533 
2534 		if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2535 			grant_lock_pending(r, lkb);
2536 			grant_restart = 1;
2537 			if (count)
2538 				(*count)++;
2539 			continue;
2540 		}
2541 
2542 		if (!demoted && is_demoted(lkb)) {
2543 			log_print("WARN: pending demoted %x node %d %s",
2544 				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2545 			demote_restart = 1;
2546 			continue;
2547 		}
2548 
2549 		if (deadlk) {
2550 			log_print("WARN: pending deadlock %x node %d %s",
2551 				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2552 			dlm_dump_rsb(r);
2553 			continue;
2554 		}
2555 
2556 		hi = max_t(int, lkb->lkb_rqmode, hi);
2557 
2558 		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2559 			*cw = 1;
2560 	}
2561 
2562 	if (grant_restart)
2563 		goto restart;
2564 	if (demote_restart && !quit) {
2565 		quit = 1;
2566 		goto restart;
2567 	}
2568 
2569 	return max_t(int, high, hi);
2570 }
2571 
2572 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2573 			      unsigned int *count)
2574 {
2575 	struct dlm_lkb *lkb, *s;
2576 
2577 	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2578 		if (can_be_granted(r, lkb, 0, 0, NULL)) {
2579 			grant_lock_pending(r, lkb);
2580 			if (count)
2581 				(*count)++;
2582 		} else {
2583 			high = max_t(int, lkb->lkb_rqmode, high);
2584 			if (lkb->lkb_rqmode == DLM_LOCK_CW)
2585 				*cw = 1;
2586 		}
2587 	}
2588 
2589 	return high;
2590 }
2591 
2592 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2593    on either the convert or waiting queue.
2594    high is the largest rqmode of all locks blocked on the convert or
2595    waiting queue. */
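
/* For example: a granted PR lock is compatible with high == PR, but a
   blocked CW request (cw set) is incompatible with PR, so the PR holder
   still needs a blocking AST; that is the special first case below. */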
2596 
2597 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2598 {
2599 	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2600 		if (gr->lkb_highbast < DLM_LOCK_EX)
2601 			return 1;
2602 		return 0;
2603 	}
2604 
2605 	if (gr->lkb_highbast < high &&
2606 	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2607 		return 1;
2608 	return 0;
2609 }
2610 
2611 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2612 {
2613 	struct dlm_lkb *lkb, *s;
2614 	int high = DLM_LOCK_IV;
2615 	int cw = 0;
2616 
2617 	if (!is_master(r)) {
2618 		log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2619 		dlm_dump_rsb(r);
2620 		return;
2621 	}
2622 
2623 	high = grant_pending_convert(r, high, &cw, count);
2624 	high = grant_pending_wait(r, high, &cw, count);
2625 
2626 	if (high == DLM_LOCK_IV)
2627 		return;
2628 
2629 	/*
2630 	 * If there are locks left on the wait/convert queue then send blocking
2631 	 * ASTs to granted locks based on the largest requested mode (high)
2632 	 * found above.
2633 	 */
2634 
2635 	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2636 		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2637 			if (cw && high == DLM_LOCK_PR &&
2638 			    lkb->lkb_grmode == DLM_LOCK_PR)
2639 				queue_bast(r, lkb, DLM_LOCK_CW);
2640 			else
2641 				queue_bast(r, lkb, high);
2642 			lkb->lkb_highbast = high;
2643 		}
2644 	}
2645 }
2646 
2647 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2648 {
2649 	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2650 	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2651 		if (gr->lkb_highbast < DLM_LOCK_EX)
2652 			return 1;
2653 		return 0;
2654 	}
2655 
2656 	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2657 		return 1;
2658 	return 0;
2659 }
2660 
2661 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2662 			    struct dlm_lkb *lkb)
2663 {
2664 	struct dlm_lkb *gr;
2665 
2666 	list_for_each_entry(gr, head, lkb_statequeue) {
2667 		/* skip self when sending basts to convertqueue */
2668 		if (gr == lkb)
2669 			continue;
2670 		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2671 			queue_bast(r, gr, lkb->lkb_rqmode);
2672 			gr->lkb_highbast = lkb->lkb_rqmode;
2673 		}
2674 	}
2675 }
2676 
2677 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2678 {
2679 	send_bast_queue(r, &r->res_grantqueue, lkb);
2680 }
2681 
2682 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2683 {
2684 	send_bast_queue(r, &r->res_grantqueue, lkb);
2685 	send_bast_queue(r, &r->res_convertqueue, lkb);
2686 }
2687 
2688 /* set_master(r, lkb) -- set the master nodeid of a resource
2689 
2690    The purpose of this function is to set the nodeid field in the given
2691    lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
2692    known, it can just be copied to the lkb and the function will return
2693    0.  If the rsb's nodeid is _not_ known, it needs to be looked up
2694    before it can be copied to the lkb.
2695 
2696    When the rsb nodeid is being looked up remotely, the initial lkb
2697    causing the lookup is kept on the ls_waiters list waiting for the
2698    lookup reply.  Other lkb's waiting for the same rsb lookup are kept
2699    on the rsb's res_lookup list until the master is verified.
2700 
2701    Return values:
2702    0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2703    1: the rsb master is not available and the lkb has been placed on
2704       a wait queue
2705 */
2706 
2707 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2708 {
2709 	int our_nodeid = dlm_our_nodeid();
2710 
2711 	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2712 		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2713 		r->res_first_lkid = lkb->lkb_id;
2714 		lkb->lkb_nodeid = r->res_nodeid;
2715 		return 0;
2716 	}
2717 
2718 	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2719 		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2720 		return 1;
2721 	}
2722 
2723 	if (r->res_master_nodeid == our_nodeid) {
2724 		lkb->lkb_nodeid = 0;
2725 		return 0;
2726 	}
2727 
2728 	if (r->res_master_nodeid) {
2729 		lkb->lkb_nodeid = r->res_master_nodeid;
2730 		return 0;
2731 	}
2732 
2733 	if (dlm_dir_nodeid(r) == our_nodeid) {
2734 		/* This is a somewhat unusual case; find_rsb will usually
2735 		   have set res_master_nodeid when dir nodeid is local, but
2736 		   there are cases where we become the dir node after we've
2737 		   passed find_rsb and go through _request_lock again.
2738 		   confirm_master() or process_lookup_list() needs to be
2739 		   called after this. */
2740 		log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2741 			  lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2742 			  r->res_name);
2743 		r->res_master_nodeid = our_nodeid;
2744 		r->res_nodeid = 0;
2745 		lkb->lkb_nodeid = 0;
2746 		return 0;
2747 	}
2748 
2749 	wait_pending_remove(r);
2750 
2751 	r->res_first_lkid = lkb->lkb_id;
2752 	send_lookup(r, lkb);
2753 	return 1;
2754 }
2755 
2756 static void process_lookup_list(struct dlm_rsb *r)
2757 {
2758 	struct dlm_lkb *lkb, *safe;
2759 
2760 	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2761 		list_del_init(&lkb->lkb_rsb_lookup);
2762 		_request_lock(r, lkb);
2763 		schedule();
2764 	}
2765 }
2766 
2767 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2768 
2769 static void confirm_master(struct dlm_rsb *r, int error)
2770 {
2771 	struct dlm_lkb *lkb;
2772 
2773 	if (!r->res_first_lkid)
2774 		return;
2775 
2776 	switch (error) {
2777 	case 0:
2778 	case -EINPROGRESS:
2779 		r->res_first_lkid = 0;
2780 		process_lookup_list(r);
2781 		break;
2782 
2783 	case -EAGAIN:
2784 	case -EBADR:
2785 	case -ENOTBLK:
2786 		/* the remote request failed and won't be retried (it was
2787 		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2788 		   lkb the first_lkid */
2789 
2790 		r->res_first_lkid = 0;
2791 
2792 		if (!list_empty(&r->res_lookup)) {
2793 			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2794 					 lkb_rsb_lookup);
2795 			list_del_init(&lkb->lkb_rsb_lookup);
2796 			r->res_first_lkid = lkb->lkb_id;
2797 			_request_lock(r, lkb);
2798 		}
2799 		break;
2800 
2801 	default:
2802 		log_error(r->res_ls, "confirm_master unknown error %d", error);
2803 	}
2804 }
2805 
2806 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2807 			 int namelen, unsigned long timeout_cs,
2808 			 void (*ast) (void *astparam),
2809 			 void *astparam,
2810 			 void (*bast) (void *astparam, int mode),
2811 			 struct dlm_args *args)
2812 {
2813 	int rv = -EINVAL;
2814 
2815 	/* check for invalid arg usage */
2816 
2817 	if (mode < 0 || mode > DLM_LOCK_EX)
2818 		goto out;
2819 
2820 	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2821 		goto out;
2822 
2823 	if (flags & DLM_LKF_CANCEL)
2824 		goto out;
2825 
2826 	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2827 		goto out;
2828 
2829 	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2830 		goto out;
2831 
2832 	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2833 		goto out;
2834 
2835 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2836 		goto out;
2837 
2838 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2839 		goto out;
2840 
2841 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2842 		goto out;
2843 
2844 	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2845 		goto out;
2846 
2847 	if (!ast || !lksb)
2848 		goto out;
2849 
2850 	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2851 		goto out;
2852 
2853 	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2854 		goto out;
2855 
2856 	/* these args will be copied to the lkb in validate_lock_args;
2857 	   it cannot be done now because when converting locks, fields in
2858 	   an active lkb cannot be modified before locking the rsb */
2859 
2860 	args->flags = flags;
2861 	args->astfn = ast;
2862 	args->astparam = astparam;
2863 	args->bastfn = bast;
2864 	args->timeout = timeout_cs;
2865 	args->mode = mode;
2866 	args->lksb = lksb;
2867 	rv = 0;
2868  out:
2869 	return rv;
2870 }
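
/* For example, per the checks above: DLM_LKF_EXPEDITE is only accepted on a
   new DLM_LOCK_NL request; pairing it with DLM_LKF_CONVERT, DLM_LKF_QUECVT,
   DLM_LKF_NOQUEUE, or a mode other than NL fails with -EINVAL. */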
2871 
2872 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2873 {
2874 	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2875 		      DLM_LKF_FORCEUNLOCK))
2876 		return -EINVAL;
2877 
2878 	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2879 		return -EINVAL;
2880 
2881 	args->flags = flags;
2882 	args->astparam = astarg;
2883 	return 0;
2884 }
2885 
2886 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2887 			      struct dlm_args *args)
2888 {
2889 	int rv = -EINVAL;
2890 
2891 	if (args->flags & DLM_LKF_CONVERT) {
2892 		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2893 			goto out;
2894 
2895 		if (args->flags & DLM_LKF_QUECVT &&
2896 		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2897 			goto out;
2898 
2899 		rv = -EBUSY;
2900 		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2901 			goto out;
2902 
2903 		if (lkb->lkb_wait_type)
2904 			goto out;
2905 
2906 		if (is_overlap(lkb))
2907 			goto out;
2908 	}
2909 
2910 	lkb->lkb_exflags = args->flags;
2911 	lkb->lkb_sbflags = 0;
2912 	lkb->lkb_astfn = args->astfn;
2913 	lkb->lkb_astparam = args->astparam;
2914 	lkb->lkb_bastfn = args->bastfn;
2915 	lkb->lkb_rqmode = args->mode;
2916 	lkb->lkb_lksb = args->lksb;
2917 	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2918 	lkb->lkb_ownpid = (int) current->pid;
2919 	lkb->lkb_timeout_cs = args->timeout;
2920 	rv = 0;
2921  out:
2922 	if (rv)
2923 		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2924 			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2925 			  lkb->lkb_status, lkb->lkb_wait_type,
2926 			  lkb->lkb_resource->res_name);
2927 	return rv;
2928 }
2929 
2930 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2931    for success */
2932 
2933 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2934    because there may be a lookup in progress and it's valid to do
2935    cancel/unlockf on it */
2936 
2937 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2938 {
2939 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2940 	int rv = -EINVAL;
2941 
2942 	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2943 		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2944 		dlm_print_lkb(lkb);
2945 		goto out;
2946 	}
2947 
2948 	/* an lkb may still exist even though the lock is EOL'ed due to a
2949 	   cancel, unlock or failed noqueue request; an app can't use these
2950 	   locks; return same error as if the lkid had not been found at all */
2951 
2952 	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2953 		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2954 		rv = -ENOENT;
2955 		goto out;
2956 	}
2957 
2958 	/* an lkb may be waiting for an rsb lookup to complete where the
2959 	   lookup was initiated by another lock */
2960 
2961 	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2962 		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2963 			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2964 			list_del_init(&lkb->lkb_rsb_lookup);
2965 			queue_cast(lkb->lkb_resource, lkb,
2966 				   args->flags & DLM_LKF_CANCEL ?
2967 				   -DLM_ECANCEL : -DLM_EUNLOCK);
2968 			unhold_lkb(lkb); /* undoes create_lkb() */
2969 		}
2970 		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2971 		rv = -EBUSY;
2972 		goto out;
2973 	}
2974 
2975 	/* cancel not allowed with another cancel/unlock in progress */
2976 
2977 	if (args->flags & DLM_LKF_CANCEL) {
2978 		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2979 			goto out;
2980 
2981 		if (is_overlap(lkb))
2982 			goto out;
2983 
2984 		/* don't let scand try to do a cancel */
2985 		del_timeout(lkb);
2986 
2987 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2988 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2989 			rv = -EBUSY;
2990 			goto out;
2991 		}
2992 
2993 		/* there's nothing to cancel */
2994 		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2995 		    !lkb->lkb_wait_type) {
2996 			rv = -EBUSY;
2997 			goto out;
2998 		}
2999 
3000 		switch (lkb->lkb_wait_type) {
3001 		case DLM_MSG_LOOKUP:
3002 		case DLM_MSG_REQUEST:
3003 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3004 			rv = -EBUSY;
3005 			goto out;
3006 		case DLM_MSG_UNLOCK:
3007 		case DLM_MSG_CANCEL:
3008 			goto out;
3009 		}
3010 		/* add_to_waiters() will set OVERLAP_CANCEL */
3011 		goto out_ok;
3012 	}
3013 
3014 	/* do we need to allow a force-unlock if there's a normal unlock
3015 	   already in progress?  in what conditions could the normal unlock
3016 	   fail such that we'd want to send a force-unlock to be sure? */
3017 
3018 	if (args->flags & DLM_LKF_FORCEUNLOCK) {
3019 		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3020 			goto out;
3021 
3022 		if (is_overlap_unlock(lkb))
3023 			goto out;
3024 
3025 		/* don't let scand try to do a cancel */
3026 		del_timeout(lkb);
3027 
3028 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
3029 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3030 			rv = -EBUSY;
3031 			goto out;
3032 		}
3033 
3034 		switch (lkb->lkb_wait_type) {
3035 		case DLM_MSG_LOOKUP:
3036 		case DLM_MSG_REQUEST:
3037 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3038 			rv = -EBUSY;
3039 			goto out;
3040 		case DLM_MSG_UNLOCK:
3041 			goto out;
3042 		}
3043 		/* add_to_waiters() will set OVERLAP_UNLOCK */
3044 		goto out_ok;
3045 	}
3046 
3047 	/* normal unlock not allowed if there's any op in progress */
3048 	rv = -EBUSY;
3049 	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3050 		goto out;
3051 
3052  out_ok:
3053 	/* an overlapping op shouldn't blow away exflags from other op */
3054 	lkb->lkb_exflags |= args->flags;
3055 	lkb->lkb_sbflags = 0;
3056 	lkb->lkb_astparam = args->astparam;
3057 	rv = 0;
3058  out:
3059 	if (rv)
3060 		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3061 			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3062 			  args->flags, lkb->lkb_wait_type,
3063 			  lkb->lkb_resource->res_name);
3064 	return rv;
3065 }
3066 
3067 /*
3068  * Four stage 4 varieties:
3069  * do_request(), do_convert(), do_unlock(), do_cancel()
3070  * These are called on the master node for the given lock and
3071  * from the central locking logic.
3072  */
3073 
3074 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3075 {
3076 	int error = 0;
3077 
3078 	if (can_be_granted(r, lkb, 1, 0, NULL)) {
3079 		grant_lock(r, lkb);
3080 		queue_cast(r, lkb, 0);
3081 		goto out;
3082 	}
3083 
3084 	if (can_be_queued(lkb)) {
3085 		error = -EINPROGRESS;
3086 		add_lkb(r, lkb, DLM_LKSTS_WAITING);
3087 		add_timeout(lkb);
3088 		goto out;
3089 	}
3090 
3091 	error = -EAGAIN;
3092 	queue_cast(r, lkb, -EAGAIN);
3093  out:
3094 	return error;
3095 }
3096 
3097 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3098 			       int error)
3099 {
3100 	switch (error) {
3101 	case -EAGAIN:
3102 		if (force_blocking_asts(lkb))
3103 			send_blocking_asts_all(r, lkb);
3104 		break;
3105 	case -EINPROGRESS:
3106 		send_blocking_asts(r, lkb);
3107 		break;
3108 	}
3109 }
3110 
3111 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3112 {
3113 	int error = 0;
3114 	int deadlk = 0;
3115 
3116 	/* changing an existing lock may allow others to be granted */
3117 
3118 	if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3119 		grant_lock(r, lkb);
3120 		queue_cast(r, lkb, 0);
3121 		goto out;
3122 	}
3123 
3124 	/* can_be_granted() detected that this lock would block in a conversion
3125 	   deadlock, so we leave it on the granted queue and return EDEADLK in
3126 	   the ast for the convert. */
3127 
3128 	if (deadlk) {
3129 		/* it's left on the granted queue */
3130 		revert_lock(r, lkb);
3131 		queue_cast(r, lkb, -EDEADLK);
3132 		error = -EDEADLK;
3133 		goto out;
3134 	}
3135 
3136 	/* is_demoted() means the can_be_granted() above set the grmode
3137 	   to NL, and left us on the granted queue.  This auto-demotion
3138 	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
3139 	   now grantable.  We have to try to grant other converting locks
3140 	   before we try again to grant this one. */
3141 
3142 	if (is_demoted(lkb)) {
3143 		grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3144 		if (_can_be_granted(r, lkb, 1, 0)) {
3145 			grant_lock(r, lkb);
3146 			queue_cast(r, lkb, 0);
3147 			goto out;
3148 		}
3149 		/* else fall through and move to convert queue */
3150 	}
3151 
3152 	if (can_be_queued(lkb)) {
3153 		error = -EINPROGRESS;
3154 		del_lkb(r, lkb);
3155 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3156 		add_timeout(lkb);
3157 		goto out;
3158 	}
3159 
3160 	error = -EAGAIN;
3161 	queue_cast(r, lkb, -EAGAIN);
3162  out:
3163 	return error;
3164 }
3165 
3166 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3167 			       int error)
3168 {
3169 	switch (error) {
3170 	case 0:
3171 		grant_pending_locks(r, NULL);
3172 		/* grant_pending_locks also sends basts */
3173 		break;
3174 	case -EAGAIN:
3175 		if (force_blocking_asts(lkb))
3176 			send_blocking_asts_all(r, lkb);
3177 		break;
3178 	case -EINPROGRESS:
3179 		send_blocking_asts(r, lkb);
3180 		break;
3181 	}
3182 }
3183 
3184 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3185 {
3186 	remove_lock(r, lkb);
3187 	queue_cast(r, lkb, -DLM_EUNLOCK);
3188 	return -DLM_EUNLOCK;
3189 }
3190 
3191 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3192 			      int error)
3193 {
3194 	grant_pending_locks(r, NULL);
3195 }
3196 
3197 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3198 
3199 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3200 {
3201 	int error;
3202 
3203 	error = revert_lock(r, lkb);
3204 	if (error) {
3205 		queue_cast(r, lkb, -DLM_ECANCEL);
3206 		return -DLM_ECANCEL;
3207 	}
3208 	return 0;
3209 }
3210 
3211 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3212 			      int error)
3213 {
3214 	if (error)
3215 		grant_pending_locks(r, NULL);
3216 }
3217 
3218 /*
3219  * Four stage 3 varieties:
3220  * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3221  */
3222 
3223 /* add a new lkb to a possibly new rsb, called by requesting process */
3224 
3225 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3226 {
3227 	int error;
3228 
3229 	/* set_master: sets lkb nodeid from r */
3230 
3231 	error = set_master(r, lkb);
3232 	if (error < 0)
3233 		goto out;
3234 	if (error) {
3235 		error = 0;
3236 		goto out;
3237 	}
3238 
3239 	if (is_remote(r)) {
3240 		/* receive_request() calls do_request() on remote node */
3241 		error = send_request(r, lkb);
3242 	} else {
3243 		error = do_request(r, lkb);
3244 		/* for remote locks the request_reply is sent
3245 		   between do_request and do_request_effects */
3246 		do_request_effects(r, lkb, error);
3247 	}
3248  out:
3249 	return error;
3250 }
3251 
3252 /* change some property of an existing lkb, e.g. mode */
3253 
3254 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3255 {
3256 	int error;
3257 
3258 	if (is_remote(r)) {
3259 		/* receive_convert() calls do_convert() on remote node */
3260 		error = send_convert(r, lkb);
3261 	} else {
3262 		error = do_convert(r, lkb);
3263 		/* for remote locks the convert_reply is sent
3264 		   between do_convert and do_convert_effects */
3265 		do_convert_effects(r, lkb, error);
3266 	}
3267 
3268 	return error;
3269 }
3270 
3271 /* remove an existing lkb from the granted queue */
3272 
3273 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3274 {
3275 	int error;
3276 
3277 	if (is_remote(r)) {
3278 		/* receive_unlock() calls do_unlock() on remote node */
3279 		error = send_unlock(r, lkb);
3280 	} else {
3281 		error = do_unlock(r, lkb);
3282 		/* for remote locks the unlock_reply is sent
3283 		   between do_unlock and do_unlock_effects */
3284 		do_unlock_effects(r, lkb, error);
3285 	}
3286 
3287 	return error;
3288 }
3289 
3290 /* remove an existing lkb from the convert or wait queue */
3291 
3292 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3293 {
3294 	int error;
3295 
3296 	if (is_remote(r)) {
3297 		/* receive_cancel() calls do_cancel() on remote node */
3298 		error = send_cancel(r, lkb);
3299 	} else {
3300 		error = do_cancel(r, lkb);
3301 		/* for remote locks the cancel_reply is sent
3302 		   between do_cancel and do_cancel_effects */
3303 		do_cancel_effects(r, lkb, error);
3304 	}
3305 
3306 	return error;
3307 }
3308 
3309 /*
3310  * Four stage 2 varieties:
3311  * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3312  */
3313 
3314 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3315 			int len, struct dlm_args *args)
3316 {
3317 	struct dlm_rsb *r;
3318 	int error;
3319 
3320 	error = validate_lock_args(ls, lkb, args);
3321 	if (error)
3322 		return error;
3323 
3324 	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3325 	if (error)
3326 		return error;
3327 
3328 	lock_rsb(r);
3329 
3330 	attach_lkb(r, lkb);
3331 	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3332 
3333 	error = _request_lock(r, lkb);
3334 
3335 	unlock_rsb(r);
3336 	put_rsb(r);
3337 	return error;
3338 }
3339 
3340 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3341 			struct dlm_args *args)
3342 {
3343 	struct dlm_rsb *r;
3344 	int error;
3345 
3346 	r = lkb->lkb_resource;
3347 
3348 	hold_rsb(r);
3349 	lock_rsb(r);
3350 
3351 	error = validate_lock_args(ls, lkb, args);
3352 	if (error)
3353 		goto out;
3354 
3355 	error = _convert_lock(r, lkb);
3356  out:
3357 	unlock_rsb(r);
3358 	put_rsb(r);
3359 	return error;
3360 }
3361 
3362 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3363 		       struct dlm_args *args)
3364 {
3365 	struct dlm_rsb *r;
3366 	int error;
3367 
3368 	r = lkb->lkb_resource;
3369 
3370 	hold_rsb(r);
3371 	lock_rsb(r);
3372 
3373 	error = validate_unlock_args(lkb, args);
3374 	if (error)
3375 		goto out;
3376 
3377 	error = _unlock_lock(r, lkb);
3378  out:
3379 	unlock_rsb(r);
3380 	put_rsb(r);
3381 	return error;
3382 }
3383 
3384 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3385 		       struct dlm_args *args)
3386 {
3387 	struct dlm_rsb *r;
3388 	int error;
3389 
3390 	r = lkb->lkb_resource;
3391 
3392 	hold_rsb(r);
3393 	lock_rsb(r);
3394 
3395 	error = validate_unlock_args(lkb, args);
3396 	if (error)
3397 		goto out;
3398 
3399 	error = _cancel_lock(r, lkb);
3400  out:
3401 	unlock_rsb(r);
3402 	put_rsb(r);
3403 	return error;
3404 }
3405 
3406 /*
3407  * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
3408  */
3409 
3410 int dlm_lock(dlm_lockspace_t *lockspace,
3411 	     int mode,
3412 	     struct dlm_lksb *lksb,
3413 	     uint32_t flags,
3414 	     void *name,
3415 	     unsigned int namelen,
3416 	     uint32_t parent_lkid,
3417 	     void (*ast) (void *astarg),
3418 	     void *astarg,
3419 	     void (*bast) (void *astarg, int mode))
3420 {
3421 	struct dlm_ls *ls;
3422 	struct dlm_lkb *lkb;
3423 	struct dlm_args args;
3424 	int error, convert = flags & DLM_LKF_CONVERT;
3425 
3426 	ls = dlm_find_lockspace_local(lockspace);
3427 	if (!ls)
3428 		return -EINVAL;
3429 
3430 	dlm_lock_recovery(ls);
3431 
3432 	if (convert)
3433 		error = find_lkb(ls, lksb->sb_lkid, &lkb);
3434 	else
3435 		error = create_lkb(ls, &lkb);
3436 
3437 	if (error)
3438 		goto out;
3439 
3440 	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3441 			      astarg, bast, &args);
3442 	if (error)
3443 		goto out_put;
3444 
3445 	if (convert)
3446 		error = convert_lock(ls, lkb, &args);
3447 	else
3448 		error = request_lock(ls, lkb, name, namelen, &args);
3449 
3450 	if (error == -EINPROGRESS)
3451 		error = 0;
3452  out_put:
3453 	if (convert || error)
3454 		__put_lkb(ls, lkb);
3455 	if (error == -EAGAIN || error == -EDEADLK)
3456 		error = 0;
3457  out:
3458 	dlm_unlock_recovery(ls);
3459 	dlm_put_lockspace(ls);
3460 	return error;
3461 }
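
/* Editorial example (not part of the original file): a minimal sketch of
   driving dlm_lock() from a kernel caller.  The lockspace handle is assumed
   to come from elsewhere (e.g. dlm_new_lockspace()), and the mode/flag
   constants are the public ones from linux/dlmconstants.h.  Completion is
   asynchronous: a 0 return only means the operation was accepted; the final
   status appears in lksb.sb_status when the ast runs. */

static struct dlm_lksb example_lksb;

static void example_ast(void *astarg)
{
	/* sb_status carries the result, sb_lkid identifies the lock */
	pr_info("lock %x completed, status %d\n",
		example_lksb.sb_lkid, example_lksb.sb_status);
}

static int example_request_then_convert(dlm_lockspace_t *lockspace)
{
	int error;

	/* initial request: take "example-res" in protected-read mode */
	error = dlm_lock(lockspace, DLM_LOCK_PR, &example_lksb, 0,
			 "example-res", 11, 0, example_ast, NULL, NULL);
	if (error)
		return error;

	/* later, once the ast has reported a grant, the same call with
	   DLM_LKF_CONVERT reuses sb_lkid from the lksb to up-convert */
	return dlm_lock(lockspace, DLM_LOCK_EX, &example_lksb,
			DLM_LKF_CONVERT, NULL, 0, 0, example_ast, NULL, NULL);
}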
3462 
3463 int dlm_unlock(dlm_lockspace_t *lockspace,
3464 	       uint32_t lkid,
3465 	       uint32_t flags,
3466 	       struct dlm_lksb *lksb,
3467 	       void *astarg)
3468 {
3469 	struct dlm_ls *ls;
3470 	struct dlm_lkb *lkb;
3471 	struct dlm_args args;
3472 	int error;
3473 
3474 	ls = dlm_find_lockspace_local(lockspace);
3475 	if (!ls)
3476 		return -EINVAL;
3477 
3478 	dlm_lock_recovery(ls);
3479 
3480 	error = find_lkb(ls, lkid, &lkb);
3481 	if (error)
3482 		goto out;
3483 
3484 	error = set_unlock_args(flags, astarg, &args);
3485 	if (error)
3486 		goto out_put;
3487 
3488 	if (flags & DLM_LKF_CANCEL)
3489 		error = cancel_lock(ls, lkb, &args);
3490 	else
3491 		error = unlock_lock(ls, lkb, &args);
3492 
3493 	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3494 		error = 0;
3495 	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3496 		error = 0;
3497  out_put:
3498 	dlm_put_lkb(lkb);
3499  out:
3500 	dlm_unlock_recovery(ls);
3501 	dlm_put_lockspace(ls);
3502 	return error;
3503 }
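
/* Editorial example (assumptions as in the previous sketch): releasing or
   cancelling the lock taken above.  With no flags, dlm_unlock() releases a
   granted lock; with DLM_LKF_CANCEL it cancels a request or conversion that
   is still in progress.  dlm_unlock() itself maps -DLM_EUNLOCK and
   -DLM_ECANCEL to a 0 return, and the completion ast later reports the
   detailed status in sb_status. */

static int example_drop(dlm_lockspace_t *lockspace, struct dlm_lksb *lksb)
{
	/* normal unlock of a granted lock */
	return dlm_unlock(lockspace, lksb->sb_lkid, 0, lksb, NULL);
}

static int example_abort(dlm_lockspace_t *lockspace, struct dlm_lksb *lksb)
{
	/* cancel an in-flight request/convert instead of unlocking */
	return dlm_unlock(lockspace, lksb->sb_lkid, DLM_LKF_CANCEL, lksb, NULL);
}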
3504 
3505 /*
3506  * send/receive routines for remote operations and replies
3507  *
3508  * send_args
3509  * send_common
3510  * send_request			receive_request
3511  * send_convert			receive_convert
3512  * send_unlock			receive_unlock
3513  * send_cancel			receive_cancel
3514  * send_grant			receive_grant
3515  * send_bast			receive_bast
3516  * send_lookup			receive_lookup
3517  * send_remove			receive_remove
3518  *
3519  * 				send_common_reply
3520  * receive_request_reply	send_request_reply
3521  * receive_convert_reply	send_convert_reply
3522  * receive_unlock_reply		send_unlock_reply
3523  * receive_cancel_reply		send_cancel_reply
3524  * receive_lookup_reply		send_lookup_reply
3525  */
3526 
3527 static int _create_message(struct dlm_ls *ls, int mb_len,
3528 			   int to_nodeid, int mstype,
3529 			   struct dlm_message **ms_ret,
3530 			   struct dlm_mhandle **mh_ret)
3531 {
3532 	struct dlm_message *ms;
3533 	struct dlm_mhandle *mh;
3534 	char *mb;
3535 
3536 	/* get_buffer gives us a message handle (mh) that we need to
3537 	   pass into lowcomms_commit and a message buffer (mb) that we
3538 	   write our data into */
3539 
3540 	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3541 	if (!mh)
3542 		return -ENOBUFS;
3543 
3544 	memset(mb, 0, mb_len);
3545 
3546 	ms = (struct dlm_message *) mb;
3547 
3548 	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3549 	ms->m_header.h_lockspace = ls->ls_global_id;
3550 	ms->m_header.h_nodeid = dlm_our_nodeid();
3551 	ms->m_header.h_length = mb_len;
3552 	ms->m_header.h_cmd = DLM_MSG;
3553 
3554 	ms->m_type = mstype;
3555 
3556 	*mh_ret = mh;
3557 	*ms_ret = ms;
3558 	return 0;
3559 }
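
/* Editorial note: every outgoing message therefore starts with a fully
   initialised dlm_header (version, lockspace id, sending node, total
   length, DLM_MSG) followed by the fixed dlm_message body; any
   type-specific payload (resource name or LVB) is appended in m_extra by
   send_args() below (or directly by send_remove()), which is why h_length
   records the full mb_len rather than just the struct size. */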
3560 
3561 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3562 			  int to_nodeid, int mstype,
3563 			  struct dlm_message **ms_ret,
3564 			  struct dlm_mhandle **mh_ret)
3565 {
3566 	int mb_len = sizeof(struct dlm_message);
3567 
3568 	switch (mstype) {
3569 	case DLM_MSG_REQUEST:
3570 	case DLM_MSG_LOOKUP:
3571 	case DLM_MSG_REMOVE:
3572 		mb_len += r->res_length;
3573 		break;
3574 	case DLM_MSG_CONVERT:
3575 	case DLM_MSG_UNLOCK:
3576 	case DLM_MSG_REQUEST_REPLY:
3577 	case DLM_MSG_CONVERT_REPLY:
3578 	case DLM_MSG_GRANT:
3579 		if (lkb && lkb->lkb_lvbptr)
3580 			mb_len += r->res_ls->ls_lvblen;
3581 		break;
3582 	}
3583 
3584 	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3585 			       ms_ret, mh_ret);
3586 }
3587 
3588 /* further lowcomms enhancements or alternate implementations may make
3589    the return value from this function useful at some point */
3590 
3591 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3592 {
3593 	dlm_message_out(ms);
3594 	dlm_lowcomms_commit_buffer(mh);
3595 	return 0;
3596 }
3597 
3598 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3599 		      struct dlm_message *ms)
3600 {
3601 	ms->m_nodeid   = lkb->lkb_nodeid;
3602 	ms->m_pid      = lkb->lkb_ownpid;
3603 	ms->m_lkid     = lkb->lkb_id;
3604 	ms->m_remid    = lkb->lkb_remid;
3605 	ms->m_exflags  = lkb->lkb_exflags;
3606 	ms->m_sbflags  = lkb->lkb_sbflags;
3607 	ms->m_flags    = lkb->lkb_flags;
3608 	ms->m_lvbseq   = lkb->lkb_lvbseq;
3609 	ms->m_status   = lkb->lkb_status;
3610 	ms->m_grmode   = lkb->lkb_grmode;
3611 	ms->m_rqmode   = lkb->lkb_rqmode;
3612 	ms->m_hash     = r->res_hash;
3613 
3614 	/* m_result and m_bastmode are set from function args,
3615 	   not from lkb fields */
3616 
3617 	if (lkb->lkb_bastfn)
3618 		ms->m_asts |= DLM_CB_BAST;
3619 	if (lkb->lkb_astfn)
3620 		ms->m_asts |= DLM_CB_CAST;
3621 
3622 	/* compare with switch in create_message; send_remove() doesn't
3623 	   use send_args() */
3624 
3625 	switch (ms->m_type) {
3626 	case DLM_MSG_REQUEST:
3627 	case DLM_MSG_LOOKUP:
3628 		memcpy(ms->m_extra, r->res_name, r->res_length);
3629 		break;
3630 	case DLM_MSG_CONVERT:
3631 	case DLM_MSG_UNLOCK:
3632 	case DLM_MSG_REQUEST_REPLY:
3633 	case DLM_MSG_CONVERT_REPLY:
3634 	case DLM_MSG_GRANT:
3635 		if (!lkb->lkb_lvbptr)
3636 			break;
3637 		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3638 		break;
3639 	}
3640 }
3641 
3642 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3643 {
3644 	struct dlm_message *ms;
3645 	struct dlm_mhandle *mh;
3646 	int to_nodeid, error;
3647 
3648 	to_nodeid = r->res_nodeid;
3649 
3650 	error = add_to_waiters(lkb, mstype, to_nodeid);
3651 	if (error)
3652 		return error;
3653 
3654 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3655 	if (error)
3656 		goto fail;
3657 
3658 	send_args(r, lkb, ms);
3659 
3660 	error = send_message(mh, ms);
3661 	if (error)
3662 		goto fail;
3663 	return 0;
3664 
3665  fail:
3666 	remove_from_waiters(lkb, msg_reply_type(mstype));
3667 	return error;
3668 }
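
/* Editorial note: send_common() places the lkb on the waiters list before
   the message is even built, so a reply racing back immediately after
   dlm_lowcomms_commit_buffer() is guaranteed to find the lkb waiting; if
   any later step fails, the entry is removed again under the matching
   reply type. */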
3669 
3670 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3671 {
3672 	return send_common(r, lkb, DLM_MSG_REQUEST);
3673 }
3674 
3675 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3676 {
3677 	int error;
3678 
3679 	error = send_common(r, lkb, DLM_MSG_CONVERT);
3680 
3681 	/* down conversions go without a reply from the master */
3682 	if (!error && down_conversion(lkb)) {
3683 		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3684 		r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3685 		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3686 		r->res_ls->ls_stub_ms.m_result = 0;
3687 		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3688 	}
3689 
3690 	return error;
3691 }
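
/* Editorial note (an assumption based on how down_conversion() is used
   here): a down-conversion, where the requested mode is strictly weaker
   than the currently granted mode, is always granted by the master without
   a wire reply, so the block above completes it locally by feeding a
   fabricated DLM_MSG_CONVERT_REPLY (flagged DLM_IFL_STUB_MS) into
   __receive_convert_reply(). */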
3692 
3693 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3694    MASTER_UNCERTAIN to force the next request on the rsb to confirm
3695    that the master is still correct. */
3696 
3697 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3698 {
3699 	return send_common(r, lkb, DLM_MSG_UNLOCK);
3700 }
3701 
3702 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3703 {
3704 	return send_common(r, lkb, DLM_MSG_CANCEL);
3705 }
3706 
3707 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3708 {
3709 	struct dlm_message *ms;
3710 	struct dlm_mhandle *mh;
3711 	int to_nodeid, error;
3712 
3713 	to_nodeid = lkb->lkb_nodeid;
3714 
3715 	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3716 	if (error)
3717 		goto out;
3718 
3719 	send_args(r, lkb, ms);
3720 
3721 	ms->m_result = 0;
3722 
3723 	error = send_message(mh, ms);
3724  out:
3725 	return error;
3726 }
3727 
3728 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3729 {
3730 	struct dlm_message *ms;
3731 	struct dlm_mhandle *mh;
3732 	int to_nodeid, error;
3733 
3734 	to_nodeid = lkb->lkb_nodeid;
3735 
3736 	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3737 	if (error)
3738 		goto out;
3739 
3740 	send_args(r, lkb, ms);
3741 
3742 	ms->m_bastmode = mode;
3743 
3744 	error = send_message(mh, ms);
3745  out:
3746 	return error;
3747 }
3748 
3749 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3750 {
3751 	struct dlm_message *ms;
3752 	struct dlm_mhandle *mh;
3753 	int to_nodeid, error;
3754 
3755 	to_nodeid = dlm_dir_nodeid(r);
3756 
3757 	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3758 	if (error)
3759 		return error;
3760 
3761 	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3762 	if (error)
3763 		goto fail;
3764 
3765 	send_args(r, lkb, ms);
3766 
3767 	error = send_message(mh, ms);
3768 	if (error)
3769 		goto fail;
3770 	return 0;
3771 
3772  fail:
3773 	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3774 	return error;
3775 }
3776 
3777 static int send_remove(struct dlm_rsb *r)
3778 {
3779 	struct dlm_message *ms;
3780 	struct dlm_mhandle *mh;
3781 	int to_nodeid, error;
3782 
3783 	to_nodeid = dlm_dir_nodeid(r);
3784 
3785 	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3786 	if (error)
3787 		goto out;
3788 
3789 	memcpy(ms->m_extra, r->res_name, r->res_length);
3790 	ms->m_hash = r->res_hash;
3791 
3792 	error = send_message(mh, ms);
3793  out:
3794 	return error;
3795 }
3796 
3797 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3798 			     int mstype, int rv)
3799 {
3800 	struct dlm_message *ms;
3801 	struct dlm_mhandle *mh;
3802 	int to_nodeid, error;
3803 
3804 	to_nodeid = lkb->lkb_nodeid;
3805 
3806 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3807 	if (error)
3808 		goto out;
3809 
3810 	send_args(r, lkb, ms);
3811 
3812 	ms->m_result = rv;
3813 
3814 	error = send_message(mh, ms);
3815  out:
3816 	return error;
3817 }
3818 
3819 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3820 {
3821 	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3822 }
3823 
3824 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3825 {
3826 	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3827 }
3828 
3829 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3830 {
3831 	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3832 }
3833 
3834 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3835 {
3836 	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3837 }
3838 
3839 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3840 			     int ret_nodeid, int rv)
3841 {
3842 	struct dlm_rsb *r = &ls->ls_stub_rsb;
3843 	struct dlm_message *ms;
3844 	struct dlm_mhandle *mh;
3845 	int error, nodeid = ms_in->m_header.h_nodeid;
3846 
3847 	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3848 	if (error)
3849 		goto out;
3850 
3851 	ms->m_lkid = ms_in->m_lkid;
3852 	ms->m_result = rv;
3853 	ms->m_nodeid = ret_nodeid;
3854 
3855 	error = send_message(mh, ms);
3856  out:
3857 	return error;
3858 }
3859 
3860 /* which args we save from a received message depends heavily on the type
3861    of message, unlike the send side where we can safely send everything about
3862    the lkb for any type of message */
3863 
3864 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3865 {
3866 	lkb->lkb_exflags = ms->m_exflags;
3867 	lkb->lkb_sbflags = ms->m_sbflags;
3868 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3869 		         (ms->m_flags & 0x0000FFFF);
3870 }
3871 
3872 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3873 {
3874 	if (ms->m_flags == DLM_IFL_STUB_MS)
3875 		return;
3876 
3877 	lkb->lkb_sbflags = ms->m_sbflags;
3878 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3879 		         (ms->m_flags & 0x0000FFFF);
3880 }
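
/* Editorial note: both helpers above take only the low 16 bits of m_flags
   from the wire (shared state such as DLM_IFL_USER, which
   validate_message() below checks) and preserve the high 16 bits of
   lkb_flags, so purely node-local markers like DLM_IFL_MSTCPY can never be
   overwritten by a remote sender. */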
3881 
3882 static int receive_extralen(struct dlm_message *ms)
3883 {
3884 	return (ms->m_header.h_length - sizeof(struct dlm_message));
3885 }
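
/* Editorial example: for a DLM_MSG_REQUEST, create_message() sized the
   buffer as sizeof(struct dlm_message) + res_length and recorded that
   total in h_length, so receive_extralen() here yields exactly the
   resource-name length that receive_request() later passes to find_rsb(). */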
3886 
3887 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3888 		       struct dlm_message *ms)
3889 {
3890 	int len;
3891 
3892 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3893 		if (!lkb->lkb_lvbptr)
3894 			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3895 		if (!lkb->lkb_lvbptr)
3896 			return -ENOMEM;
3897 		len = receive_extralen(ms);
3898 		if (len > ls->ls_lvblen)
3899 			len = ls->ls_lvblen;
3900 		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3901 	}
3902 	return 0;
3903 }
3904 
3905 static void fake_bastfn(void *astparam, int mode)
3906 {
3907 	log_print("fake_bastfn should not be called");
3908 }
3909 
3910 static void fake_astfn(void *astparam)
3911 {
3912 	log_print("fake_astfn should not be called");
3913 }
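
/* Editorial note: a master copy of a remote lock never runs callbacks
   locally, but receive_request_args() below still needs non-NULL
   lkb_astfn/lkb_bastfn pointers to record that the owning node asked for
   them (send_args() turns those pointers back into DLM_CB_CAST and
   DLM_CB_BAST bits); these placeholders fill that role and complain if
   ever actually invoked. */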
3914 
3915 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3916 				struct dlm_message *ms)
3917 {
3918 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3919 	lkb->lkb_ownpid = ms->m_pid;
3920 	lkb->lkb_remid = ms->m_lkid;
3921 	lkb->lkb_grmode = DLM_LOCK_IV;
3922 	lkb->lkb_rqmode = ms->m_rqmode;
3923 
3924 	lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3925 	lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3926 
3927 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3928 		/* lkb was just created so there won't be an lvb yet */
3929 		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3930 		if (!lkb->lkb_lvbptr)
3931 			return -ENOMEM;
3932 	}
3933 
3934 	return 0;
3935 }
3936 
3937 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3938 				struct dlm_message *ms)
3939 {
3940 	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3941 		return -EBUSY;
3942 
3943 	if (receive_lvb(ls, lkb, ms))
3944 		return -ENOMEM;
3945 
3946 	lkb->lkb_rqmode = ms->m_rqmode;
3947 	lkb->lkb_lvbseq = ms->m_lvbseq;
3948 
3949 	return 0;
3950 }
3951 
3952 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3953 			       struct dlm_message *ms)
3954 {
3955 	if (receive_lvb(ls, lkb, ms))
3956 		return -ENOMEM;
3957 	return 0;
3958 }
3959 
3960 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3961    uses to send a reply and that the remote end uses to process the reply. */
3962 
3963 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3964 {
3965 	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3966 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3967 	lkb->lkb_remid = ms->m_lkid;
3968 }
3969 
3970 /* This is called after the rsb is locked so that we can safely inspect
3971    fields in the lkb. */
3972 
3973 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3974 {
3975 	int from = ms->m_header.h_nodeid;
3976 	int error = 0;
3977 
3978 	/* currently, mixing user and kernel locks is not supported */
3979 	if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
3980 		log_error(lkb->lkb_resource->res_ls,
3981 			  "got user dlm message for a kernel lock");
3982 		error = -EINVAL;
3983 		goto out;
3984 	}
3985 
3986 	switch (ms->m_type) {
3987 	case DLM_MSG_CONVERT:
3988 	case DLM_MSG_UNLOCK:
3989 	case DLM_MSG_CANCEL:
3990 		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3991 			error = -EINVAL;
3992 		break;
3993 
3994 	case DLM_MSG_CONVERT_REPLY:
3995 	case DLM_MSG_UNLOCK_REPLY:
3996 	case DLM_MSG_CANCEL_REPLY:
3997 	case DLM_MSG_GRANT:
3998 	case DLM_MSG_BAST:
3999 		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
4000 			error = -EINVAL;
4001 		break;
4002 
4003 	case DLM_MSG_REQUEST_REPLY:
4004 		if (!is_process_copy(lkb))
4005 			error = -EINVAL;
4006 		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4007 			error = -EINVAL;
4008 		break;
4009 
4010 	default:
4011 		error = -EINVAL;
4012 	}
4013 
4014 out:
4015 	if (error)
4016 		log_error(lkb->lkb_resource->res_ls,
4017 			  "ignore invalid message %d from %d %x %x %x %d",
4018 			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4019 			  lkb->lkb_flags, lkb->lkb_nodeid);
4020 	return error;
4021 }
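
/* Editorial note: in the checks above, a "master copy" (DLM_IFL_MSTCPY,
   set in receive_request()) is this node's shadow of a lock owned
   elsewhere, so convert/unlock/cancel must come from that owner, while a
   "process copy" is a local lkb mastered remotely, so grants, basts and
   replies must come from its master node. */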
4022 
4023 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4024 {
4025 	char name[DLM_RESNAME_MAXLEN + 1];
4026 	struct dlm_message *ms;
4027 	struct dlm_mhandle *mh;
4028 	struct dlm_rsb *r;
4029 	uint32_t hash, b;
4030 	int rv, dir_nodeid;
4031 
4032 	memset(name, 0, sizeof(name));
4033 	memcpy(name, ms_name, len);
4034 
4035 	hash = jhash(name, len, 0);
4036 	b = hash & (ls->ls_rsbtbl_size - 1);
4037 
4038 	dir_nodeid = dlm_hash2nodeid(ls, hash);
4039 
4040 	log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4041 
4042 	spin_lock(&ls->ls_rsbtbl[b].lock);
4043 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4044 	if (!rv) {
4045 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4046 		log_error(ls, "repeat_remove on keep %s", name);
4047 		return;
4048 	}
4049 
4050 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4051 	if (!rv) {
4052 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4053 		log_error(ls, "repeat_remove on toss %s", name);
4054 		return;
4055 	}
4056 
4057 	/* use ls->remove_name2 to avoid conflict with shrink? */
4058 
4059 	spin_lock(&ls->ls_remove_spin);
4060 	ls->ls_remove_len = len;
4061 	memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4062 	spin_unlock(&ls->ls_remove_spin);
4063 	spin_unlock(&ls->ls_rsbtbl[b].lock);
4064 
4065 	rv = _create_message(ls, sizeof(struct dlm_message) + len,
4066 			     dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4067 	if (rv)
4068 		return;
4069 
4070 	memcpy(ms->m_extra, name, len);
4071 	ms->m_hash = hash;
4072 
4073 	send_message(mh, ms);
4074 
4075 	spin_lock(&ls->ls_remove_spin);
4076 	ls->ls_remove_len = 0;
4077 	memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4078 	spin_unlock(&ls->ls_remove_spin);
4079 }
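
/* Editorial note (an assumption, following the shrink question in the
   comment above): publishing the name in ls_remove_name/ls_remove_len
   under ls_remove_spin while the extra DLM_MSG_REMOVE is in flight appears
   to let the toss-list shrinking code avoid removing or reusing the same
   resource name concurrently; the fields are cleared once the message has
   been handed to lowcomms. */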
4080 
4081 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4082 {
4083 	struct dlm_lkb *lkb;
4084 	struct dlm_rsb *r;
4085 	int from_nodeid;
4086 	int error, namelen = 0;
4087 
4088 	from_nodeid = ms->m_header.h_nodeid;
4089 
4090 	error = create_lkb(ls, &lkb);
4091 	if (error)
4092 		goto fail;
4093 
4094 	receive_flags(lkb, ms);
4095 	lkb->lkb_flags |= DLM_IFL_MSTCPY;
4096 	error = receive_request_args(ls, lkb, ms);
4097 	if (error) {
4098 		__put_lkb(ls, lkb);
4099 		goto fail;
4100 	}
4101 
4102 	/* The dir node is the authority on whether we are the master
4103 	   for this rsb or not, so if the master sends us a request, we should
4104 	   recreate the rsb if we've destroyed it.  This race happens when we
4105 	   send a remove message to the dir node at the same time that the dir
4106 	   node sends us a request for the rsb. */
4107 
4108 	namelen = receive_extralen(ms);
4109 
4110 	error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4111 			 R_RECEIVE_REQUEST, &r);
4112 	if (error) {
4113 		__put_lkb(ls, lkb);
4114 		goto fail;
4115 	}
4116 
4117 	lock_rsb(r);
4118 
4119 	if (r->res_master_nodeid != dlm_our_nodeid()) {
4120 		error = validate_master_nodeid(ls, r, from_nodeid);
4121 		if (error) {
4122 			unlock_rsb(r);
4123 			put_rsb(r);
4124 			__put_lkb(ls, lkb);
4125 			goto fail;
4126 		}
4127 	}
4128 
4129 	attach_lkb(r, lkb);
4130 	error = do_request(r, lkb);
4131 	send_request_reply(r, lkb, error);
4132 	do_request_effects(r, lkb, error);
4133 
4134 	unlock_rsb(r);
4135 	put_rsb(r);
4136 
4137 	if (error == -EINPROGRESS)
4138 		error = 0;
4139 	if (error)
4140 		dlm_put_lkb(lkb);
4141 	return 0;
4142 
4143  fail:
4144 	/* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4145 	   and do this receive_request again from process_lookup_list once
4146 	   we get the lookup reply.  This would avoid many repeated
4147 	   ENOTBLK request failures when the lookup reply designating us
4148 	   as master is delayed. */
4149 
4150 	/* We could repeatedly return -EBADR here if our send_remove() is
4151 	   delayed in being sent/arriving/being processed on the dir node.
4152 	   Another node would repeatedly look up the master, and the dir
4153 	   node would continue returning our nodeid until our send_remove
4154 	   took effect.
4155 
4156 	   We send another remove message in case our previous send_remove
4157 	   was lost/ignored/missed somehow. */
4158 
4159 	if (error != -ENOTBLK) {
4160 		log_limit(ls, "receive_request %x from %d %d",
4161 			  ms->m_lkid, from_nodeid, error);
4162 	}
4163 
4164 	if (namelen && error == -EBADR) {
4165 		send_repeat_remove(ls, ms->m_extra, namelen);
4166 		msleep(1000);
4167 	}
4168 
4169 	setup_stub_lkb(ls, ms);
4170 	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4171 	return error;
4172 }
4173 
4174 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4175 {
4176 	struct dlm_lkb *lkb;
4177 	struct dlm_rsb *r;
4178 	int error, reply = 1;
4179 
4180 	error = find_lkb(ls, ms->m_remid, &lkb);
4181 	if (error)
4182 		goto fail;
4183 
4184 	if (lkb->lkb_remid != ms->m_lkid) {
4185 		log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4186 			  "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4187 			  (unsigned long long)lkb->lkb_recover_seq,
4188 			  ms->m_header.h_nodeid, ms->m_lkid);
4189 		error = -ENOENT;
4190 		dlm_put_lkb(lkb);
4191 		goto fail;
4192 	}
4193 
4194 	r = lkb->lkb_resource;
4195 
4196 	hold_rsb(r);
4197 	lock_rsb(r);
4198 
4199 	error = validate_message(lkb, ms);
4200 	if (error)
4201 		goto out;
4202 
4203 	receive_flags(lkb, ms);
4204 
4205 	error = receive_convert_args(ls, lkb, ms);
4206 	if (error) {
4207 		send_convert_reply(r, lkb, error);
4208 		goto out;
4209 	}
4210 
4211 	reply = !down_conversion(lkb);
4212 
4213 	error = do_convert(r, lkb);
4214 	if (reply)
4215 		send_convert_reply(r, lkb, error);
4216 	do_convert_effects(r, lkb, error);
4217  out:
4218 	unlock_rsb(r);
4219 	put_rsb(r);
4220 	dlm_put_lkb(lkb);
4221 	return 0;
4222 
4223  fail:
4224 	setup_stub_lkb(ls, ms);
4225 	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4226 	return error;
4227 }
4228 
4229 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4230 {
4231 	struct dlm_lkb *lkb;
4232 	struct dlm_rsb *r;
4233 	int error;
4234 
4235 	error = find_lkb(ls, ms->m_remid, &lkb);
4236 	if (error)
4237 		goto fail;
4238 
4239 	if (lkb->lkb_remid != ms->m_lkid) {
4240 		log_error(ls, "receive_unlock %x remid %x remote %d %x",
4241 			  lkb->lkb_id, lkb->lkb_remid,
4242 			  ms->m_header.h_nodeid, ms->m_lkid);
4243 		error = -ENOENT;
4244 		dlm_put_lkb(lkb);
4245 		goto fail;
4246 	}
4247 
4248 	r = lkb->lkb_resource;
4249 
4250 	hold_rsb(r);
4251 	lock_rsb(r);
4252 
4253 	error = validate_message(lkb, ms);
4254 	if (error)
4255 		goto out;
4256 
4257 	receive_flags(lkb, ms);
4258 
4259 	error = receive_unlock_args(ls, lkb, ms);
4260 	if (error) {
4261 		send_unlock_reply(r, lkb, error);
4262 		goto out;
4263 	}
4264 
4265 	error = do_unlock(r, lkb);
4266 	send_unlock_reply(r, lkb, error);
4267 	do_unlock_effects(r, lkb, error);
4268  out:
4269 	unlock_rsb(r);
4270 	put_rsb(r);
4271 	dlm_put_lkb(lkb);
4272 	return 0;
4273 
4274  fail:
4275 	setup_stub_lkb(ls, ms);
4276 	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4277 	return error;
4278 }
4279 
4280 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4281 {
4282 	struct dlm_lkb *lkb;
4283 	struct dlm_rsb *r;
4284 	int error;
4285 
4286 	error = find_lkb(ls, ms->m_remid, &lkb);
4287 	if (error)
4288 		goto fail;
4289 
4290 	receive_flags(lkb, ms);
4291 
4292 	r = lkb->lkb_resource;
4293 
4294 	hold_rsb(r);
4295 	lock_rsb(r);
4296 
4297 	error = validate_message(lkb, ms);
4298 	if (error)
4299 		goto out;
4300 
4301 	error = do_cancel(r, lkb);
4302 	send_cancel_reply(r, lkb, error);
4303 	do_cancel_effects(r, lkb, error);
4304  out:
4305 	unlock_rsb(r);
4306 	put_rsb(r);
4307 	dlm_put_lkb(lkb);
4308 	return 0;
4309 
4310  fail:
4311 	setup_stub_lkb(ls, ms);
4312 	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4313 	return error;
4314 }
4315 
4316 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4317 {
4318 	struct dlm_lkb *lkb;
4319 	struct dlm_rsb *r;
4320 	int error;
4321 
4322 	error = find_lkb(ls, ms->m_remid, &lkb);
4323 	if (error)
4324 		return error;
4325 
4326 	r = lkb->lkb_resource;
4327 
4328 	hold_rsb(r);
4329 	lock_rsb(r);
4330 
4331 	error = validate_message(lkb, ms);
4332 	if (error)
4333 		goto out;
4334 
4335 	receive_flags_reply(lkb, ms);
4336 	if (is_altmode(lkb))
4337 		munge_altmode(lkb, ms);
4338 	grant_lock_pc(r, lkb, ms);
4339 	queue_cast(r, lkb, 0);
4340  out:
4341 	unlock_rsb(r);
4342 	put_rsb(r);
4343 	dlm_put_lkb(lkb);
4344 	return 0;
4345 }
4346 
4347 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4348 {
4349 	struct dlm_lkb *lkb;
4350 	struct dlm_rsb *r;
4351 	int error;
4352 
4353 	error = find_lkb(ls, ms->m_remid, &lkb);
4354 	if (error)
4355 		return error;
4356 
4357 	r = lkb->lkb_resource;
4358 
4359 	hold_rsb(r);
4360 	lock_rsb(r);
4361 
4362 	error = validate_message(lkb, ms);
4363 	if (error)
4364 		goto out;
4365 
4366 	queue_bast(r, lkb, ms->m_bastmode);
4367 	lkb->lkb_highbast = ms->m_bastmode;
4368  out:
4369 	unlock_rsb(r);
4370 	put_rsb(r);
4371 	dlm_put_lkb(lkb);
4372 	return 0;
4373 }
4374 
4375 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4376 {
4377 	int len, error, ret_nodeid, from_nodeid, our_nodeid;
4378 
4379 	from_nodeid = ms->m_header.h_nodeid;
4380 	our_nodeid = dlm_our_nodeid();
4381 
4382 	len = receive_extralen(ms);
4383 
4384 	error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4385 				  &ret_nodeid, NULL);
4386 
4387 	/* Optimization: we're master so treat lookup as a request */
4388 	if (!error && ret_nodeid == our_nodeid) {
4389 		receive_request(ls, ms);
4390 		return;
4391 	}
4392 	send_lookup_reply(ls, ms, ret_nodeid, error);
4393 }
4394 
4395 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4396 {
4397 	char name[DLM_RESNAME_MAXLEN+1];
4398 	struct dlm_rsb *r;
4399 	uint32_t hash, b;
4400 	int rv, len, dir_nodeid, from_nodeid;
4401 
4402 	from_nodeid = ms->m_header.h_nodeid;
4403 
4404 	len = receive_extralen(ms);
4405 
4406 	if (len > DLM_RESNAME_MAXLEN) {
4407 		log_error(ls, "receive_remove from %d bad len %d",
4408 			  from_nodeid, len);
4409 		return;
4410 	}
4411 
4412 	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4413 	if (dir_nodeid != dlm_our_nodeid()) {
4414 		log_error(ls, "receive_remove from %d bad nodeid %d",
4415 			  from_nodeid, dir_nodeid);
4416 		return;
4417 	}
4418 
4419 	/* Look for the name on rsbtbl.toss; if it's there, kill it.
4420 	   If it's on rsbtbl.keep, it's being used, and we should ignore this
4421 	   message.  This is an expected race between the dir node sending a
4422 	   request to the master node at the same time as the master node sends
4423 	   a remove to the dir node.  The resolution to that race is for the
4424 	   dir node to ignore the remove message, and the master node to
4425 	   recreate the master rsb when it gets a request from the dir node for
4426 	   an rsb it doesn't have. */
4427 
4428 	memset(name, 0, sizeof(name));
4429 	memcpy(name, ms->m_extra, len);
4430 
4431 	hash = jhash(name, len, 0);
4432 	b = hash & (ls->ls_rsbtbl_size - 1);
4433 
4434 	spin_lock(&ls->ls_rsbtbl[b].lock);
4435 
4436 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4437 	if (rv) {
4438 		/* verify the rsb is on keep list per comment above */
4439 		/* verify the rsb is on the keep list per the comment above */
4440 		if (rv) {
4441 			/* should not happen */
4442 			log_error(ls, "receive_remove from %d not found %s",
4443 				  from_nodeid, name);
4444 			spin_unlock(&ls->ls_rsbtbl[b].lock);
4445 			return;
4446 		}
4447 		if (r->res_master_nodeid != from_nodeid) {
4448 			/* should not happen */
4449 			log_error(ls, "receive_remove keep from %d master %d",
4450 				  from_nodeid, r->res_master_nodeid);
4451 			dlm_print_rsb(r);
4452 			spin_unlock(&ls->ls_rsbtbl[b].lock);
4453 			return;
4454 		}
4455 
4456 		log_debug(ls, "receive_remove from %d master %d first %x %s",
4457 			  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4458 			  name);
4459 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4460 		return;
4461 	}
4462 
4463 	if (r->res_master_nodeid != from_nodeid) {
4464 		log_error(ls, "receive_remove toss from %d master %d",
4465 			  from_nodeid, r->res_master_nodeid);
4466 		dlm_print_rsb(r);
4467 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4468 		return;
4469 	}
4470 
4471 	if (kref_put(&r->res_ref, kill_rsb)) {
4472 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4473 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4474 		dlm_free_rsb(r);
4475 	} else {
4476 		log_error(ls, "receive_remove from %d rsb ref error",
4477 			  from_nodeid);
4478 		dlm_print_rsb(r);
4479 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4480 	}
4481 }
4482 
4483 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4484 {
4485 	do_purge(ls, ms->m_nodeid, ms->m_pid);
4486 }
4487 
4488 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4489 {
4490 	struct dlm_lkb *lkb;
4491 	struct dlm_rsb *r;
4492 	int error, mstype, result;
4493 	int from_nodeid = ms->m_header.h_nodeid;
4494 
4495 	error = find_lkb(ls, ms->m_remid, &lkb);
4496 	if (error)
4497 		return error;
4498 
4499 	r = lkb->lkb_resource;
4500 	hold_rsb(r);
4501 	lock_rsb(r);
4502 
4503 	error = validate_message(lkb, ms);
4504 	if (error)
4505 		goto out;
4506 
4507 	mstype = lkb->lkb_wait_type;
4508 	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4509 	if (error) {
4510 		log_error(ls, "receive_request_reply %x remote %d %x result %d",
4511 			  lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4512 		dlm_dump_rsb(r);
4513 		goto out;
4514 	}
4515 
4516 	/* Optimization: the dir node was also the master, so it took our
4517 	   lookup as a request and sent request reply instead of lookup reply */
4518 	if (mstype == DLM_MSG_LOOKUP) {
4519 		r->res_master_nodeid = from_nodeid;
4520 		r->res_nodeid = from_nodeid;
4521 		lkb->lkb_nodeid = from_nodeid;
4522 	}
4523 
4524 	/* this is the value returned from do_request() on the master */
4525 	result = ms->m_result;
4526 
4527 	switch (result) {
4528 	case -EAGAIN:
4529 		/* request would block (be queued) on remote master */
4530 		queue_cast(r, lkb, -EAGAIN);
4531 		confirm_master(r, -EAGAIN);
4532 		unhold_lkb(lkb); /* undoes create_lkb() */
4533 		break;
4534 
4535 	case -EINPROGRESS:
4536 	case 0:
4537 		/* request was queued or granted on remote master */
4538 		receive_flags_reply(lkb, ms);
4539 		lkb->lkb_remid = ms->m_lkid;
4540 		if (is_altmode(lkb))
4541 			munge_altmode(lkb, ms);
4542 		if (result) {
4543 			add_lkb(r, lkb, DLM_LKSTS_WAITING);
4544 			add_timeout(lkb);
4545 		} else {
4546 			grant_lock_pc(r, lkb, ms);
4547 			queue_cast(r, lkb, 0);
4548 		}
4549 		confirm_master(r, result);
4550 		break;
4551 
4552 	case -EBADR:
4553 	case -ENOTBLK:
4554 		/* find_rsb failed to find rsb or rsb wasn't master */
4555 		log_limit(ls, "receive_request_reply %x from %d %d "
4556 			  "master %d dir %d first %x %s", lkb->lkb_id,
4557 			  from_nodeid, result, r->res_master_nodeid,
4558 			  r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4559 
4560 		if (r->res_dir_nodeid != dlm_our_nodeid() &&
4561 		    r->res_master_nodeid != dlm_our_nodeid()) {
4562 			/* cause _request_lock->set_master->send_lookup */
4563 			r->res_master_nodeid = 0;
4564 			r->res_nodeid = -1;
4565 			lkb->lkb_nodeid = -1;
4566 		}
4567 
4568 		if (is_overlap(lkb)) {
4569 			/* we'll ignore error in cancel/unlock reply */
4570 			queue_cast_overlap(r, lkb);
4571 			confirm_master(r, result);
4572 			unhold_lkb(lkb); /* undoes create_lkb() */
4573 		} else {
4574 			_request_lock(r, lkb);
4575 
4576 			if (r->res_master_nodeid == dlm_our_nodeid())
4577 				confirm_master(r, 0);
4578 		}
4579 		break;
4580 
4581 	default:
4582 		log_error(ls, "receive_request_reply %x error %d",
4583 			  lkb->lkb_id, result);
4584 	}
4585 
4586 	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4587 		log_debug(ls, "receive_request_reply %x result %d unlock",
4588 			  lkb->lkb_id, result);
4589 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4590 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4591 		send_unlock(r, lkb);
4592 	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4593 		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4594 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4595 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4596 		send_cancel(r, lkb);
4597 	} else {
4598 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4599 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4600 	}
4601  out:
4602 	unlock_rsb(r);
4603 	put_rsb(r);
4604 	dlm_put_lkb(lkb);
4605 	return 0;
4606 }
4607 
4608 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4609 				    struct dlm_message *ms)
4610 {
4611 	/* this is the value returned from do_convert() on the master */
4612 	switch (ms->m_result) {
4613 	case -EAGAIN:
4614 		/* convert would block (be queued) on remote master */
4615 		queue_cast(r, lkb, -EAGAIN);
4616 		break;
4617 
4618 	case -EDEADLK:
4619 		receive_flags_reply(lkb, ms);
4620 		revert_lock_pc(r, lkb);
4621 		queue_cast(r, lkb, -EDEADLK);
4622 		break;
4623 
4624 	case -EINPROGRESS:
4625 		/* convert was queued on remote master */
4626 		receive_flags_reply(lkb, ms);
4627 		if (is_demoted(lkb))
4628 			munge_demoted(lkb);
4629 		del_lkb(r, lkb);
4630 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4631 		add_timeout(lkb);
4632 		break;
4633 
4634 	case 0:
4635 		/* convert was granted on remote master */
4636 		receive_flags_reply(lkb, ms);
4637 		if (is_demoted(lkb))
4638 			munge_demoted(lkb);
4639 		grant_lock_pc(r, lkb, ms);
4640 		queue_cast(r, lkb, 0);
4641 		break;
4642 
4643 	default:
4644 		log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4645 			  lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4646 			  ms->m_result);
4647 		dlm_print_rsb(r);
4648 		dlm_print_lkb(lkb);
4649 	}
4650 }
4651 
4652 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4653 {
4654 	struct dlm_rsb *r = lkb->lkb_resource;
4655 	int error;
4656 
4657 	hold_rsb(r);
4658 	lock_rsb(r);
4659 
4660 	error = validate_message(lkb, ms);
4661 	if (error)
4662 		goto out;
4663 
4664 	/* stub reply can happen with waiters_mutex held */
4665 	error = remove_from_waiters_ms(lkb, ms);
4666 	if (error)
4667 		goto out;
4668 
4669 	__receive_convert_reply(r, lkb, ms);
4670  out:
4671 	unlock_rsb(r);
4672 	put_rsb(r);
4673 }
4674 
4675 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4676 {
4677 	struct dlm_lkb *lkb;
4678 	int error;
4679 
4680 	error = find_lkb(ls, ms->m_remid, &lkb);
4681 	if (error)
4682 		return error;
4683 
4684 	_receive_convert_reply(lkb, ms);
4685 	dlm_put_lkb(lkb);
4686 	return 0;
4687 }
4688 
4689 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4690 {
4691 	struct dlm_rsb *r = lkb->lkb_resource;
4692 	int error;
4693 
4694 	hold_rsb(r);
4695 	lock_rsb(r);
4696 
4697 	error = validate_message(lkb, ms);
4698 	if (error)
4699 		goto out;
4700 
4701 	/* stub reply can happen with waiters_mutex held */
4702 	error = remove_from_waiters_ms(lkb, ms);
4703 	if (error)
4704 		goto out;
4705 
4706 	/* this is the value returned from do_unlock() on the master */
4707 
4708 	switch (ms->m_result) {
4709 	case -DLM_EUNLOCK:
4710 		receive_flags_reply(lkb, ms);
4711 		remove_lock_pc(r, lkb);
4712 		queue_cast(r, lkb, -DLM_EUNLOCK);
4713 		break;
4714 	case -ENOENT:
4715 		break;
4716 	default:
4717 		log_error(r->res_ls, "receive_unlock_reply %x error %d",
4718 			  lkb->lkb_id, ms->m_result);
4719 	}
4720  out:
4721 	unlock_rsb(r);
4722 	put_rsb(r);
4723 }
4724 
4725 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4726 {
4727 	struct dlm_lkb *lkb;
4728 	int error;
4729 
4730 	error = find_lkb(ls, ms->m_remid, &lkb);
4731 	if (error)
4732 		return error;
4733 
4734 	_receive_unlock_reply(lkb, ms);
4735 	dlm_put_lkb(lkb);
4736 	return 0;
4737 }
4738 
4739 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4740 {
4741 	struct dlm_rsb *r = lkb->lkb_resource;
4742 	int error;
4743 
4744 	hold_rsb(r);
4745 	lock_rsb(r);
4746 
4747 	error = validate_message(lkb, ms);
4748 	if (error)
4749 		goto out;
4750 
4751 	/* stub reply can happen with waiters_mutex held */
4752 	error = remove_from_waiters_ms(lkb, ms);
4753 	if (error)
4754 		goto out;
4755 
4756 	/* this is the value returned from do_cancel() on the master */
4757 
4758 	switch (ms->m_result) {
4759 	case -DLM_ECANCEL:
4760 		receive_flags_reply(lkb, ms);
4761 		revert_lock_pc(r, lkb);
4762 		queue_cast(r, lkb, -DLM_ECANCEL);
4763 		break;
4764 	case 0:
4765 		break;
4766 	default:
4767 		log_error(r->res_ls, "receive_cancel_reply %x error %d",
4768 			  lkb->lkb_id, ms->m_result);
4769 	}
4770  out:
4771 	unlock_rsb(r);
4772 	put_rsb(r);
4773 }
4774 
4775 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4776 {
4777 	struct dlm_lkb *lkb;
4778 	int error;
4779 
4780 	error = find_lkb(ls, ms->m_remid, &lkb);
4781 	if (error)
4782 		return error;
4783 
4784 	_receive_cancel_reply(lkb, ms);
4785 	dlm_put_lkb(lkb);
4786 	return 0;
4787 }
4788 
4789 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4790 {
4791 	struct dlm_lkb *lkb;
4792 	struct dlm_rsb *r;
4793 	int error, ret_nodeid;
4794 	int do_lookup_list = 0;
4795 
4796 	error = find_lkb(ls, ms->m_lkid, &lkb);
4797 	if (error) {
4798 		log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4799 		return;
4800 	}
4801 
4802 	/* ms->m_result is the value returned by dlm_master_lookup on dir node
4803 	   FIXME: will a non-zero error ever be returned? */
4804 
4805 	r = lkb->lkb_resource;
4806 	hold_rsb(r);
4807 	lock_rsb(r);
4808 
4809 	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4810 	if (error)
4811 		goto out;
4812 
4813 	ret_nodeid = ms->m_nodeid;
4814 
4815 	/* We sometimes receive a request from the dir node for this
4816 	   rsb before we've received the dir node's lookup_reply for it.
4817 	   The request from the dir node implies we're the master, so we set
4818 	   ourselves as master in receive_request_reply, and verify here that
4819 	   we are indeed the master. */
4820 
4821 	if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4822 		/* This should never happen */
4823 		log_error(ls, "receive_lookup_reply %x from %d ret %d "
4824 			  "master %d dir %d our %d first %x %s",
4825 			  lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4826 			  r->res_master_nodeid, r->res_dir_nodeid,
4827 			  dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4828 	}
4829 
4830 	if (ret_nodeid == dlm_our_nodeid()) {
4831 		r->res_master_nodeid = ret_nodeid;
4832 		r->res_nodeid = 0;
4833 		do_lookup_list = 1;
4834 		r->res_first_lkid = 0;
4835 	} else if (ret_nodeid == -1) {
4836 		/* the remote node doesn't believe it's the dir node */
4837 		log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4838 			  lkb->lkb_id, ms->m_header.h_nodeid);
4839 		r->res_master_nodeid = 0;
4840 		r->res_nodeid = -1;
4841 		lkb->lkb_nodeid = -1;
4842 	} else {
4843 		/* set_master() will set lkb_nodeid from r */
4844 		r->res_master_nodeid = ret_nodeid;
4845 		r->res_nodeid = ret_nodeid;
4846 	}
4847 
4848 	if (is_overlap(lkb)) {
4849 		log_debug(ls, "receive_lookup_reply %x unlock %x",
4850 			  lkb->lkb_id, lkb->lkb_flags);
4851 		queue_cast_overlap(r, lkb);
4852 		unhold_lkb(lkb); /* undoes create_lkb() */
4853 		goto out_list;
4854 	}
4855 
4856 	_request_lock(r, lkb);
4857 
4858  out_list:
4859 	if (do_lookup_list)
4860 		process_lookup_list(r);
4861  out:
4862 	unlock_rsb(r);
4863 	put_rsb(r);
4864 	dlm_put_lkb(lkb);
4865 }
4866 
4867 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4868 			     uint32_t saved_seq)
4869 {
4870 	int error = 0, noent = 0;
4871 
4872 	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4873 		log_limit(ls, "receive %d from non-member %d %x %x %d",
4874 			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4875 			  ms->m_remid, ms->m_result);
4876 		return;
4877 	}
4878 
4879 	switch (ms->m_type) {
4880 
4881 	/* messages sent to a master node */
4882 
4883 	case DLM_MSG_REQUEST:
4884 		error = receive_request(ls, ms);
4885 		break;
4886 
4887 	case DLM_MSG_CONVERT:
4888 		error = receive_convert(ls, ms);
4889 		break;
4890 
4891 	case DLM_MSG_UNLOCK:
4892 		error = receive_unlock(ls, ms);
4893 		break;
4894 
4895 	case DLM_MSG_CANCEL:
4896 		noent = 1;
4897 		error = receive_cancel(ls, ms);
4898 		break;
4899 
4900 	/* messages sent from a master node (replies to above) */
4901 
4902 	case DLM_MSG_REQUEST_REPLY:
4903 		error = receive_request_reply(ls, ms);
4904 		break;
4905 
4906 	case DLM_MSG_CONVERT_REPLY:
4907 		error = receive_convert_reply(ls, ms);
4908 		break;
4909 
4910 	case DLM_MSG_UNLOCK_REPLY:
4911 		error = receive_unlock_reply(ls, ms);
4912 		break;
4913 
4914 	case DLM_MSG_CANCEL_REPLY:
4915 		error = receive_cancel_reply(ls, ms);
4916 		break;
4917 
4918 	/* messages sent from a master node (only two types of async msg) */
4919 
4920 	case DLM_MSG_GRANT:
4921 		noent = 1;
4922 		error = receive_grant(ls, ms);
4923 		break;
4924 
4925 	case DLM_MSG_BAST:
4926 		noent = 1;
4927 		error = receive_bast(ls, ms);
4928 		break;
4929 
4930 	/* messages sent to a dir node */
4931 
4932 	case DLM_MSG_LOOKUP:
4933 		receive_lookup(ls, ms);
4934 		break;
4935 
4936 	case DLM_MSG_REMOVE:
4937 		receive_remove(ls, ms);
4938 		break;
4939 
4940 	/* messages sent from a dir node (remove has no reply) */
4941 
4942 	case DLM_MSG_LOOKUP_REPLY:
4943 		receive_lookup_reply(ls, ms);
4944 		break;
4945 
4946 	/* other messages */
4947 
4948 	case DLM_MSG_PURGE:
4949 		receive_purge(ls, ms);
4950 		break;
4951 
4952 	default:
4953 		log_error(ls, "unknown message type %d", ms->m_type);
4954 	}
4955 
4956 	/*
4957 	 * When checking for ENOENT, we're checking the result of
4958 	 * find_lkb(m_remid):
4959 	 *
4960 	 * The lock id referenced in the message wasn't found.  This may
4961 	 * happen in normal usage for the async messages and cancel, so
4962 	 * only use log_debug for them.
4963 	 *
4964 	 * Some errors are expected and normal.
4965 	 */
4966 
4967 	if (error == -ENOENT && noent) {
4968 		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4969 			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4970 			  ms->m_lkid, saved_seq);
4971 	} else if (error == -ENOENT) {
4972 		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4973 			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4974 			  ms->m_lkid, saved_seq);
4975 
4976 		if (ms->m_type == DLM_MSG_CONVERT)
4977 			dlm_dump_rsb_hash(ls, ms->m_hash);
4978 	}
4979 
4980 	if (error == -EINVAL) {
4981 		log_error(ls, "receive %d inval from %d lkid %x remid %x "
4982 			  "saved_seq %u",
4983 			  ms->m_type, ms->m_header.h_nodeid,
4984 			  ms->m_lkid, ms->m_remid, saved_seq);
4985 	}
4986 }
4987 
4988 /* If the lockspace is in recovery mode (locking stopped), then normal
4989    messages are saved on the requestqueue for processing after recovery is
4990    done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4991    messages off the requestqueue before we process new ones. This occurs right
4992    after recovery completes when we transition from saving all messages on
4993    requestqueue, to processing all the saved messages, to processing new
4994    messages as they arrive. */
4995 
4996 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4997 				int nodeid)
4998 {
4999 	if (dlm_locking_stopped(ls)) {
5000 		/* If we were a member of this lockspace, left, and rejoined,
5001 		   other nodes may still be sending us messages from the
5002 		   lockspace generation before we left. */
5003 		if (!ls->ls_generation) {
5004 			log_limit(ls, "receive %d from %d ignore old gen",
5005 				  ms->m_type, nodeid);
5006 			return;
5007 		}
5008 
5009 		dlm_add_requestqueue(ls, nodeid, ms);
5010 	} else {
5011 		dlm_wait_requestqueue(ls);
5012 		_receive_message(ls, ms, 0);
5013 	}
5014 }
5015 
5016 /* This is called by dlm_recoverd to process messages that were saved on
5017    the requestqueue. */
5018 
5019 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5020 			       uint32_t saved_seq)
5021 {
5022 	_receive_message(ls, ms, saved_seq);
5023 }
5024 
5025 /* This is called by the midcomms layer when something is received for
5026    the lockspace.  It could be either a MSG (normal message sent as part of
5027    standard locking activity) or an RCOM (recovery message sent as part of
5028    lockspace recovery). */
5029 
5030 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5031 {
5032 	struct dlm_header *hd = &p->header;
5033 	struct dlm_ls *ls;
5034 	int type = 0;
5035 
5036 	switch (hd->h_cmd) {
5037 	case DLM_MSG:
5038 		dlm_message_in(&p->message);
5039 		type = p->message.m_type;
5040 		break;
5041 	case DLM_RCOM:
5042 		dlm_rcom_in(&p->rcom);
5043 		type = p->rcom.rc_type;
5044 		break;
5045 	default:
5046 		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5047 		return;
5048 	}
5049 
5050 	if (hd->h_nodeid != nodeid) {
5051 		log_print("invalid h_nodeid %d from %d lockspace %x",
5052 			  hd->h_nodeid, nodeid, hd->h_lockspace);
5053 		return;
5054 	}
5055 
5056 	ls = dlm_find_lockspace_global(hd->h_lockspace);
5057 	if (!ls) {
5058 		if (dlm_config.ci_log_debug) {
5059 			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5060 				"%u from %d cmd %d type %d\n",
5061 				hd->h_lockspace, nodeid, hd->h_cmd, type);
5062 		}
5063 
5064 		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5065 			dlm_send_ls_not_ready(nodeid, &p->rcom);
5066 		return;
5067 	}
5068 
5069 	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5070 	   be inactive (in this ls) before transitioning to recovery mode */
5071 
5072 	down_read(&ls->ls_recv_active);
5073 	if (hd->h_cmd == DLM_MSG)
5074 		dlm_receive_message(ls, &p->message, nodeid);
5075 	else
5076 		dlm_receive_rcom(ls, &p->rcom, nodeid);
5077 	up_read(&ls->ls_recv_active);
5078 
5079 	dlm_put_lockspace(ls);
5080 }
5081 
5082 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5083 				   struct dlm_message *ms_stub)
5084 {
5085 	if (middle_conversion(lkb)) {
5086 		hold_lkb(lkb);
5087 		memset(ms_stub, 0, sizeof(struct dlm_message));
5088 		ms_stub->m_flags = DLM_IFL_STUB_MS;
5089 		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5090 		ms_stub->m_result = -EINPROGRESS;
5091 		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5092 		_receive_convert_reply(lkb, ms_stub);
5093 
5094 		/* Same special case as in receive_rcom_lock_args() */
5095 		lkb->lkb_grmode = DLM_LOCK_IV;
5096 		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5097 		unhold_lkb(lkb);
5098 
5099 	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5100 		lkb->lkb_flags |= DLM_IFL_RESEND;
5101 	}
5102 
5103 	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5104 	   conversions are async; there's no reply from the remote master */
5105 }
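
/* Editorial note (an assumption about middle_conversion(), defined earlier
   in the file): the "middle" case covers PR<->CW conversions, where neither
   mode is strictly stronger, so the failed master may have either granted
   or queued the convert.  Faking an -EINPROGRESS reply and dropping
   lkb_grmode to DLM_LOCK_IV, together with RSB_RECOVER_CONVERT, appears to
   defer sorting this out to recovery, matching the "special attention"
   comment below. */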
5106 
5107 /* A waiting lkb needs recovery if the master node has failed, or
5108    the master node is changing (only when no directory is used) */
5109 
5110 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5111 				 int dir_nodeid)
5112 {
5113 	if (dlm_no_directory(ls))
5114 		return 1;
5115 
5116 	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5117 		return 1;
5118 
5119 	return 0;
5120 }
5121 
5122 /* Recovery for locks that are waiting for replies from nodes that are now
5123    gone.  We can just complete unlocks and cancels by faking a reply from the
5124    dead node.  Requests and up-conversions we flag to be resent after
5125    recovery.  Down-conversions can just be completed with a fake reply like
5126    unlocks.  Conversions between PR and CW need special attention. */
5127 
5128 void dlm_recover_waiters_pre(struct dlm_ls *ls)
5129 {
5130 	struct dlm_lkb *lkb, *safe;
5131 	struct dlm_message *ms_stub;
5132 	int wait_type, stub_unlock_result, stub_cancel_result;
5133 	int dir_nodeid;
5134 
5135 	ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
5136 	if (!ms_stub) {
5137 		log_error(ls, "dlm_recover_waiters_pre no mem");
5138 		return;
5139 	}
5140 
5141 	mutex_lock(&ls->ls_waiters_mutex);
5142 
5143 	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5144 
5145 		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5146 
5147 		/* exclude debug messages about unlocks because there can be so
5148 		   many and they aren't very interesting */
5149 
5150 		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5151 			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5152 				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5153 				  lkb->lkb_id,
5154 				  lkb->lkb_remid,
5155 				  lkb->lkb_wait_type,
5156 				  lkb->lkb_resource->res_nodeid,
5157 				  lkb->lkb_nodeid,
5158 				  lkb->lkb_wait_nodeid,
5159 				  dir_nodeid);
5160 		}
5161 
5162 		/* all outstanding lookups, regardless of destination, will be
5163 		   resent after recovery is done */
5164 
5165 		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5166 			lkb->lkb_flags |= DLM_IFL_RESEND;
5167 			continue;
5168 		}
5169 
5170 		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5171 			continue;
5172 
5173 		wait_type = lkb->lkb_wait_type;
5174 		stub_unlock_result = -DLM_EUNLOCK;
5175 		stub_cancel_result = -DLM_ECANCEL;
5176 
5177 		/* Main reply may have been received leaving a zero wait_type,
5178 		   but a reply for the overlapping op may not have been
5179 		   received.  In that case we need to fake the appropriate
5180 		   reply for the overlap op. */
5181 
5182 		if (!wait_type) {
5183 			if (is_overlap_cancel(lkb)) {
5184 				wait_type = DLM_MSG_CANCEL;
5185 				if (lkb->lkb_grmode == DLM_LOCK_IV)
5186 					stub_cancel_result = 0;
5187 			}
5188 			if (is_overlap_unlock(lkb)) {
5189 				wait_type = DLM_MSG_UNLOCK;
5190 				if (lkb->lkb_grmode == DLM_LOCK_IV)
5191 					stub_unlock_result = -ENOENT;
5192 			}
5193 
5194 			log_debug(ls, "rwpre overlap %x %x %d %d %d",
5195 				  lkb->lkb_id, lkb->lkb_flags, wait_type,
5196 				  stub_cancel_result, stub_unlock_result);
5197 		}
5198 
5199 		switch (wait_type) {
5200 
5201 		case DLM_MSG_REQUEST:
5202 			lkb->lkb_flags |= DLM_IFL_RESEND;
5203 			break;
5204 
5205 		case DLM_MSG_CONVERT:
5206 			recover_convert_waiter(ls, lkb, ms_stub);
5207 			break;
5208 
5209 		case DLM_MSG_UNLOCK:
5210 			hold_lkb(lkb);
5211 			memset(ms_stub, 0, sizeof(struct dlm_message));
5212 			ms_stub->m_flags = DLM_IFL_STUB_MS;
5213 			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5214 			ms_stub->m_result = stub_unlock_result;
5215 			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5216 			_receive_unlock_reply(lkb, ms_stub);
5217 			dlm_put_lkb(lkb);
5218 			break;
5219 
5220 		case DLM_MSG_CANCEL:
5221 			hold_lkb(lkb);
5222 			memset(ms_stub, 0, sizeof(struct dlm_message));
5223 			ms_stub->m_flags = DLM_IFL_STUB_MS;
5224 			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5225 			ms_stub->m_result = stub_cancel_result;
5226 			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5227 			_receive_cancel_reply(lkb, ms_stub);
5228 			dlm_put_lkb(lkb);
5229 			break;
5230 
5231 		default:
5232 			log_error(ls, "invalid lkb wait_type %d %d",
5233 				  lkb->lkb_wait_type, wait_type);
5234 		}
5235 		schedule();
5236 	}
5237 	mutex_unlock(&ls->ls_waiters_mutex);
5238 	kfree(ms_stub);
5239 }
5240 
5241 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5242 {
5243 	struct dlm_lkb *lkb;
5244 	int found = 0;
5245 
5246 	mutex_lock(&ls->ls_waiters_mutex);
5247 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5248 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
5249 			hold_lkb(lkb);
5250 			found = 1;
5251 			break;
5252 		}
5253 	}
5254 	mutex_unlock(&ls->ls_waiters_mutex);
5255 
5256 	if (!found)
5257 		lkb = NULL;
5258 	return lkb;
5259 }
5260 
5261 /* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
5262    master or dir-node for r.  Processing the lkb may result in it being placed
5263    back on waiters. */
5264 
5265 /* We do this after normal locking has been enabled and any saved messages
5266    (in requestqueue) have been processed.  We should be confident that at
5267    this point we won't get or process a reply to any of these waiting
5268    operations.  But, new ops may be coming in on the rsbs/locks here from
5269    userspace or remotely. */
5270 
5271 /* there may have been an overlap unlock/cancel prior to recovery or after
5272    recovery.  if before, the lkb may still have a positive wait_count; if
5273    after, the overlap flag would just have been set and nothing new sent.
5274    we can be confident here that any replies to either the initial op or
5275    overlap ops prior to recovery have been received. */
5276 
5277 int dlm_recover_waiters_post(struct dlm_ls *ls)
5278 {
5279 	struct dlm_lkb *lkb;
5280 	struct dlm_rsb *r;
5281 	int error = 0, mstype, err, oc, ou;
5282 
5283 	while (1) {
5284 		if (dlm_locking_stopped(ls)) {
5285 			log_debug(ls, "recover_waiters_post aborted");
5286 			error = -EINTR;
5287 			break;
5288 		}
5289 
5290 		lkb = find_resend_waiter(ls);
5291 		if (!lkb)
5292 			break;
5293 
5294 		r = lkb->lkb_resource;
5295 		hold_rsb(r);
5296 		lock_rsb(r);
5297 
5298 		mstype = lkb->lkb_wait_type;
5299 		oc = is_overlap_cancel(lkb);
5300 		ou = is_overlap_unlock(lkb);
5301 		err = 0;
5302 
5303 		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5304 			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5305 			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5306 			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5307 			  dlm_dir_nodeid(r), oc, ou);
5308 
5309 		/* At this point we assume that we won't get a reply to any
5310 		   previous op or overlap op on this lock.  First, do a big
5311 		   remove_from_waiters() for all previous ops. */
5312 
5313 		lkb->lkb_flags &= ~DLM_IFL_RESEND;
5314 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5315 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5316 		lkb->lkb_wait_type = 0;
5317 		lkb->lkb_wait_count = 0;
5318 		mutex_lock(&ls->ls_waiters_mutex);
5319 		list_del_init(&lkb->lkb_wait_reply);
5320 		mutex_unlock(&ls->ls_waiters_mutex);
5321 		unhold_lkb(lkb); /* for waiters list */
5322 
5323 		if (oc || ou) {
5324 			/* do an unlock or cancel instead of resending */
5325 			switch (mstype) {
5326 			case DLM_MSG_LOOKUP:
5327 			case DLM_MSG_REQUEST:
5328 				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5329 							-DLM_ECANCEL);
5330 				unhold_lkb(lkb); /* undoes create_lkb() */
5331 				break;
5332 			case DLM_MSG_CONVERT:
5333 				if (oc) {
5334 					queue_cast(r, lkb, -DLM_ECANCEL);
5335 				} else {
5336 					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5337 					_unlock_lock(r, lkb);
5338 				}
5339 				break;
5340 			default:
5341 				err = 1;
5342 			}
5343 		} else {
5344 			switch (mstype) {
5345 			case DLM_MSG_LOOKUP:
5346 			case DLM_MSG_REQUEST:
5347 				_request_lock(r, lkb);
5348 				if (is_master(r))
5349 					confirm_master(r, 0);
5350 				break;
5351 			case DLM_MSG_CONVERT:
5352 				_convert_lock(r, lkb);
5353 				break;
5354 			default:
5355 				err = 1;
5356 			}
5357 		}
5358 
5359 		if (err) {
5360 			log_error(ls, "waiter %x msg %d r_nodeid %d "
5361 				  "dir_nodeid %d overlap %d %d",
5362 				  lkb->lkb_id, mstype, r->res_nodeid,
5363 				  dlm_dir_nodeid(r), oc, ou);
5364 		}
5365 		unlock_rsb(r);
5366 		put_rsb(r);
5367 		dlm_put_lkb(lkb);
5368 	}
5369 
5370 	return error;
5371 }
5372 
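/* Remove master-copy lkbs from the given rsb queue, skipping any that
   recover_master_copy rebuilt during the current recovery sequence. */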
5373 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5374 			      struct list_head *list)
5375 {
5376 	struct dlm_lkb *lkb, *safe;
5377 
5378 	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5379 		if (!is_master_copy(lkb))
5380 			continue;
5381 
5382 		/* don't purge lkbs we've added in recover_master_copy for
5383 		   the current recovery seq */
5384 
5385 		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5386 			continue;
5387 
5388 		del_lkb(r, lkb);
5389 
5390 		/* this put should free the lkb */
5391 		if (!dlm_put_lkb(lkb))
5392 			log_error(ls, "purged mstcpy lkb not released");
5393 	}
5394 }
5395 
5396 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5397 {
5398 	struct dlm_ls *ls = r->res_ls;
5399 
5400 	purge_mstcpy_list(ls, r, &r->res_grantqueue);
5401 	purge_mstcpy_list(ls, r, &r->res_convertqueue);
5402 	purge_mstcpy_list(ls, r, &r->res_waitqueue);
5403 }
5404 
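/* Remove master-copy lkbs held by nodes that have left the lockspace.
   A departed EX/PW holder also invalidates the lvb, and any removal may
   make other locks grantable, so the rsb is flagged for recover_grant. */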
5405 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5406 			    struct list_head *list,
5407 			    int nodeid_gone, unsigned int *count)
5408 {
5409 	struct dlm_lkb *lkb, *safe;
5410 
5411 	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5412 		if (!is_master_copy(lkb))
5413 			continue;
5414 
5415 		if ((lkb->lkb_nodeid == nodeid_gone) ||
5416 		    dlm_is_removed(ls, lkb->lkb_nodeid)) {
5417 
5418 			/* tell recover_lvb to invalidate the lvb
5419 			   because a node holding EX/PW failed */
5420 			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5421 			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5422 				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5423 			}
5424 
5425 			del_lkb(r, lkb);
5426 
5427 			/* this put should free the lkb */
5428 			if (!dlm_put_lkb(lkb))
5429 				log_error(ls, "purged dead lkb not released");
5430 
5431 			rsb_set_flag(r, RSB_RECOVER_GRANT);
5432 
5433 			(*count)++;
5434 		}
5435 	}
5436 }
5437 
5438 /* Get rid of locks held by nodes that are gone. */
5439 
5440 void dlm_recover_purge(struct dlm_ls *ls)
5441 {
5442 	struct dlm_rsb *r;
5443 	struct dlm_member *memb;
5444 	int nodes_count = 0;
5445 	int nodeid_gone = 0;
5446 	unsigned int lkb_count = 0;
5447 
5448 	/* cache one removed nodeid to optimize the common
5449 	   case of a single node removed */
5450 
5451 	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5452 		nodes_count++;
5453 		nodeid_gone = memb->nodeid;
5454 	}
5455 
5456 	if (!nodes_count)
5457 		return;
5458 
5459 	down_write(&ls->ls_root_sem);
5460 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5461 		hold_rsb(r);
5462 		lock_rsb(r);
5463 		if (is_master(r)) {
5464 			purge_dead_list(ls, r, &r->res_grantqueue,
5465 					nodeid_gone, &lkb_count);
5466 			purge_dead_list(ls, r, &r->res_convertqueue,
5467 					nodeid_gone, &lkb_count);
5468 			purge_dead_list(ls, r, &r->res_waitqueue,
5469 					nodeid_gone, &lkb_count);
5470 		}
5471 		unlock_rsb(r);
5472 		unhold_rsb(r);
5473 		cond_resched();
5474 	}
5475 	up_write(&ls->ls_root_sem);
5476 
5477 	if (lkb_count)
5478 		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5479 			  lkb_count, nodes_count);
5480 }
5481 
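/* Return (with a reference held) the next rsb in the given hash bucket
   flagged RECOVER_GRANT and mastered here; clear the flag on rsbs we
   turn out not to master. */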
5482 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5483 {
5484 	struct rb_node *n;
5485 	struct dlm_rsb *r;
5486 
5487 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
5488 	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5489 		r = rb_entry(n, struct dlm_rsb, res_hashnode);
5490 
5491 		if (!rsb_flag(r, RSB_RECOVER_GRANT))
5492 			continue;
5493 		if (!is_master(r)) {
5494 			rsb_clear_flag(r, RSB_RECOVER_GRANT);
5495 			continue;
5496 		}
5497 		hold_rsb(r);
5498 		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5499 		return r;
5500 	}
5501 	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5502 	return NULL;
5503 }
5504 
5505 /*
5506  * Attempt to grant locks on resources that we are the master of.
5507  * Locks may have become grantable during recovery because locks
5508  * from departed nodes have been purged (or not rebuilt), allowing
5509  * previously blocked locks to now be granted.  The subset of rsb's
5510  * we are interested in are those with lkb's on either the convert or
5511  * waiting queues.
5512  *
5513  * Simplest would be to go through each master rsb and check for non-empty
5514  * convert or waiting queues, and attempt to grant on those rsbs.
5515  * Checking the queues requires lock_rsb, though, for which we'd need
5516  * to release the rsbtbl lock.  This would make iterating through all
5517  * rsb's very inefficient.  So, we rely on earlier recovery routines
5518  * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5519  * locks for.
5520  */
5521 
5522 void dlm_recover_grant(struct dlm_ls *ls)
5523 {
5524 	struct dlm_rsb *r;
5525 	int bucket = 0;
5526 	unsigned int count = 0;
5527 	unsigned int rsb_count = 0;
5528 	unsigned int lkb_count = 0;
5529 
5530 	while (1) {
5531 		r = find_grant_rsb(ls, bucket);
5532 		if (!r) {
5533 			if (bucket == ls->ls_rsbtbl_size - 1)
5534 				break;
5535 			bucket++;
5536 			continue;
5537 		}
5538 		rsb_count++;
5539 		count = 0;
5540 		lock_rsb(r);
5541 		/* the RECOVER_GRANT flag is checked in the grant path */
5542 		grant_pending_locks(r, &count);
5543 		rsb_clear_flag(r, RSB_RECOVER_GRANT);
5544 		lkb_count += count;
5545 		confirm_master(r, 0);
5546 		unlock_rsb(r);
5547 		put_rsb(r);
5548 		cond_resched();
5549 	}
5550 
5551 	if (lkb_count)
5552 		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5553 			  lkb_count, rsb_count);
5554 }
5555 
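/* Find the lkb on one queue (or, below, on any of the three rsb queues)
   whose remote nodeid and lkid match the given values. */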
5556 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5557 					 uint32_t remid)
5558 {
5559 	struct dlm_lkb *lkb;
5560 
5561 	list_for_each_entry(lkb, head, lkb_statequeue) {
5562 		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5563 			return lkb;
5564 	}
5565 	return NULL;
5566 }
5567 
5568 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5569 				    uint32_t remid)
5570 {
5571 	struct dlm_lkb *lkb;
5572 
5573 	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5574 	if (lkb)
5575 		return lkb;
5576 	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5577 	if (lkb)
5578 		return lkb;
5579 	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5580 	if (lkb)
5581 		return lkb;
5582 	return NULL;
5583 }
5584 
5585 /* needs at least dlm_rcom + rcom_lock */
5586 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5587 				  struct dlm_rsb *r, struct dlm_rcom *rc)
5588 {
5589 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5590 
5591 	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5592 	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5593 	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5594 	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5595 	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5596 	lkb->lkb_flags |= DLM_IFL_MSTCPY;
5597 	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5598 	lkb->lkb_rqmode = rl->rl_rqmode;
5599 	lkb->lkb_grmode = rl->rl_grmode;
5600 	/* don't set lkb_status because add_lkb wants to set it itself */
5601 
5602 	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5603 	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5604 
5605 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5606 		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5607 			 sizeof(struct rcom_lock);
5608 		if (lvblen > ls->ls_lvblen)
5609 			return -EINVAL;
5610 		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5611 		if (!lkb->lkb_lvbptr)
5612 			return -ENOMEM;
5613 		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5614 	}
5615 
5616 	/* Conversions between PR and CW (middle modes) need special handling.
5617 	   The real granted mode of these converting locks cannot be determined
5618 	   until all locks have been rebuilt on the rsb (recover_conversion) */
5619 
5620 	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5621 	    middle_conversion(lkb)) {
5622 		rl->rl_status = DLM_LKSTS_CONVERT;
5623 		lkb->lkb_grmode = DLM_LOCK_IV;
5624 		rsb_set_flag(r, RSB_RECOVER_CONVERT);
5625 	}
5626 
5627 	return 0;
5628 }
5629 
5630 /* This lkb may have been recovered in a previous aborted recovery so we need
5631    to check if the rsb already has an lkb with the given remote nodeid/lkid.
5632    If so we just send back a standard reply.  If not, we create a new lkb with
5633    the given values and send back our lkid.  We send back our lkid by sending
5634    back the rcom_lock struct we got but with the remid field filled in. */
5635 
5636 /* needs at least dlm_rcom + rcom_lock */
5637 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5638 {
5639 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5640 	struct dlm_rsb *r;
5641 	struct dlm_lkb *lkb;
5642 	uint32_t remid = 0;
5643 	int from_nodeid = rc->rc_header.h_nodeid;
5644 	int error;
5645 
5646 	if (rl->rl_parent_lkid) {
5647 		error = -EOPNOTSUPP;
5648 		goto out;
5649 	}
5650 
5651 	remid = le32_to_cpu(rl->rl_lkid);
5652 
5653 	/* In general we expect the rsb returned to be R_MASTER, but we don't
5654 	   have to require it.  Recovery of masters on one node can overlap
5655 	   recovery of locks on another node, so one node can send us MSTCPY
5656 	   locks before we've made ourselves master of this rsb.  We can still
5657 	   add new MSTCPY locks that we receive here without any harm; when
5658 	   we make ourselves master, dlm_recover_masters() won't touch the
5659 	   MSTCPY locks we've received early. */
5660 
5661 	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5662 			 from_nodeid, R_RECEIVE_RECOVER, &r);
5663 	if (error)
5664 		goto out;
5665 
5666 	lock_rsb(r);
5667 
5668 	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5669 		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5670 			  from_nodeid, remid);
5671 		error = -EBADR;
5672 		goto out_unlock;
5673 	}
5674 
5675 	lkb = search_remid(r, from_nodeid, remid);
5676 	if (lkb) {
5677 		error = -EEXIST;
5678 		goto out_remid;
5679 	}
5680 
5681 	error = create_lkb(ls, &lkb);
5682 	if (error)
5683 		goto out_unlock;
5684 
5685 	error = receive_rcom_lock_args(ls, lkb, r, rc);
5686 	if (error) {
5687 		__put_lkb(ls, lkb);
5688 		goto out_unlock;
5689 	}
5690 
5691 	attach_lkb(r, lkb);
5692 	add_lkb(r, lkb, rl->rl_status);
5693 	error = 0;
5694 	ls->ls_recover_locks_in++;
5695 
5696 	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5697 		rsb_set_flag(r, RSB_RECOVER_GRANT);
5698 
5699  out_remid:
5700 	/* this is the new value returned to the lock holder for
5701 	   saving in its process-copy lkb */
5702 	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5703 
5704 	lkb->lkb_recover_seq = ls->ls_recover_seq;
5705 
5706  out_unlock:
5707 	unlock_rsb(r);
5708 	put_rsb(r);
5709  out:
5710 	if (error && error != -EEXIST)
5711 		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5712 			  from_nodeid, remid, error);
5713 	rl->rl_result = cpu_to_le32(error);
5714 	return error;
5715 }
5716 
5717 /* needs at least dlm_rcom + rcom_lock */
5718 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5719 {
5720 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5721 	struct dlm_rsb *r;
5722 	struct dlm_lkb *lkb;
5723 	uint32_t lkid, remid;
5724 	int error, result;
5725 
5726 	lkid = le32_to_cpu(rl->rl_lkid);
5727 	remid = le32_to_cpu(rl->rl_remid);
5728 	result = le32_to_cpu(rl->rl_result);
5729 
5730 	error = find_lkb(ls, lkid, &lkb);
5731 	if (error) {
5732 		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5733 			  lkid, rc->rc_header.h_nodeid, remid, result);
5734 		return error;
5735 	}
5736 
5737 	r = lkb->lkb_resource;
5738 	hold_rsb(r);
5739 	lock_rsb(r);
5740 
5741 	if (!is_process_copy(lkb)) {
5742 		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5743 			  lkid, rc->rc_header.h_nodeid, remid, result);
5744 		dlm_dump_rsb(r);
5745 		unlock_rsb(r);
5746 		put_rsb(r);
5747 		dlm_put_lkb(lkb);
5748 		return -EINVAL;
5749 	}
5750 
5751 	switch (result) {
5752 	case -EBADR:
5753 		/* There's a chance the new master received our lock before
5754 		   dlm_recover_master_reply(); this wouldn't happen if we did
5755 		   a barrier between recover_masters and recover_locks. */
5756 
5757 		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5758 			  lkid, rc->rc_header.h_nodeid, remid, result);
5759 
5760 		dlm_send_rcom_lock(r, lkb);
5761 		goto out;
5762 	case -EEXIST:
5763 	case 0:
5764 		lkb->lkb_remid = remid;
5765 		break;
5766 	default:
5767 		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5768 			  lkid, rc->rc_header.h_nodeid, remid, result);
5769 	}
5770 
5771 	/* an ack for dlm_recover_locks() which waits for replies from
5772 	   all the locks it sends to new masters */
5773 	dlm_recovered_lock(r);
5774  out:
5775 	unlock_rsb(r);
5776 	put_rsb(r);
5777 	dlm_put_lkb(lkb);
5778 
5779 	return 0;
5780 }
5781 
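/* Request a lock on behalf of a userspace process: create the lkb,
   attach the dlm_user_args, and add the lkb to the per-process list
   so it can be cleaned up when the process exits. */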
5782 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5783 		     int mode, uint32_t flags, void *name, unsigned int namelen,
5784 		     unsigned long timeout_cs)
5785 {
5786 	struct dlm_lkb *lkb;
5787 	struct dlm_args args;
5788 	int error;
5789 
5790 	dlm_lock_recovery(ls);
5791 
5792 	error = create_lkb(ls, &lkb);
5793 	if (error) {
5794 		kfree(ua);
5795 		goto out;
5796 	}
5797 
5798 	if (flags & DLM_LKF_VALBLK) {
5799 		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5800 		if (!ua->lksb.sb_lvbptr) {
5801 			kfree(ua);
5802 			__put_lkb(ls, lkb);
5803 			error = -ENOMEM;
5804 			goto out;
5805 		}
5806 	}
5807 	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5808 			      fake_astfn, ua, fake_bastfn, &args);
5809 	if (error) {
5810 		kfree(ua->lksb.sb_lvbptr);
5811 		ua->lksb.sb_lvbptr = NULL;
5812 		kfree(ua);
5813 		__put_lkb(ls, lkb);
5814 		goto out;
5815 	}
5816 
5817 	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
5818 	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
5819 	   lock and that lkb_astparam is the dlm_user_args structure. */
5820 	lkb->lkb_flags |= DLM_IFL_USER;
5821 	error = request_lock(ls, lkb, name, namelen, &args);
5822 
5823 	switch (error) {
5824 	case 0:
5825 		break;
5826 	case -EINPROGRESS:
5827 		error = 0;
5828 		break;
5829 	case -EAGAIN:
5830 		error = 0;
5831 		/* fall through */
5832 	default:
5833 		__put_lkb(ls, lkb);
5834 		goto out;
5835 	}
5836 
5837 	/* add this new lkb to the per-process list of locks */
5838 	spin_lock(&ua->proc->locks_spin);
5839 	hold_lkb(lkb);
5840 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5841 	spin_unlock(&ua->proc->locks_spin);
5842  out:
5843 	dlm_unlock_recovery(ls);
5844 	return error;
5845 }
5846 
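/* Convert a userspace lock, refreshing the callback params and lvb from
   the new request before calling convert_lock(). */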
5847 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5848 		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5849 		     unsigned long timeout_cs)
5850 {
5851 	struct dlm_lkb *lkb;
5852 	struct dlm_args args;
5853 	struct dlm_user_args *ua;
5854 	int error;
5855 
5856 	dlm_lock_recovery(ls);
5857 
5858 	error = find_lkb(ls, lkid, &lkb);
5859 	if (error)
5860 		goto out;
5861 
5862 	/* user can change the params on its lock when it converts it, or
5863 	   add an lvb that didn't exist before */
5864 
5865 	ua = lkb->lkb_ua;
5866 
5867 	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5868 		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5869 		if (!ua->lksb.sb_lvbptr) {
5870 			error = -ENOMEM;
5871 			goto out_put;
5872 		}
5873 	}
5874 	if (lvb_in && ua->lksb.sb_lvbptr)
5875 		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5876 
5877 	ua->xid = ua_tmp->xid;
5878 	ua->castparam = ua_tmp->castparam;
5879 	ua->castaddr = ua_tmp->castaddr;
5880 	ua->bastparam = ua_tmp->bastparam;
5881 	ua->bastaddr = ua_tmp->bastaddr;
5882 	ua->user_lksb = ua_tmp->user_lksb;
5883 
5884 	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5885 			      fake_astfn, ua, fake_bastfn, &args);
5886 	if (error)
5887 		goto out_put;
5888 
5889 	error = convert_lock(ls, lkb, &args);
5890 
5891 	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5892 		error = 0;
5893  out_put:
5894 	dlm_put_lkb(lkb);
5895  out:
5896 	dlm_unlock_recovery(ls);
5897 	kfree(ua_tmp);
5898 	return error;
5899 }
5900 
5901 /*
5902  * The caller asks for an orphan lock on a given resource with a given mode.
5903  * If a matching lock exists, it's moved to the owner's list of locks and
5904  * the lkid is returned.
5905  */
5906 
5907 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5908 		     int mode, uint32_t flags, void *name, unsigned int namelen,
5909 		     unsigned long timeout_cs, uint32_t *lkid)
5910 {
5911 	struct dlm_lkb *lkb;
5912 	struct dlm_user_args *ua;
5913 	int found_other_mode = 0;
5914 	int found = 0;
5915 	int rv = 0;
5916 
5917 	mutex_lock(&ls->ls_orphans_mutex);
5918 	list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
5919 		if (lkb->lkb_resource->res_length != namelen)
5920 			continue;
5921 		if (memcmp(lkb->lkb_resource->res_name, name, namelen))
5922 			continue;
5923 		if (lkb->lkb_grmode != mode) {
5924 			found_other_mode = 1;
5925 			continue;
5926 		}
5927 
5928 		found = 1;
5929 		list_del_init(&lkb->lkb_ownqueue);
5930 		lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
5931 		*lkid = lkb->lkb_id;
5932 		break;
5933 	}
5934 	mutex_unlock(&ls->ls_orphans_mutex);
5935 
5936 	if (!found && found_other_mode) {
5937 		rv = -EAGAIN;
5938 		goto out;
5939 	}
5940 
5941 	if (!found) {
5942 		rv = -ENOENT;
5943 		goto out;
5944 	}
5945 
5946 	lkb->lkb_exflags = flags;
5947 	lkb->lkb_ownpid = (int) current->pid;
5948 
5949 	ua = lkb->lkb_ua;
5950 
5951 	ua->proc = ua_tmp->proc;
5952 	ua->xid = ua_tmp->xid;
5953 	ua->castparam = ua_tmp->castparam;
5954 	ua->castaddr = ua_tmp->castaddr;
5955 	ua->bastparam = ua_tmp->bastparam;
5956 	ua->bastaddr = ua_tmp->bastaddr;
5957 	ua->user_lksb = ua_tmp->user_lksb;
5958 
5959 	/*
5960 	 * The lkb reference from the ls_orphans list was not
5961 	 * removed above, and is now considered the reference
5962 	 * for the proc locks list.
5963 	 */
5964 
5965 	spin_lock(&ua->proc->locks_spin);
5966 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5967 	spin_unlock(&ua->proc->locks_spin);
5968  out:
5969 	kfree(ua_tmp);
5970 	return rv;
5971 }
5972 
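/* Unlock a userspace lock; on success the lkb is moved to the proc's
   unlocking list until the final callback removes it. */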
5973 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5974 		    uint32_t flags, uint32_t lkid, char *lvb_in)
5975 {
5976 	struct dlm_lkb *lkb;
5977 	struct dlm_args args;
5978 	struct dlm_user_args *ua;
5979 	int error;
5980 
5981 	dlm_lock_recovery(ls);
5982 
5983 	error = find_lkb(ls, lkid, &lkb);
5984 	if (error)
5985 		goto out;
5986 
5987 	ua = lkb->lkb_ua;
5988 
5989 	if (lvb_in && ua->lksb.sb_lvbptr)
5990 		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5991 	if (ua_tmp->castparam)
5992 		ua->castparam = ua_tmp->castparam;
5993 	ua->user_lksb = ua_tmp->user_lksb;
5994 
5995 	error = set_unlock_args(flags, ua, &args);
5996 	if (error)
5997 		goto out_put;
5998 
5999 	error = unlock_lock(ls, lkb, &args);
6000 
6001 	if (error == -DLM_EUNLOCK)
6002 		error = 0;
6003 	/* from validate_unlock_args() */
6004 	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
6005 		error = 0;
6006 	if (error)
6007 		goto out_put;
6008 
6009 	spin_lock(&ua->proc->locks_spin);
6010 	/* dlm_user_add_cb() may have already taken lkb off the proc list */
6011 	if (!list_empty(&lkb->lkb_ownqueue))
6012 		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6013 	spin_unlock(&ua->proc->locks_spin);
6014  out_put:
6015 	dlm_put_lkb(lkb);
6016  out:
6017 	dlm_unlock_recovery(ls);
6018 	kfree(ua_tmp);
6019 	return error;
6020 }
6021 
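/* Cancel an in-progress userspace lock request. */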
6022 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6023 		    uint32_t flags, uint32_t lkid)
6024 {
6025 	struct dlm_lkb *lkb;
6026 	struct dlm_args args;
6027 	struct dlm_user_args *ua;
6028 	int error;
6029 
6030 	dlm_lock_recovery(ls);
6031 
6032 	error = find_lkb(ls, lkid, &lkb);
6033 	if (error)
6034 		goto out;
6035 
6036 	ua = lkb->lkb_ua;
6037 	if (ua_tmp->castparam)
6038 		ua->castparam = ua_tmp->castparam;
6039 	ua->user_lksb = ua_tmp->user_lksb;
6040 
6041 	error = set_unlock_args(flags, ua, &args);
6042 	if (error)
6043 		goto out_put;
6044 
6045 	error = cancel_lock(ls, lkb, &args);
6046 
6047 	if (error == -DLM_ECANCEL)
6048 		error = 0;
6049 	/* from validate_unlock_args() */
6050 	if (error == -EBUSY)
6051 		error = 0;
6052  out_put:
6053 	dlm_put_lkb(lkb);
6054  out:
6055 	dlm_unlock_recovery(ls);
6056 	kfree(ua_tmp);
6057 	return error;
6058 }
6059 
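/* Cancel a userspace lock to break a deadlock: like dlm_user_cancel()
   but marking the lkb with DEADLOCK_CANCEL after locking the rsb. */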
6060 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6061 {
6062 	struct dlm_lkb *lkb;
6063 	struct dlm_args args;
6064 	struct dlm_user_args *ua;
6065 	struct dlm_rsb *r;
6066 	int error;
6067 
6068 	dlm_lock_recovery(ls);
6069 
6070 	error = find_lkb(ls, lkid, &lkb);
6071 	if (error)
6072 		goto out;
6073 
6074 	ua = lkb->lkb_ua;
6075 
6076 	error = set_unlock_args(flags, ua, &args);
6077 	if (error)
6078 		goto out_put;
6079 
6080 	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6081 
6082 	r = lkb->lkb_resource;
6083 	hold_rsb(r);
6084 	lock_rsb(r);
6085 
6086 	error = validate_unlock_args(lkb, &args);
6087 	if (error)
6088 		goto out_r;
6089 	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6090 
6091 	error = _cancel_lock(r, lkb);
6092  out_r:
6093 	unlock_rsb(r);
6094 	put_rsb(r);
6095 
6096 	if (error == -DLM_ECANCEL)
6097 		error = 0;
6098 	/* from validate_unlock_args() */
6099 	if (error == -EBUSY)
6100 		error = 0;
6101  out_put:
6102 	dlm_put_lkb(lkb);
6103  out:
6104 	dlm_unlock_recovery(ls);
6105 	return error;
6106 }
6107 
6108 /* lkb's that are removed from the waiters list by revert are just left on the
6109    orphans list with the granted orphan locks, to be freed by purge */
6110 
6111 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6112 {
6113 	struct dlm_args args;
6114 	int error;
6115 
6116 	hold_lkb(lkb); /* reference for the ls_orphans list */
6117 	mutex_lock(&ls->ls_orphans_mutex);
6118 	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6119 	mutex_unlock(&ls->ls_orphans_mutex);
6120 
6121 	set_unlock_args(0, lkb->lkb_ua, &args);
6122 
6123 	error = cancel_lock(ls, lkb, &args);
6124 	if (error == -DLM_ECANCEL)
6125 		error = 0;
6126 	return error;
6127 }
6128 
6129 /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6130    granted.  Regardless of what rsb queue the lock is on, it's removed and
6131    freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
6132    if our lock is PW/EX (it's ignored if our granted mode is smaller). */
6133 
6134 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6135 {
6136 	struct dlm_args args;
6137 	int error;
6138 
6139 	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6140 			lkb->lkb_ua, &args);
6141 
6142 	error = unlock_lock(ls, lkb, &args);
6143 	if (error == -DLM_EUNLOCK)
6144 		error = 0;
6145 	return error;
6146 }
6147 
6148 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6149    (which does lock_rsb) due to deadlock with receiving a message that does
6150    lock_rsb followed by dlm_user_add_cb() */
6151 
6152 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6153 				     struct dlm_user_proc *proc)
6154 {
6155 	struct dlm_lkb *lkb = NULL;
6156 
6157 	mutex_lock(&ls->ls_clear_proc_locks);
6158 	if (list_empty(&proc->locks))
6159 		goto out;
6160 
6161 	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6162 	list_del_init(&lkb->lkb_ownqueue);
6163 
6164 	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6165 		lkb->lkb_flags |= DLM_IFL_ORPHAN;
6166 	else
6167 		lkb->lkb_flags |= DLM_IFL_DEAD;
6168  out:
6169 	mutex_unlock(&ls->ls_clear_proc_locks);
6170 	return lkb;
6171 }
6172 
6173 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6174    1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6175    which we clear here. */
6176 
6177 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6178    list, and no more device_writes should add lkb's to proc->locks list; so we
6179    shouldn't need to take asts_spin or locks_spin here.  this assumes that
6180    device reads/writes/closes are serialized -- FIXME: we may need to serialize
6181    them ourselves. */
6182 
6183 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6184 {
6185 	struct dlm_lkb *lkb, *safe;
6186 
6187 	dlm_lock_recovery(ls);
6188 
6189 	while (1) {
6190 		lkb = del_proc_lock(ls, proc);
6191 		if (!lkb)
6192 			break;
6193 		del_timeout(lkb);
6194 		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6195 			orphan_proc_lock(ls, lkb);
6196 		else
6197 			unlock_proc_lock(ls, lkb);
6198 
6199 		/* this removes the reference for the proc->locks list
6200 		   added by dlm_user_request, it may result in the lkb
6201 		   being freed */
6202 
6203 		dlm_put_lkb(lkb);
6204 	}
6205 
6206 	mutex_lock(&ls->ls_clear_proc_locks);
6207 
6208 	/* in-progress unlocks */
6209 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6210 		list_del_init(&lkb->lkb_ownqueue);
6211 		lkb->lkb_flags |= DLM_IFL_DEAD;
6212 		dlm_put_lkb(lkb);
6213 	}
6214 
6215 	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6216 		memset(&lkb->lkb_callbacks, 0,
6217 		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6218 		list_del_init(&lkb->lkb_cb_list);
6219 		dlm_put_lkb(lkb);
6220 	}
6221 
6222 	mutex_unlock(&ls->ls_clear_proc_locks);
6223 	dlm_unlock_recovery(ls);
6224 }
6225 
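/* Like dlm_clear_proc_locks() but for a process being purged rather
   than closed: force-unlock everything on the proc's locks list, then
   clear the unlocking and asts lists under their spinlocks. */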
6226 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6227 {
6228 	struct dlm_lkb *lkb, *safe;
6229 
6230 	while (1) {
6231 		lkb = NULL;
6232 		spin_lock(&proc->locks_spin);
6233 		if (!list_empty(&proc->locks)) {
6234 			lkb = list_entry(proc->locks.next, struct dlm_lkb,
6235 					 lkb_ownqueue);
6236 			list_del_init(&lkb->lkb_ownqueue);
6237 		}
6238 		spin_unlock(&proc->locks_spin);
6239 
6240 		if (!lkb)
6241 			break;
6242 
6243 		lkb->lkb_flags |= DLM_IFL_DEAD;
6244 		unlock_proc_lock(ls, lkb);
6245 		dlm_put_lkb(lkb); /* ref from proc->locks list */
6246 	}
6247 
6248 	spin_lock(&proc->locks_spin);
6249 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6250 		list_del_init(&lkb->lkb_ownqueue);
6251 		lkb->lkb_flags |= DLM_IFL_DEAD;
6252 		dlm_put_lkb(lkb);
6253 	}
6254 	spin_unlock(&proc->locks_spin);
6255 
6256 	spin_lock(&proc->asts_spin);
6257 	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6258 		memset(&lkb->lkb_callbacks, 0,
6259 		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6260 		list_del_init(&lkb->lkb_cb_list);
6261 		dlm_put_lkb(lkb);
6262 	}
6263 	spin_unlock(&proc->asts_spin);
6264 }
6265 
6266 /* pid of 0 means purge all orphans */
6267 
6268 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6269 {
6270 	struct dlm_lkb *lkb, *safe;
6271 
6272 	mutex_lock(&ls->ls_orphans_mutex);
6273 	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6274 		if (pid && lkb->lkb_ownpid != pid)
6275 			continue;
6276 		unlock_proc_lock(ls, lkb);
6277 		list_del_init(&lkb->lkb_ownqueue);
6278 		dlm_put_lkb(lkb);
6279 	}
6280 	mutex_unlock(&ls->ls_orphans_mutex);
6281 }
6282 
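/* Ask a remote node to purge the orphan locks it holds for the given
   pid by sending it a DLM_MSG_PURGE message, which ends up running
   do_purge() above on that node. */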
6283 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6284 {
6285 	struct dlm_message *ms;
6286 	struct dlm_mhandle *mh;
6287 	int error;
6288 
6289 	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6290 				DLM_MSG_PURGE, &ms, &mh);
6291 	if (error)
6292 		return error;
6293 	ms->m_nodeid = nodeid;
6294 	ms->m_pid = pid;
6295 
6296 	return send_message(mh, ms);
6297 }
6298 
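/* Purge locks for a given nodeid/pid: forward the request to a remote
   node, or handle it locally, purging the proc's own locks when the
   pid is the caller's. */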
6299 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6300 		   int nodeid, int pid)
6301 {
6302 	int error = 0;
6303 
6304 	if (nodeid && (nodeid != dlm_our_nodeid())) {
6305 		error = send_purge(ls, nodeid, pid);
6306 	} else {
6307 		dlm_lock_recovery(ls);
6308 		if (pid == current->pid)
6309 			purge_proc_locks(ls, proc);
6310 		else
6311 			do_purge(ls, nodeid, pid);
6312 		dlm_unlock_recovery(ls);
6313 	}
6314 	return error;
6315 }
6316 
6317