1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
4 **
5 **  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
6 **
7 **
8 *******************************************************************************
9 ******************************************************************************/
10 
11 /* Central locking logic has four stages:
12 
13    dlm_lock()
14    dlm_unlock()
15 
16    request_lock(ls, lkb)
17    convert_lock(ls, lkb)
18    unlock_lock(ls, lkb)
19    cancel_lock(ls, lkb)
20 
21    _request_lock(r, lkb)
22    _convert_lock(r, lkb)
23    _unlock_lock(r, lkb)
24    _cancel_lock(r, lkb)
25 
26    do_request(r, lkb)
27    do_convert(r, lkb)
28    do_unlock(r, lkb)
29    do_cancel(r, lkb)
30 
31    Stage 1 (lock, unlock) is mainly about checking input args and
32    splitting into one of the four main operations:
33 
34        dlm_lock          = request_lock
35        dlm_lock+CONVERT  = convert_lock
36        dlm_unlock        = unlock_lock
37        dlm_unlock+CANCEL = cancel_lock
38 
39    Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
40    provided to the next stage.
41 
42    Stage 3, _xxxx_lock(), determines if the operation is local or remote.
43    When remote, it calls send_xxxx(), when local it calls do_xxxx().
44 
45    Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
46    given rsb and lkb and queues callbacks.
47 
48    For remote operations, send_xxxx() results in the corresponding do_xxxx()
49    function being executed on the remote node.  The connecting send/receive
50    calls on local (L) and remote (R) nodes:
51 
52    L: send_xxxx()              ->  R: receive_xxxx()
53                                    R: do_xxxx()
54    L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
55 */
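/* A minimal sketch of the stage 1 split described above.  This is
   illustrative only; the argument lists follow the summary above rather
   than the exact prototypes used later in this file:

	dlm_lock(...)
	{
		if (flags & DLM_LKF_CONVERT)
			return convert_lock(ls, lkb);
		return request_lock(ls, lkb);
	}

	dlm_unlock(...)
	{
		if (flags & DLM_LKF_CANCEL)
			return cancel_lock(ls, lkb);
		return unlock_lock(ls, lkb);
	}
*/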
56 #include <linux/types.h>
57 #include <linux/rbtree.h>
58 #include <linux/slab.h>
59 #include "dlm_internal.h"
60 #include <linux/dlm_device.h>
61 #include "memory.h"
62 #include "lowcomms.h"
63 #include "requestqueue.h"
64 #include "util.h"
65 #include "dir.h"
66 #include "member.h"
67 #include "lockspace.h"
68 #include "ast.h"
69 #include "lock.h"
70 #include "rcom.h"
71 #include "recover.h"
72 #include "lvb_table.h"
73 #include "user.h"
74 #include "config.h"
75 
76 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
77 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
78 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
79 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
80 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
81 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
82 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
83 static int send_remove(struct dlm_rsb *r);
84 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
85 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
86 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
87 				    struct dlm_message *ms);
88 static int receive_extralen(struct dlm_message *ms);
89 static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
90 static void del_timeout(struct dlm_lkb *lkb);
91 static void toss_rsb(struct kref *kref);
92 
93 /*
94  * Lock compatibility matrix - thanks Steve
95  * UN = Unlocked state. Not really a state, used as a flag
96  * PD = Padding. Used to make the matrix a nice power of two in size
97  * Other states are the same as the VMS DLM.
98  * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
99  */
100 
101 static const int __dlm_compat_matrix[8][8] = {
102       /* UN NL CR CW PR PW EX PD */
103         {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
104         {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
105         {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
106         {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
107         {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
108         {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
109         {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
110         {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
111 };
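/* Example reads of the matrix above, via dlm_modes_compat() defined below
   (grmode selects the row, rqmode the column, each offset by +1):

	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) == 1   two readers coexist
	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX) == 0   a writer excludes readers
	dlm_modes_compat(DLM_LOCK_NL, DLM_LOCK_EX) == 1   NL is compatible with anything
*/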
112 
113 /*
114  * This defines the direction of transfer of LVB data.
115  * Granted mode is the row; requested mode is the column.
116  * Usage: matrix[grmode+1][rqmode+1]
117  * 1 = LVB is returned to the caller
118  * 0 = LVB is written to the resource
119  * -1 = nothing happens to the LVB
120  */
121 
122 const int dlm_lvb_operations[8][8] = {
123         /* UN   NL  CR  CW  PR  PW  EX  PD*/
124         {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
125         {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
126         {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
127         {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
128         {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
129         {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
130         {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
131         {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
132 };
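/* Worked examples from dlm_lvb_operations above: converting down from EX
   to NL (row EX, column NL) yields 0, so the lock's LVB is written back to
   the resource; converting up from NL to EX (row NL, column EX) yields 1,
   so the resource's LVB is copied out to the caller; a -1 entry means the
   LVB is left untouched. */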
133 
134 #define modes_compat(gr, rq) \
135 	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
136 
137 int dlm_modes_compat(int mode1, int mode2)
138 {
139 	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
140 }
141 
142 /*
143  * Compatibility matrix for conversions with QUECVT set.
144  * Granted mode is the row; requested mode is the column.
145  * Usage: matrix[grmode+1][rqmode+1]
146  */
147 
148 static const int __quecvt_compat_matrix[8][8] = {
149       /* UN NL CR CW PR PW EX PD */
150         {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
151         {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
152         {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
153         {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
154         {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
155         {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
156         {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
157         {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
158 };
159 
160 void dlm_print_lkb(struct dlm_lkb *lkb)
161 {
162 	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
163 	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
164 	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
165 	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
166 	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
167 	       (unsigned long long)lkb->lkb_recover_seq);
168 }
169 
170 static void dlm_print_rsb(struct dlm_rsb *r)
171 {
172 	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
173 	       "rlc %d name %s\n",
174 	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
175 	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
176 	       r->res_name);
177 }
178 
179 void dlm_dump_rsb(struct dlm_rsb *r)
180 {
181 	struct dlm_lkb *lkb;
182 
183 	dlm_print_rsb(r);
184 
185 	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
186 	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
187 	printk(KERN_ERR "rsb lookup list\n");
188 	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
189 		dlm_print_lkb(lkb);
190 	printk(KERN_ERR "rsb grant queue:\n");
191 	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
192 		dlm_print_lkb(lkb);
193 	printk(KERN_ERR "rsb convert queue:\n");
194 	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
195 		dlm_print_lkb(lkb);
196 	printk(KERN_ERR "rsb wait queue:\n");
197 	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
198 		dlm_print_lkb(lkb);
199 }
200 
201 /* Threads cannot use the lockspace while it's being recovered */
202 
203 static inline void dlm_lock_recovery(struct dlm_ls *ls)
204 {
205 	down_read(&ls->ls_in_recovery);
206 }
207 
208 void dlm_unlock_recovery(struct dlm_ls *ls)
209 {
210 	up_read(&ls->ls_in_recovery);
211 }
212 
213 int dlm_lock_recovery_try(struct dlm_ls *ls)
214 {
215 	return down_read_trylock(&ls->ls_in_recovery);
216 }
217 
218 static inline int can_be_queued(struct dlm_lkb *lkb)
219 {
220 	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
221 }
222 
223 static inline int force_blocking_asts(struct dlm_lkb *lkb)
224 {
225 	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
226 }
227 
228 static inline int is_demoted(struct dlm_lkb *lkb)
229 {
230 	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
231 }
232 
233 static inline int is_altmode(struct dlm_lkb *lkb)
234 {
235 	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
236 }
237 
238 static inline int is_granted(struct dlm_lkb *lkb)
239 {
240 	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
241 }
242 
243 static inline int is_remote(struct dlm_rsb *r)
244 {
245 	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
246 	return !!r->res_nodeid;
247 }
248 
249 static inline int is_process_copy(struct dlm_lkb *lkb)
250 {
251 	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
252 }
253 
254 static inline int is_master_copy(struct dlm_lkb *lkb)
255 {
256 	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
257 }
258 
259 static inline int middle_conversion(struct dlm_lkb *lkb)
260 {
261 	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
262 	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
263 		return 1;
264 	return 0;
265 }
266 
267 static inline int down_conversion(struct dlm_lkb *lkb)
268 {
269 	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
270 }
271 
272 static inline int is_overlap_unlock(struct dlm_lkb *lkb)
273 {
274 	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
275 }
276 
277 static inline int is_overlap_cancel(struct dlm_lkb *lkb)
278 {
279 	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
280 }
281 
282 static inline int is_overlap(struct dlm_lkb *lkb)
283 {
284 	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
285 				  DLM_IFL_OVERLAP_CANCEL));
286 }
287 
288 static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
289 {
290 	if (is_master_copy(lkb))
291 		return;
292 
293 	del_timeout(lkb);
294 
295 	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
296 
297 	/* If the operation was a cancel, then return -DLM_ECANCEL; if a
298 	   timeout caused the cancel, then return -ETIMEDOUT. */
299 	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
300 		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
301 		rv = -ETIMEDOUT;
302 	}
303 
304 	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
305 		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
306 		rv = -EDEADLK;
307 	}
308 
309 	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
310 }
311 
312 static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
313 {
314 	queue_cast(r, lkb,
315 		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
316 }
317 
318 static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
319 {
320 	if (is_master_copy(lkb)) {
321 		send_bast(r, lkb, rqmode);
322 	} else {
323 		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
324 	}
325 }
326 
327 /*
328  * Basic operations on rsb's and lkb's
329  */
330 
331 /* This is only called to add a reference when the code already holds
332    a valid reference to the rsb, so there's no need for locking. */
333 
334 static inline void hold_rsb(struct dlm_rsb *r)
335 {
336 	kref_get(&r->res_ref);
337 }
338 
339 void dlm_hold_rsb(struct dlm_rsb *r)
340 {
341 	hold_rsb(r);
342 }
343 
344 /* When all references to the rsb are gone it's transferred to
345    the tossed list for later disposal. */
346 
347 static void put_rsb(struct dlm_rsb *r)
348 {
349 	struct dlm_ls *ls = r->res_ls;
350 	uint32_t bucket = r->res_bucket;
351 
352 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
353 	kref_put(&r->res_ref, toss_rsb);
354 	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
355 }
356 
357 void dlm_put_rsb(struct dlm_rsb *r)
358 {
359 	put_rsb(r);
360 }
361 
362 static int pre_rsb_struct(struct dlm_ls *ls)
363 {
364 	struct dlm_rsb *r1, *r2;
365 	int count = 0;
366 
367 	spin_lock(&ls->ls_new_rsb_spin);
368 	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
369 		spin_unlock(&ls->ls_new_rsb_spin);
370 		return 0;
371 	}
372 	spin_unlock(&ls->ls_new_rsb_spin);
373 
374 	r1 = dlm_allocate_rsb(ls);
375 	r2 = dlm_allocate_rsb(ls);
376 
377 	spin_lock(&ls->ls_new_rsb_spin);
378 	if (r1) {
379 		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
380 		ls->ls_new_rsb_count++;
381 	}
382 	if (r2) {
383 		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
384 		ls->ls_new_rsb_count++;
385 	}
386 	count = ls->ls_new_rsb_count;
387 	spin_unlock(&ls->ls_new_rsb_spin);
388 
389 	if (!count)
390 		return -ENOMEM;
391 	return 0;
392 }
393 
394 /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
395    unlock any spinlocks, go back and call pre_rsb_struct again.
396    Otherwise, take an rsb off the list and return it. */
397 
398 static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
399 			  struct dlm_rsb **r_ret)
400 {
401 	struct dlm_rsb *r;
402 	int count;
403 
404 	spin_lock(&ls->ls_new_rsb_spin);
405 	if (list_empty(&ls->ls_new_rsb)) {
406 		count = ls->ls_new_rsb_count;
407 		spin_unlock(&ls->ls_new_rsb_spin);
408 		log_debug(ls, "find_rsb retry %d %d %s",
409 			  count, dlm_config.ci_new_rsb_count, name);
410 		return -EAGAIN;
411 	}
412 
413 	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
414 	list_del(&r->res_hashchain);
415 	/* Convert the empty list_head to a NULL rb_node for tree usage: */
416 	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
417 	ls->ls_new_rsb_count--;
418 	spin_unlock(&ls->ls_new_rsb_spin);
419 
420 	r->res_ls = ls;
421 	r->res_length = len;
422 	memcpy(r->res_name, name, len);
423 	mutex_init(&r->res_mutex);
424 
425 	INIT_LIST_HEAD(&r->res_lookup);
426 	INIT_LIST_HEAD(&r->res_grantqueue);
427 	INIT_LIST_HEAD(&r->res_convertqueue);
428 	INIT_LIST_HEAD(&r->res_waitqueue);
429 	INIT_LIST_HEAD(&r->res_root_list);
430 	INIT_LIST_HEAD(&r->res_recover_list);
431 
432 	*r_ret = r;
433 	return 0;
434 }
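/* Typical caller pattern for the two helpers above; a condensed sketch of
   what find_rsb_dir()/find_rsb_nodir() below actually do:

 retry:
	error = pre_rsb_struct(ls);                  preallocate outside the lock
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	... search the keep and toss trees ...
	error = get_rsb_struct(ls, name, len, &r);   may return -EAGAIN
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;                          refill and try again
	}
*/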
435 
436 static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
437 {
438 	char maxname[DLM_RESNAME_MAXLEN];
439 
440 	memset(maxname, 0, DLM_RESNAME_MAXLEN);
441 	memcpy(maxname, name, nlen);
442 	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
443 }
444 
445 int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
446 			struct dlm_rsb **r_ret)
447 {
448 	struct rb_node *node = tree->rb_node;
449 	struct dlm_rsb *r;
450 	int rc;
451 
452 	while (node) {
453 		r = rb_entry(node, struct dlm_rsb, res_hashnode);
454 		rc = rsb_cmp(r, name, len);
455 		if (rc < 0)
456 			node = node->rb_left;
457 		else if (rc > 0)
458 			node = node->rb_right;
459 		else
460 			goto found;
461 	}
462 	*r_ret = NULL;
463 	return -EBADR;
464 
465  found:
466 	*r_ret = r;
467 	return 0;
468 }
469 
470 static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
471 {
472 	struct rb_node **newn = &tree->rb_node;
473 	struct rb_node *parent = NULL;
474 	int rc;
475 
476 	while (*newn) {
477 		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
478 					       res_hashnode);
479 
480 		parent = *newn;
481 		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
482 		if (rc < 0)
483 			newn = &parent->rb_left;
484 		else if (rc > 0)
485 			newn = &parent->rb_right;
486 		else {
487 			log_print("rsb_insert match");
488 			dlm_dump_rsb(rsb);
489 			dlm_dump_rsb(cur);
490 			return -EEXIST;
491 		}
492 	}
493 
494 	rb_link_node(&rsb->res_hashnode, parent, newn);
495 	rb_insert_color(&rsb->res_hashnode, tree);
496 	return 0;
497 }
498 
499 /*
500  * Find rsb in rsbtbl and potentially create/add one
501  *
502  * Delaying the release of rsb's has a similar benefit to applications keeping
503  * NL locks on an rsb, but without the guarantee that the cached master value
504  * will still be valid when the rsb is reused.  Apps aren't always smart enough
505  * to keep NL locks on an rsb that they may lock again shortly; this can lead
506  * to excessive master lookups and removals if we don't delay the release.
507  *
508  * Searching for an rsb means looking through both the normal list and toss
509  * list.  When found on the toss list the rsb is moved to the normal list with
510  * ref count of 1; when found on normal list the ref count is incremented.
511  *
512  * rsb's on the keep list are being used locally and refcounted.
513  * rsb's on the toss list are not being used locally, and are not refcounted.
514  *
515  * The toss list rsb's were either
516  * - previously used locally but not any more (were on keep list, then
517  *   moved to toss list when last refcount dropped)
518  * - created and put on toss list as a directory record for a lookup
519  *   (we are the dir node for the res, but are not using the res right now,
520  *   but some other node is)
521  *
522  * The purpose of find_rsb() is to return a refcounted rsb for local use.
523  * So, if the given rsb is on the toss list, it is moved to the keep list
524  * before being returned.
525  *
526  * toss_rsb() happens when all local usage of the rsb is done, i.e. no
527  * more refcounts exist, so the rsb is moved from the keep list to the
528  * toss list.
529  *
530  * rsb's on both keep and toss lists are used for doing a name to master
531  * lookups.  rsb's that are in use locally (and being refcounted) are on
532  * the keep list, rsb's that are not in use locally (not refcounted) and
533  * only exist for name/master lookups are on the toss list.
534  *
535  * rsb's on the toss list whose dir_nodeid is not local can have stale
536  * name/master mappings.  So, remote requests on such rsb's can potentially
537  * return with an error, which means the mapping is stale and needs to
538  * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
539  * first_lkid is to keep only a single outstanding request on an rsb
540  * while that rsb has a potentially stale master.)
541  */
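/* Keep/toss lifecycle in short form (all in this file):

	find_rsb()       toss tree -> keep tree (or newly created), refcounted
	put_rsb()        last kref_put calls toss_rsb(): keep tree -> toss tree
	shrink_bucket()  frees tossed rsb's after ci_toss_secs, sending a
	                 remove to the directory node first when needed
*/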
542 
543 static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
544 			uint32_t hash, uint32_t b,
545 			int dir_nodeid, int from_nodeid,
546 			unsigned int flags, struct dlm_rsb **r_ret)
547 {
548 	struct dlm_rsb *r = NULL;
549 	int our_nodeid = dlm_our_nodeid();
550 	int from_local = 0;
551 	int from_other = 0;
552 	int from_dir = 0;
553 	int create = 0;
554 	int error;
555 
556 	if (flags & R_RECEIVE_REQUEST) {
557 		if (from_nodeid == dir_nodeid)
558 			from_dir = 1;
559 		else
560 			from_other = 1;
561 	} else if (flags & R_REQUEST) {
562 		from_local = 1;
563 	}
564 
565 	/*
566 	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
567 	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
568 	 * we're the new master.  Our local recovery may not have set
569 	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
570 	 * create the rsb; dlm_recover_process_copy() will handle EBADR
571 	 * by resending.
572 	 *
573 	 * If someone sends us a request, we are the dir node, and we do
574 	 * not find the rsb anywhere, then recreate it.  This happens if
575 	 * someone sends us a request after we have removed/freed an rsb
576 	 * from our toss list.  (They sent a request instead of lookup
577 	 * because they are using an rsb from their toss list.)
578 	 */
579 
580 	if (from_local || from_dir ||
581 	    (from_other && (dir_nodeid == our_nodeid))) {
582 		create = 1;
583 	}
584 
585  retry:
586 	if (create) {
587 		error = pre_rsb_struct(ls);
588 		if (error < 0)
589 			goto out;
590 	}
591 
592 	spin_lock(&ls->ls_rsbtbl[b].lock);
593 
594 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
595 	if (error)
596 		goto do_toss;
597 
598 	/*
599 	 * rsb is active, so we can't check master_nodeid without lock_rsb.
600 	 */
601 
602 	kref_get(&r->res_ref);
603 	error = 0;
604 	goto out_unlock;
605 
606 
607  do_toss:
608 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
609 	if (error)
610 		goto do_new;
611 
612 	/*
613 	 * rsb found inactive (master_nodeid may be out of date unless
614 	 * we are the dir_nodeid or were the master)  No other thread
615 	 * is using this rsb because it's on the toss list, so we can
616 	 * look at or update res_master_nodeid without lock_rsb.
617 	 */
618 
619 	if ((r->res_master_nodeid != our_nodeid) && from_other) {
620 		/* our rsb was not master, and another node (not the dir node)
621 		   has sent us a request */
622 		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
623 			  from_nodeid, r->res_master_nodeid, dir_nodeid,
624 			  r->res_name);
625 		error = -ENOTBLK;
626 		goto out_unlock;
627 	}
628 
629 	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
630 		/* don't think this should ever happen */
631 		log_error(ls, "find_rsb toss from_dir %d master %d",
632 			  from_nodeid, r->res_master_nodeid);
633 		dlm_print_rsb(r);
634 		/* fix it and go on */
635 		r->res_master_nodeid = our_nodeid;
636 		r->res_nodeid = 0;
637 		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
638 		r->res_first_lkid = 0;
639 	}
640 
641 	if (from_local && (r->res_master_nodeid != our_nodeid)) {
642 		/* Because we have held no locks on this rsb,
643 		   res_master_nodeid could have become stale. */
644 		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
645 		r->res_first_lkid = 0;
646 	}
647 
648 	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
649 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
650 	goto out_unlock;
651 
652 
653  do_new:
654 	/*
655 	 * rsb not found
656 	 */
657 
658 	if (error == -EBADR && !create)
659 		goto out_unlock;
660 
661 	error = get_rsb_struct(ls, name, len, &r);
662 	if (error == -EAGAIN) {
663 		spin_unlock(&ls->ls_rsbtbl[b].lock);
664 		goto retry;
665 	}
666 	if (error)
667 		goto out_unlock;
668 
669 	r->res_hash = hash;
670 	r->res_bucket = b;
671 	r->res_dir_nodeid = dir_nodeid;
672 	kref_init(&r->res_ref);
673 
674 	if (from_dir) {
675 		/* want to see how often this happens */
676 		log_debug(ls, "find_rsb new from_dir %d recreate %s",
677 			  from_nodeid, r->res_name);
678 		r->res_master_nodeid = our_nodeid;
679 		r->res_nodeid = 0;
680 		goto out_add;
681 	}
682 
683 	if (from_other && (dir_nodeid != our_nodeid)) {
684 		/* should never happen */
685 		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
686 			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
687 		dlm_free_rsb(r);
688 		r = NULL;
689 		error = -ENOTBLK;
690 		goto out_unlock;
691 	}
692 
693 	if (from_other) {
694 		log_debug(ls, "find_rsb new from_other %d dir %d %s",
695 			  from_nodeid, dir_nodeid, r->res_name);
696 	}
697 
698 	if (dir_nodeid == our_nodeid) {
699 		/* When we are the dir nodeid, we can set the master
700 		   node immediately */
701 		r->res_master_nodeid = our_nodeid;
702 		r->res_nodeid = 0;
703 	} else {
704 		/* set_master will send_lookup to dir_nodeid */
705 		r->res_master_nodeid = 0;
706 		r->res_nodeid = -1;
707 	}
708 
709  out_add:
710 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
711  out_unlock:
712 	spin_unlock(&ls->ls_rsbtbl[b].lock);
713  out:
714 	*r_ret = r;
715 	return error;
716 }
717 
718 /* During recovery, other nodes can send us new MSTCPY locks (from
719    dlm_recover_locks) before we've made ourselves master (in
720    dlm_recover_masters). */
721 
722 static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
723 			  uint32_t hash, uint32_t b,
724 			  int dir_nodeid, int from_nodeid,
725 			  unsigned int flags, struct dlm_rsb **r_ret)
726 {
727 	struct dlm_rsb *r = NULL;
728 	int our_nodeid = dlm_our_nodeid();
729 	int recover = (flags & R_RECEIVE_RECOVER);
730 	int error;
731 
732  retry:
733 	error = pre_rsb_struct(ls);
734 	if (error < 0)
735 		goto out;
736 
737 	spin_lock(&ls->ls_rsbtbl[b].lock);
738 
739 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
740 	if (error)
741 		goto do_toss;
742 
743 	/*
744 	 * rsb is active, so we can't check master_nodeid without lock_rsb.
745 	 */
746 
747 	kref_get(&r->res_ref);
748 	goto out_unlock;
749 
750 
751  do_toss:
752 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
753 	if (error)
754 		goto do_new;
755 
756 	/*
757 	 * rsb found inactive. No other thread is using this rsb because
758 	 * it's on the toss list, so we can look at or update
759 	 * res_master_nodeid without lock_rsb.
760 	 */
761 
762 	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
763 		/* our rsb is not master, and another node has sent us a
764 		   request; this should never happen */
765 		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
766 			  from_nodeid, r->res_master_nodeid, dir_nodeid);
767 		dlm_print_rsb(r);
768 		error = -ENOTBLK;
769 		goto out_unlock;
770 	}
771 
772 	if (!recover && (r->res_master_nodeid != our_nodeid) &&
773 	    (dir_nodeid == our_nodeid)) {
774 		/* our rsb is not master, and we are dir; may as well fix it;
775 		   this should never happen */
776 		log_error(ls, "find_rsb toss our %d master %d dir %d",
777 			  our_nodeid, r->res_master_nodeid, dir_nodeid);
778 		dlm_print_rsb(r);
779 		r->res_master_nodeid = our_nodeid;
780 		r->res_nodeid = 0;
781 	}
782 
783 	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
784 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
785 	goto out_unlock;
786 
787 
788  do_new:
789 	/*
790 	 * rsb not found
791 	 */
792 
793 	error = get_rsb_struct(ls, name, len, &r);
794 	if (error == -EAGAIN) {
795 		spin_unlock(&ls->ls_rsbtbl[b].lock);
796 		goto retry;
797 	}
798 	if (error)
799 		goto out_unlock;
800 
801 	r->res_hash = hash;
802 	r->res_bucket = b;
803 	r->res_dir_nodeid = dir_nodeid;
804 	r->res_master_nodeid = dir_nodeid;
805 	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
806 	kref_init(&r->res_ref);
807 
808 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
809  out_unlock:
810 	spin_unlock(&ls->ls_rsbtbl[b].lock);
811  out:
812 	*r_ret = r;
813 	return error;
814 }
815 
816 static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
817 		    unsigned int flags, struct dlm_rsb **r_ret)
818 {
819 	uint32_t hash, b;
820 	int dir_nodeid;
821 
822 	if (len > DLM_RESNAME_MAXLEN)
823 		return -EINVAL;
824 
825 	hash = jhash(name, len, 0);
826 	b = hash & (ls->ls_rsbtbl_size - 1);
827 
828 	dir_nodeid = dlm_hash2nodeid(ls, hash);
829 
830 	if (dlm_no_directory(ls))
831 		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
832 				      from_nodeid, flags, r_ret);
833 	else
834 		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
835 				      from_nodeid, flags, r_ret);
836 }
837 
838 /* we have received a request and found that res_master_nodeid != our_nodeid,
839    so we need to return an error or make ourselves the master */
840 
841 static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
842 				  int from_nodeid)
843 {
844 	if (dlm_no_directory(ls)) {
845 		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
846 			  from_nodeid, r->res_master_nodeid,
847 			  r->res_dir_nodeid);
848 		dlm_print_rsb(r);
849 		return -ENOTBLK;
850 	}
851 
852 	if (from_nodeid != r->res_dir_nodeid) {
853 		/* our rsb is not master, and another node (not the dir node)
854 	   	   has sent us a request.  this is much more common when our
855 	   	   master_nodeid is zero, so limit debug to non-zero.  */
856 
857 		if (r->res_master_nodeid) {
858 			log_debug(ls, "validate master from_other %d master %d "
859 				  "dir %d first %x %s", from_nodeid,
860 				  r->res_master_nodeid, r->res_dir_nodeid,
861 				  r->res_first_lkid, r->res_name);
862 		}
863 		return -ENOTBLK;
864 	} else {
865 		/* our rsb is not master, but the dir nodeid has sent us a
866 	   	   request; this could happen with master 0 / res_nodeid -1 */
867 
868 		if (r->res_master_nodeid) {
869 			log_error(ls, "validate master from_dir %d master %d "
870 				  "first %x %s",
871 				  from_nodeid, r->res_master_nodeid,
872 				  r->res_first_lkid, r->res_name);
873 		}
874 
875 		r->res_master_nodeid = dlm_our_nodeid();
876 		r->res_nodeid = 0;
877 		return 0;
878 	}
879 }
880 
881 /*
882  * We're the dir node for this res and another node wants to know the
883  * master nodeid.  During normal operation (non recovery) this is only
884  * called from receive_lookup(); master lookups when the local node is
885  * the dir node are done by find_rsb().
886  *
887  * normal operation, we are the dir node for a resource
888  * . _request_lock
889  * . set_master
890  * . send_lookup
891  * . receive_lookup
892  * . dlm_master_lookup flags 0
893  *
894  * recover directory, we are rebuilding dir for all resources
895  * . dlm_recover_directory
896  * . dlm_rcom_names
897  *   remote node sends back the rsb names it is master of and we are dir of
898  * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
899  *   we either create new rsb setting remote node as master, or find existing
900  *   rsb and set master to be the remote node.
901  *
902  * recover masters, we are finding the new master for resources
903  * . dlm_recover_masters
904  * . recover_master
905  * . dlm_send_rcom_lookup
906  * . receive_rcom_lookup
907  * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
908  */
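/* A sketch of the normal-operation call listed above (receive_lookup with
   no recovery flags); the names here just mirror the parameters of
   dlm_master_lookup() below:

	error = dlm_master_lookup(ls, from_nodeid, name, len, 0,
				  &r_nodeid, &result);

   On success *r_nodeid is the master nodeid; result is DLM_LU_MATCH when
   an existing rsb was found, or DLM_LU_ADD when a new directory record
   was created with from_nodeid as master. */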
909 
910 int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
911 		      unsigned int flags, int *r_nodeid, int *result)
912 {
913 	struct dlm_rsb *r = NULL;
914 	uint32_t hash, b;
915 	int from_master = (flags & DLM_LU_RECOVER_DIR);
916 	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
917 	int our_nodeid = dlm_our_nodeid();
918 	int dir_nodeid, error, toss_list = 0;
919 
920 	if (len > DLM_RESNAME_MAXLEN)
921 		return -EINVAL;
922 
923 	if (from_nodeid == our_nodeid) {
924 		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
925 			  our_nodeid, flags);
926 		return -EINVAL;
927 	}
928 
929 	hash = jhash(name, len, 0);
930 	b = hash & (ls->ls_rsbtbl_size - 1);
931 
932 	dir_nodeid = dlm_hash2nodeid(ls, hash);
933 	if (dir_nodeid != our_nodeid) {
934 		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
935 			  from_nodeid, dir_nodeid, our_nodeid, hash,
936 			  ls->ls_num_nodes);
937 		*r_nodeid = -1;
938 		return -EINVAL;
939 	}
940 
941  retry:
942 	error = pre_rsb_struct(ls);
943 	if (error < 0)
944 		return error;
945 
946 	spin_lock(&ls->ls_rsbtbl[b].lock);
947 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
948 	if (!error) {
949 		/* because the rsb is active, we need to lock_rsb before
950 		   checking/changing res_master_nodeid */
951 
952 		hold_rsb(r);
953 		spin_unlock(&ls->ls_rsbtbl[b].lock);
954 		lock_rsb(r);
955 		goto found;
956 	}
957 
958 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
959 	if (error)
960 		goto not_found;
961 
962 	/* because the rsb is inactive (on toss list), it's not refcounted
963 	   and lock_rsb is not used, but is protected by the rsbtbl lock */
964 
965 	toss_list = 1;
966  found:
967 	if (r->res_dir_nodeid != our_nodeid) {
968 		/* should not happen, but may as well fix it and carry on */
969 		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
970 			  r->res_dir_nodeid, our_nodeid, r->res_name);
971 		r->res_dir_nodeid = our_nodeid;
972 	}
973 
974 	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
975 		/* Recovery uses this function to set a new master when
976 		   the previous master failed.  Setting NEW_MASTER will
977 		   force dlm_recover_masters to call recover_master on this
978 		   rsb even though the res_nodeid is no longer removed. */
979 
980 		r->res_master_nodeid = from_nodeid;
981 		r->res_nodeid = from_nodeid;
982 		rsb_set_flag(r, RSB_NEW_MASTER);
983 
984 		if (toss_list) {
985 			/* I don't think we should ever find it on toss list. */
986 			log_error(ls, "dlm_master_lookup fix_master on toss");
987 			dlm_dump_rsb(r);
988 		}
989 	}
990 
991 	if (from_master && (r->res_master_nodeid != from_nodeid)) {
992 		/* this will happen if from_nodeid became master during
993 		   a previous recovery cycle, and we aborted the previous
994 		   cycle before recovering this master value */
995 
996 		log_limit(ls, "dlm_master_lookup from_master %d "
997 			  "master_nodeid %d res_nodeid %d first %x %s",
998 			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
999 			  r->res_first_lkid, r->res_name);
1000 
1001 		if (r->res_master_nodeid == our_nodeid) {
1002 			log_error(ls, "from_master %d our_master", from_nodeid);
1003 			dlm_dump_rsb(r);
1004 			goto out_found;
1005 		}
1006 
1007 		r->res_master_nodeid = from_nodeid;
1008 		r->res_nodeid = from_nodeid;
1009 		rsb_set_flag(r, RSB_NEW_MASTER);
1010 	}
1011 
1012 	if (!r->res_master_nodeid) {
1013 		/* this will happen if recovery happens while we're looking
1014 		   up the master for this rsb */
1015 
1016 		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
1017 			  from_nodeid, r->res_first_lkid, r->res_name);
1018 		r->res_master_nodeid = from_nodeid;
1019 		r->res_nodeid = from_nodeid;
1020 	}
1021 
1022 	if (!from_master && !fix_master &&
1023 	    (r->res_master_nodeid == from_nodeid)) {
1024 		/* this can happen when the master sends remove, the dir node
1025 		   finds the rsb on the keep list and ignores the remove,
1026 		   and the former master sends a lookup */
1027 
1028 		log_limit(ls, "dlm_master_lookup from master %d flags %x "
1029 			  "first %x %s", from_nodeid, flags,
1030 			  r->res_first_lkid, r->res_name);
1031 	}
1032 
1033  out_found:
1034 	*r_nodeid = r->res_master_nodeid;
1035 	if (result)
1036 		*result = DLM_LU_MATCH;
1037 
1038 	if (toss_list) {
1039 		r->res_toss_time = jiffies;
1040 		/* the rsb was inactive (on toss list) */
1041 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1042 	} else {
1043 		/* the rsb was active */
1044 		unlock_rsb(r);
1045 		put_rsb(r);
1046 	}
1047 	return 0;
1048 
1049  not_found:
1050 	error = get_rsb_struct(ls, name, len, &r);
1051 	if (error == -EAGAIN) {
1052 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1053 		goto retry;
1054 	}
1055 	if (error)
1056 		goto out_unlock;
1057 
1058 	r->res_hash = hash;
1059 	r->res_bucket = b;
1060 	r->res_dir_nodeid = our_nodeid;
1061 	r->res_master_nodeid = from_nodeid;
1062 	r->res_nodeid = from_nodeid;
1063 	kref_init(&r->res_ref);
1064 	r->res_toss_time = jiffies;
1065 
1066 	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
1067 	if (error) {
1068 		/* should never happen */
1069 		dlm_free_rsb(r);
1070 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1071 		goto retry;
1072 	}
1073 
1074 	if (result)
1075 		*result = DLM_LU_ADD;
1076 	*r_nodeid = from_nodeid;
1077 	error = 0;
1078  out_unlock:
1079 	spin_unlock(&ls->ls_rsbtbl[b].lock);
1080 	return error;
1081 }
1082 
1083 static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
1084 {
1085 	struct rb_node *n;
1086 	struct dlm_rsb *r;
1087 	int i;
1088 
1089 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1090 		spin_lock(&ls->ls_rsbtbl[i].lock);
1091 		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
1092 			r = rb_entry(n, struct dlm_rsb, res_hashnode);
1093 			if (r->res_hash == hash)
1094 				dlm_dump_rsb(r);
1095 		}
1096 		spin_unlock(&ls->ls_rsbtbl[i].lock);
1097 	}
1098 }
1099 
1100 void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
1101 {
1102 	struct dlm_rsb *r = NULL;
1103 	uint32_t hash, b;
1104 	int error;
1105 
1106 	hash = jhash(name, len, 0);
1107 	b = hash & (ls->ls_rsbtbl_size - 1);
1108 
1109 	spin_lock(&ls->ls_rsbtbl[b].lock);
1110 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
1111 	if (!error)
1112 		goto out_dump;
1113 
1114 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1115 	if (error)
1116 		goto out;
1117  out_dump:
1118 	dlm_dump_rsb(r);
1119  out:
1120 	spin_unlock(&ls->ls_rsbtbl[b].lock);
1121 }
1122 
1123 static void toss_rsb(struct kref *kref)
1124 {
1125 	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1126 	struct dlm_ls *ls = r->res_ls;
1127 
1128 	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
1129 	kref_init(&r->res_ref);
1130 	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
1131 	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
1132 	r->res_toss_time = jiffies;
1133 	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
1134 	if (r->res_lvbptr) {
1135 		dlm_free_lvb(r->res_lvbptr);
1136 		r->res_lvbptr = NULL;
1137 	}
1138 }
1139 
1140 /* See comment for unhold_lkb */
1141 
1142 static void unhold_rsb(struct dlm_rsb *r)
1143 {
1144 	int rv;
1145 	rv = kref_put(&r->res_ref, toss_rsb);
1146 	DLM_ASSERT(!rv, dlm_dump_rsb(r););
1147 }
1148 
1149 static void kill_rsb(struct kref *kref)
1150 {
1151 	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1152 
1153 	/* All work is done after the return from kref_put() so we
1154 	   can release the write_lock before the remove and free. */
1155 
1156 	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
1157 	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
1158 	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
1159 	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
1160 	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
1161 	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
1162 }
1163 
1164 /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
1165    The rsb must exist as long as any lkb's for it do. */
1166 
1167 static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1168 {
1169 	hold_rsb(r);
1170 	lkb->lkb_resource = r;
1171 }
1172 
1173 static void detach_lkb(struct dlm_lkb *lkb)
1174 {
1175 	if (lkb->lkb_resource) {
1176 		put_rsb(lkb->lkb_resource);
1177 		lkb->lkb_resource = NULL;
1178 	}
1179 }
1180 
1181 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1182 {
1183 	struct dlm_lkb *lkb;
1184 	int rv;
1185 
1186 	lkb = dlm_allocate_lkb(ls);
1187 	if (!lkb)
1188 		return -ENOMEM;
1189 
1190 	lkb->lkb_nodeid = -1;
1191 	lkb->lkb_grmode = DLM_LOCK_IV;
1192 	kref_init(&lkb->lkb_ref);
1193 	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
1194 	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
1195 	INIT_LIST_HEAD(&lkb->lkb_time_list);
1196 	INIT_LIST_HEAD(&lkb->lkb_cb_list);
1197 	mutex_init(&lkb->lkb_cb_mutex);
1198 	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
1199 
1200 	idr_preload(GFP_NOFS);
1201 	spin_lock(&ls->ls_lkbidr_spin);
1202 	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
1203 	if (rv >= 0)
1204 		lkb->lkb_id = rv;
1205 	spin_unlock(&ls->ls_lkbidr_spin);
1206 	idr_preload_end();
1207 
1208 	if (rv < 0) {
1209 		log_error(ls, "create_lkb idr error %d", rv);
1210 		dlm_free_lkb(lkb);
1211 		return rv;
1212 	}
1213 
1214 	*lkb_ret = lkb;
1215 	return 0;
1216 }
1217 
1218 static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
1219 {
1220 	struct dlm_lkb *lkb;
1221 
1222 	spin_lock(&ls->ls_lkbidr_spin);
1223 	lkb = idr_find(&ls->ls_lkbidr, lkid);
1224 	if (lkb)
1225 		kref_get(&lkb->lkb_ref);
1226 	spin_unlock(&ls->ls_lkbidr_spin);
1227 
1228 	*lkb_ret = lkb;
1229 	return lkb ? 0 : -ENOENT;
1230 }
1231 
1232 static void kill_lkb(struct kref *kref)
1233 {
1234 	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
1235 
1236 	/* All work is done after the return from kref_put() so we
1237 	   can release the write_lock before the detach_lkb */
1238 
1239 	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1240 }
1241 
1242 /* __put_lkb() is used when an lkb may not have an rsb attached to
1243    it so we need to provide the lockspace explicitly */
1244 
1245 static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
1246 {
1247 	uint32_t lkid = lkb->lkb_id;
1248 
1249 	spin_lock(&ls->ls_lkbidr_spin);
1250 	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
1251 		idr_remove(&ls->ls_lkbidr, lkid);
1252 		spin_unlock(&ls->ls_lkbidr_spin);
1253 
1254 		detach_lkb(lkb);
1255 
1256 		/* for local/process lkbs, lvbptr points to caller's lksb */
1257 		if (lkb->lkb_lvbptr && is_master_copy(lkb))
1258 			dlm_free_lvb(lkb->lkb_lvbptr);
1259 		dlm_free_lkb(lkb);
1260 		return 1;
1261 	} else {
1262 		spin_unlock(&ls->ls_lkbidr_spin);
1263 		return 0;
1264 	}
1265 }
1266 
1267 int dlm_put_lkb(struct dlm_lkb *lkb)
1268 {
1269 	struct dlm_ls *ls;
1270 
1271 	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
1272 	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
1273 
1274 	ls = lkb->lkb_resource->res_ls;
1275 	return __put_lkb(ls, lkb);
1276 }
1277 
1278 /* This is only called to add a reference when the code already holds
1279    a valid reference to the lkb, so there's no need for locking. */
1280 
1281 static inline void hold_lkb(struct dlm_lkb *lkb)
1282 {
1283 	kref_get(&lkb->lkb_ref);
1284 }
1285 
1286 /* This is called when we need to remove a reference and are certain
1287    it's not the last ref.  e.g. del_lkb is always called between a
1288    find_lkb/put_lkb and is always the inverse of a previous add_lkb.
1289    put_lkb would work fine, but would involve unnecessary locking */
1290 
1291 static inline void unhold_lkb(struct dlm_lkb *lkb)
1292 {
1293 	int rv;
1294 	rv = kref_put(&lkb->lkb_ref, kill_lkb);
1295 	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
1296 }
1297 
1298 static void lkb_add_ordered(struct list_head *new, struct list_head *head,
1299 			    int mode)
1300 {
1301 	struct dlm_lkb *lkb = NULL;
1302 
1303 	list_for_each_entry(lkb, head, lkb_statequeue)
1304 		if (lkb->lkb_rqmode < mode)
1305 			break;
1306 
1307 	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
1308 }
1309 
1310 /* add/remove lkb to rsb's grant/convert/wait queue */
1311 
1312 static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
1313 {
1314 	kref_get(&lkb->lkb_ref);
1315 
1316 	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1317 
1318 	lkb->lkb_timestamp = ktime_get();
1319 
1320 	lkb->lkb_status = status;
1321 
1322 	switch (status) {
1323 	case DLM_LKSTS_WAITING:
1324 		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1325 			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
1326 		else
1327 			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
1328 		break;
1329 	case DLM_LKSTS_GRANTED:
1330 		/* convention says granted locks kept in order of grmode */
1331 		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
1332 				lkb->lkb_grmode);
1333 		break;
1334 	case DLM_LKSTS_CONVERT:
1335 		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1336 			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
1337 		else
1338 			list_add_tail(&lkb->lkb_statequeue,
1339 				      &r->res_convertqueue);
1340 		break;
1341 	default:
1342 		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
1343 	}
1344 }
1345 
1346 static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1347 {
1348 	lkb->lkb_status = 0;
1349 	list_del(&lkb->lkb_statequeue);
1350 	unhold_lkb(lkb);
1351 }
1352 
1353 static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
1354 {
1355 	hold_lkb(lkb);
1356 	del_lkb(r, lkb);
1357 	add_lkb(r, lkb, sts);
1358 	unhold_lkb(lkb);
1359 }
1360 
1361 static int msg_reply_type(int mstype)
1362 {
1363 	switch (mstype) {
1364 	case DLM_MSG_REQUEST:
1365 		return DLM_MSG_REQUEST_REPLY;
1366 	case DLM_MSG_CONVERT:
1367 		return DLM_MSG_CONVERT_REPLY;
1368 	case DLM_MSG_UNLOCK:
1369 		return DLM_MSG_UNLOCK_REPLY;
1370 	case DLM_MSG_CANCEL:
1371 		return DLM_MSG_CANCEL_REPLY;
1372 	case DLM_MSG_LOOKUP:
1373 		return DLM_MSG_LOOKUP_REPLY;
1374 	}
1375 	return -1;
1376 }
1377 
1378 static int nodeid_warned(int nodeid, int num_nodes, int *warned)
1379 {
1380 	int i;
1381 
1382 	for (i = 0; i < num_nodes; i++) {
1383 		if (!warned[i]) {
1384 			warned[i] = nodeid;
1385 			return 0;
1386 		}
1387 		if (warned[i] == nodeid)
1388 			return 1;
1389 	}
1390 	return 0;
1391 }
1392 
1393 void dlm_scan_waiters(struct dlm_ls *ls)
1394 {
1395 	struct dlm_lkb *lkb;
1396 	s64 us;
1397 	s64 debug_maxus = 0;
1398 	u32 debug_scanned = 0;
1399 	u32 debug_expired = 0;
1400 	int num_nodes = 0;
1401 	int *warned = NULL;
1402 
1403 	if (!dlm_config.ci_waitwarn_us)
1404 		return;
1405 
1406 	mutex_lock(&ls->ls_waiters_mutex);
1407 
1408 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1409 		if (!lkb->lkb_wait_time)
1410 			continue;
1411 
1412 		debug_scanned++;
1413 
1414 		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
1415 
1416 		if (us < dlm_config.ci_waitwarn_us)
1417 			continue;
1418 
1419 		lkb->lkb_wait_time = 0;
1420 
1421 		debug_expired++;
1422 		if (us > debug_maxus)
1423 			debug_maxus = us;
1424 
1425 		if (!num_nodes) {
1426 			num_nodes = ls->ls_num_nodes;
1427 			warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
1428 		}
1429 		if (!warned)
1430 			continue;
1431 		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
1432 			continue;
1433 
1434 		log_error(ls, "waitwarn %x %lld %d us check connection to "
1435 			  "node %d", lkb->lkb_id, (long long)us,
1436 			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
1437 	}
1438 	mutex_unlock(&ls->ls_waiters_mutex);
1439 	kfree(warned);
1440 
1441 	if (debug_expired)
1442 		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
1443 			  debug_scanned, debug_expired,
1444 			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
1445 }
1446 
1447 /* add/remove lkb from global waiters list of lkb's waiting for
1448    a reply from a remote node */
1449 
1450 static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
1451 {
1452 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1453 	int error = 0;
1454 
1455 	mutex_lock(&ls->ls_waiters_mutex);
1456 
1457 	if (is_overlap_unlock(lkb) ||
1458 	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
1459 		error = -EINVAL;
1460 		goto out;
1461 	}
1462 
1463 	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
1464 		switch (mstype) {
1465 		case DLM_MSG_UNLOCK:
1466 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
1467 			break;
1468 		case DLM_MSG_CANCEL:
1469 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
1470 			break;
1471 		default:
1472 			error = -EBUSY;
1473 			goto out;
1474 		}
1475 		lkb->lkb_wait_count++;
1476 		hold_lkb(lkb);
1477 
1478 		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
1479 			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
1480 			  lkb->lkb_wait_count, lkb->lkb_flags);
1481 		goto out;
1482 	}
1483 
1484 	DLM_ASSERT(!lkb->lkb_wait_count,
1485 		   dlm_print_lkb(lkb);
1486 		   printk("wait_count %d\n", lkb->lkb_wait_count););
1487 
1488 	lkb->lkb_wait_count++;
1489 	lkb->lkb_wait_type = mstype;
1490 	lkb->lkb_wait_time = ktime_get();
1491 	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
1492 	hold_lkb(lkb);
1493 	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
1494  out:
1495 	if (error)
1496 		log_error(ls, "addwait error %x %d flags %x %d %d %s",
1497 			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
1498 			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
1499 	mutex_unlock(&ls->ls_waiters_mutex);
1500 	return error;
1501 }
1502 
1503 /* We clear the RESEND flag because we might be taking an lkb off the waiters
1504    list as part of process_requestqueue (e.g. a lookup that has an optimized
1505    request reply on the requestqueue) between dlm_recover_waiters_pre() which
1506    set RESEND and dlm_recover_waiters_post() */
1507 
1508 static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
1509 				struct dlm_message *ms)
1510 {
1511 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1512 	int overlap_done = 0;
1513 
1514 	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
1515 		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
1516 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
1517 		overlap_done = 1;
1518 		goto out_del;
1519 	}
1520 
1521 	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
1522 		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
1523 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1524 		overlap_done = 1;
1525 		goto out_del;
1526 	}
1527 
1528 	/* Cancel state was preemptively cleared by a successful convert;
1529 	   see the next comment.  Nothing to do here. */
1530 
1531 	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
1532 	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
1533 		log_debug(ls, "remwait %x cancel_reply wait_type %d",
1534 			  lkb->lkb_id, lkb->lkb_wait_type);
1535 		return -1;
1536 	}
1537 
1538 	/* Remove for the convert reply, and preemptively remove for the
1539 	   cancel reply.  A convert has been granted while there's still
1540 	   an outstanding cancel on it (the cancel is moot and the result
1541 	   in the cancel reply should be 0).  We preempt the cancel reply
1542 	   because the app gets the convert result and then can follow up
1543 	   with another op, like convert.  This subsequent op would see the
1544 	   lingering state of the cancel and fail with -EBUSY. */
1545 
1546 	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
1547 	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
1548 	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
1549 		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1550 			  lkb->lkb_id);
1551 		lkb->lkb_wait_type = 0;
1552 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1553 		lkb->lkb_wait_count--;
1554 		unhold_lkb(lkb);
1555 		goto out_del;
1556 	}
1557 
1558 	/* N.B. type of reply may not always correspond to type of original
1559 	   msg due to lookup->request optimization, verify others? */
1560 
1561 	if (lkb->lkb_wait_type) {
1562 		lkb->lkb_wait_type = 0;
1563 		goto out_del;
1564 	}
1565 
1566 	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
1567 		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
1568 		  mstype, lkb->lkb_flags);
1569 	return -1;
1570 
1571  out_del:
1572 	/* the force-unlock/cancel has completed and we haven't recvd a reply
1573 	   to the op that was in progress prior to the unlock/cancel; we
1574 	   give up on any reply to the earlier op.  FIXME: not sure when/how
1575 	   this would happen */
1576 
1577 	if (overlap_done && lkb->lkb_wait_type) {
1578 		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1579 			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
1580 		lkb->lkb_wait_count--;
1581 		unhold_lkb(lkb);
1582 		lkb->lkb_wait_type = 0;
1583 	}
1584 
1585 	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
1586 
1587 	lkb->lkb_flags &= ~DLM_IFL_RESEND;
1588 	lkb->lkb_wait_count--;
1589 	if (!lkb->lkb_wait_count)
1590 		list_del_init(&lkb->lkb_wait_reply);
1591 	unhold_lkb(lkb);
1592 	return 0;
1593 }
1594 
1595 static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1596 {
1597 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1598 	int error;
1599 
1600 	mutex_lock(&ls->ls_waiters_mutex);
1601 	error = _remove_from_waiters(lkb, mstype, NULL);
1602 	mutex_unlock(&ls->ls_waiters_mutex);
1603 	return error;
1604 }
1605 
1606 /* Handles situations where we might be processing a "fake" or "stub" reply in
1607    which we can't try to take waiters_mutex again. */
1608 
1609 static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1610 {
1611 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1612 	int error;
1613 
1614 	if (ms->m_flags != DLM_IFL_STUB_MS)
1615 		mutex_lock(&ls->ls_waiters_mutex);
1616 	error = _remove_from_waiters(lkb, ms->m_type, ms);
1617 	if (ms->m_flags != DLM_IFL_STUB_MS)
1618 		mutex_unlock(&ls->ls_waiters_mutex);
1619 	return error;
1620 }
1621 
1622 /* If there's an rsb for the same resource being removed, ensure
1623    that the remove message is sent before the new lookup message.
1624    It should be rare to need a delay here, but if not, then it may
1625    be worthwhile to add a proper wait mechanism rather than a delay. */
1626 
1627 static void wait_pending_remove(struct dlm_rsb *r)
1628 {
1629 	struct dlm_ls *ls = r->res_ls;
1630  restart:
1631 	spin_lock(&ls->ls_remove_spin);
1632 	if (ls->ls_remove_len &&
1633 	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
1634 		log_debug(ls, "delay lookup for remove dir %d %s",
1635 		  	  r->res_dir_nodeid, r->res_name);
1636 		spin_unlock(&ls->ls_remove_spin);
1637 		msleep(1);
1638 		goto restart;
1639 	}
1640 	spin_unlock(&ls->ls_remove_spin);
1641 }
1642 
1643 /*
1644  * ls_remove_spin protects ls_remove_name and ls_remove_len which are
1645  * read by other threads in wait_pending_remove.  ls_remove_names
1646  * and ls_remove_lens are only used by the scan thread, so they do
1647  * not need protection.
1648  */
1649 
1650 static void shrink_bucket(struct dlm_ls *ls, int b)
1651 {
1652 	struct rb_node *n, *next;
1653 	struct dlm_rsb *r;
1654 	char *name;
1655 	int our_nodeid = dlm_our_nodeid();
1656 	int remote_count = 0;
1657 	int need_shrink = 0;
1658 	int i, len, rv;
1659 
1660 	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
1661 
1662 	spin_lock(&ls->ls_rsbtbl[b].lock);
1663 
1664 	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
1665 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1666 		return;
1667 	}
1668 
1669 	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
1670 		next = rb_next(n);
1671 		r = rb_entry(n, struct dlm_rsb, res_hashnode);
1672 
1673 		/* If we're the directory record for this rsb, and
1674 		   we're not the master of it, then we need to wait
1675 		   for the master node to send us a dir remove
1676 		   before removing the dir record. */
1677 
1678 		if (!dlm_no_directory(ls) &&
1679 		    (r->res_master_nodeid != our_nodeid) &&
1680 		    (dlm_dir_nodeid(r) == our_nodeid)) {
1681 			continue;
1682 		}
1683 
1684 		need_shrink = 1;
1685 
1686 		if (!time_after_eq(jiffies, r->res_toss_time +
1687 				   dlm_config.ci_toss_secs * HZ)) {
1688 			continue;
1689 		}
1690 
1691 		if (!dlm_no_directory(ls) &&
1692 		    (r->res_master_nodeid == our_nodeid) &&
1693 		    (dlm_dir_nodeid(r) != our_nodeid)) {
1694 
1695 			/* We're the master of this rsb but we're not
1696 			   the directory record, so we need to tell the
1697 			   dir node to remove the dir record. */
1698 
1699 			ls->ls_remove_lens[remote_count] = r->res_length;
1700 			memcpy(ls->ls_remove_names[remote_count], r->res_name,
1701 			       DLM_RESNAME_MAXLEN);
1702 			remote_count++;
1703 
1704 			if (remote_count >= DLM_REMOVE_NAMES_MAX)
1705 				break;
1706 			continue;
1707 		}
1708 
1709 		if (!kref_put(&r->res_ref, kill_rsb)) {
1710 			log_error(ls, "tossed rsb in use %s", r->res_name);
1711 			continue;
1712 		}
1713 
1714 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1715 		dlm_free_rsb(r);
1716 	}
1717 
1718 	if (need_shrink)
1719 		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
1720 	else
1721 		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
1722 	spin_unlock(&ls->ls_rsbtbl[b].lock);
1723 
1724 	/*
1725 	 * While searching for rsb's to free, we found some that require
1726 	 * remote removal.  We leave them in place and find them again here
1727 	 * so there is a very small gap between removing them from the toss
1728 	 * list and sending the removal.  Keeping this gap small is
1729 	 * important to keep us (the master node) from being out of sync
1730 	 * with the remote dir node for very long.
1731 	 *
1732 	 * From the time the rsb is removed from toss until just after
1733 	 * send_remove, the rsb name is saved in ls_remove_name.  A new
1734 	 * lookup checks this to ensure that a new lookup message for the
1735 	 * same resource name is not sent just before the remove message.
1736 	 */
1737 
1738 	for (i = 0; i < remote_count; i++) {
1739 		name = ls->ls_remove_names[i];
1740 		len = ls->ls_remove_lens[i];
1741 
1742 		spin_lock(&ls->ls_rsbtbl[b].lock);
1743 		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1744 		if (rv) {
1745 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1746 			log_debug(ls, "remove_name not toss %s", name);
1747 			continue;
1748 		}
1749 
1750 		if (r->res_master_nodeid != our_nodeid) {
1751 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1752 			log_debug(ls, "remove_name master %d dir %d our %d %s",
1753 				  r->res_master_nodeid, r->res_dir_nodeid,
1754 				  our_nodeid, name);
1755 			continue;
1756 		}
1757 
1758 		if (r->res_dir_nodeid == our_nodeid) {
1759 			/* should never happen */
1760 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1761 			log_error(ls, "remove_name dir %d master %d our %d %s",
1762 				  r->res_dir_nodeid, r->res_master_nodeid,
1763 				  our_nodeid, name);
1764 			continue;
1765 		}
1766 
1767 		if (!time_after_eq(jiffies, r->res_toss_time +
1768 				   dlm_config.ci_toss_secs * HZ)) {
1769 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1770 			log_debug(ls, "remove_name toss_time %lu now %lu %s",
1771 				  r->res_toss_time, jiffies, name);
1772 			continue;
1773 		}
1774 
1775 		if (!kref_put(&r->res_ref, kill_rsb)) {
1776 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1777 			log_error(ls, "remove_name in use %s", name);
1778 			continue;
1779 		}
1780 
1781 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1782 
1783 		/* block lookup of same name until we've sent remove */
1784 		spin_lock(&ls->ls_remove_spin);
1785 		ls->ls_remove_len = len;
1786 		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
1787 		spin_unlock(&ls->ls_remove_spin);
1788 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1789 
1790 		send_remove(r);
1791 
1792 		/* allow lookup of name again */
1793 		spin_lock(&ls->ls_remove_spin);
1794 		ls->ls_remove_len = 0;
1795 		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
1796 		spin_unlock(&ls->ls_remove_spin);
1797 
1798 		dlm_free_rsb(r);
1799 	}
1800 }
1801 
1802 void dlm_scan_rsbs(struct dlm_ls *ls)
1803 {
1804 	int i;
1805 
1806 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1807 		shrink_bucket(ls, i);
1808 		if (dlm_locking_stopped(ls))
1809 			break;
1810 		cond_resched();
1811 	}
1812 }
1813 
1814 static void add_timeout(struct dlm_lkb *lkb)
1815 {
1816 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1817 
1818 	if (is_master_copy(lkb))
1819 		return;
1820 
1821 	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1822 	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1823 		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1824 		goto add_it;
1825 	}
1826 	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1827 		goto add_it;
1828 	return;
1829 
1830  add_it:
1831 	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1832 	mutex_lock(&ls->ls_timeout_mutex);
1833 	hold_lkb(lkb);
1834 	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1835 	mutex_unlock(&ls->ls_timeout_mutex);
1836 }
1837 
1838 static void del_timeout(struct dlm_lkb *lkb)
1839 {
1840 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1841 
1842 	mutex_lock(&ls->ls_timeout_mutex);
1843 	if (!list_empty(&lkb->lkb_time_list)) {
1844 		list_del_init(&lkb->lkb_time_list);
1845 		unhold_lkb(lkb);
1846 	}
1847 	mutex_unlock(&ls->ls_timeout_mutex);
1848 }
1849 
1850 /* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1851    lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
1852    and then lock rsb because of lock ordering in add_timeout.  We may need
1853    to specify some special timeout-related bits in the lkb that are just to
1854    be accessed under the timeout_mutex. */
1855 
1856 void dlm_scan_timeout(struct dlm_ls *ls)
1857 {
1858 	struct dlm_rsb *r;
1859 	struct dlm_lkb *lkb;
1860 	int do_cancel, do_warn;
1861 	s64 wait_us;
1862 
1863 	for (;;) {
1864 		if (dlm_locking_stopped(ls))
1865 			break;
1866 
1867 		do_cancel = 0;
1868 		do_warn = 0;
1869 		mutex_lock(&ls->ls_timeout_mutex);
1870 		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1871 
1872 			wait_us = ktime_to_us(ktime_sub(ktime_get(),
1873 					      		lkb->lkb_timestamp));
1874 
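			/* lkb_timeout_cs and ci_timewarn_cs are centiseconds;
			   1 cs = 10,000 us, hence the factor below */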
1875 			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1876 			    wait_us >= (lkb->lkb_timeout_cs * 10000))
1877 				do_cancel = 1;
1878 
1879 			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1880 			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
1881 				do_warn = 1;
1882 
1883 			if (!do_cancel && !do_warn)
1884 				continue;
1885 			hold_lkb(lkb);
1886 			break;
1887 		}
1888 		mutex_unlock(&ls->ls_timeout_mutex);
1889 
1890 		if (!do_cancel && !do_warn)
1891 			break;
1892 
1893 		r = lkb->lkb_resource;
1894 		hold_rsb(r);
1895 		lock_rsb(r);
1896 
1897 		if (do_warn) {
1898 			/* clear flag so we only warn once */
1899 			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1900 			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1901 				del_timeout(lkb);
1902 			dlm_timeout_warn(lkb);
1903 		}
1904 
1905 		if (do_cancel) {
1906 			log_debug(ls, "timeout cancel %x node %d %s",
1907 				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1908 			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1909 			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1910 			del_timeout(lkb);
1911 			_cancel_lock(r, lkb);
1912 		}
1913 
1914 		unlock_rsb(r);
1915 		unhold_rsb(r);
1916 		dlm_put_lkb(lkb);
1917 	}
1918 }
1919 
1920 /* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1921    dlm_recoverd before checking/setting ls_recover_begin. */
1922 
1923 void dlm_adjust_timeouts(struct dlm_ls *ls)
1924 {
1925 	struct dlm_lkb *lkb;
1926 	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1927 
1928 	ls->ls_recover_begin = 0;
1929 	mutex_lock(&ls->ls_timeout_mutex);
1930 	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1931 		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1932 	mutex_unlock(&ls->ls_timeout_mutex);
1933 
1934 	if (!dlm_config.ci_waitwarn_us)
1935 		return;
1936 
1937 	mutex_lock(&ls->ls_waiters_mutex);
1938 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1939 		if (ktime_to_us(lkb->lkb_wait_time))
1940 			lkb->lkb_wait_time = ktime_get();
1941 	}
1942 	mutex_unlock(&ls->ls_waiters_mutex);
1943 }
1944 
1945 /* lkb is master or local copy */
1946 
1947 static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1948 {
1949 	int b, len = r->res_ls->ls_lvblen;
1950 
1951 	/* b=1 lvb returned to caller
1952 	   b=0 lvb written to rsb or invalidated
1953 	   b=-1 do nothing */
1954 
1955 	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1956 
1957 	if (b == 1) {
1958 		if (!lkb->lkb_lvbptr)
1959 			return;
1960 
1961 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1962 			return;
1963 
1964 		if (!r->res_lvbptr)
1965 			return;
1966 
1967 		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1968 		lkb->lkb_lvbseq = r->res_lvbseq;
1969 
1970 	} else if (b == 0) {
1971 		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1972 			rsb_set_flag(r, RSB_VALNOTVALID);
1973 			return;
1974 		}
1975 
1976 		if (!lkb->lkb_lvbptr)
1977 			return;
1978 
1979 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1980 			return;
1981 
1982 		if (!r->res_lvbptr)
1983 			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1984 
1985 		if (!r->res_lvbptr)
1986 			return;
1987 
1988 		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1989 		r->res_lvbseq++;
1990 		lkb->lkb_lvbseq = r->res_lvbseq;
1991 		rsb_clear_flag(r, RSB_VALNOTVALID);
1992 	}
1993 
1994 	if (rsb_flag(r, RSB_VALNOTVALID))
1995 		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1996 }
1997 
1998 static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1999 {
2000 	if (lkb->lkb_grmode < DLM_LOCK_PW)
2001 		return;
2002 
2003 	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
2004 		rsb_set_flag(r, RSB_VALNOTVALID);
2005 		return;
2006 	}
2007 
2008 	if (!lkb->lkb_lvbptr)
2009 		return;
2010 
2011 	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2012 		return;
2013 
2014 	if (!r->res_lvbptr)
2015 		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
2016 
2017 	if (!r->res_lvbptr)
2018 		return;
2019 
2020 	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2021 	r->res_lvbseq++;
2022 	rsb_clear_flag(r, RSB_VALNOTVALID);
2023 }
2024 
2025 /* lkb is process copy (pc) */
2026 
2027 static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2028 			    struct dlm_message *ms)
2029 {
2030 	int b;
2031 
2032 	if (!lkb->lkb_lvbptr)
2033 		return;
2034 
2035 	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2036 		return;
2037 
2038 	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
2039 	if (b == 1) {
2040 		int len = receive_extralen(ms);
2041 		if (len > r->res_ls->ls_lvblen)
2042 			len = r->res_ls->ls_lvblen;
2043 		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2044 		lkb->lkb_lvbseq = ms->m_lvbseq;
2045 	}
2046 }
2047 
2048 /* Manipulate lkb's on rsb's convert/granted/waiting queues
2049    remove_lock -- used for unlock, removes lkb from granted
2050    revert_lock -- used for cancel, moves lkb from convert to granted
2051    grant_lock  -- used for request and convert, adds lkb to granted or
2052                   moves lkb from convert or waiting to granted
2053 
2054    Each of these is used for master or local copy lkb's.  There is
2055    also a _pc() variation used to make the corresponding change on
2056    a process copy (pc) lkb. */
2057 
2058 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2059 {
2060 	del_lkb(r, lkb);
2061 	lkb->lkb_grmode = DLM_LOCK_IV;
2062 	/* this unhold undoes the original ref from create_lkb()
2063 	   so this leads to the lkb being freed */
2064 	unhold_lkb(lkb);
2065 }
2066 
2067 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2068 {
2069 	set_lvb_unlock(r, lkb);
2070 	_remove_lock(r, lkb);
2071 }
2072 
2073 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2074 {
2075 	_remove_lock(r, lkb);
2076 }
2077 
2078 /* returns: 0 did nothing
2079 	    1 moved lock to granted
2080 	   -1 removed lock */
2081 
2082 static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2083 {
2084 	int rv = 0;
2085 
2086 	lkb->lkb_rqmode = DLM_LOCK_IV;
2087 
2088 	switch (lkb->lkb_status) {
2089 	case DLM_LKSTS_GRANTED:
2090 		break;
2091 	case DLM_LKSTS_CONVERT:
2092 		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2093 		rv = 1;
2094 		break;
2095 	case DLM_LKSTS_WAITING:
2096 		del_lkb(r, lkb);
2097 		lkb->lkb_grmode = DLM_LOCK_IV;
2098 		/* this unhold undoes the original ref from create_lkb()
2099 		   so this leads to the lkb being freed */
2100 		unhold_lkb(lkb);
2101 		rv = -1;
2102 		break;
2103 	default:
2104 		log_print("invalid status for revert %d", lkb->lkb_status);
2105 	}
2106 	return rv;
2107 }
2108 
2109 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2110 {
2111 	return revert_lock(r, lkb);
2112 }
2113 
2114 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2115 {
2116 	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2117 		lkb->lkb_grmode = lkb->lkb_rqmode;
2118 		if (lkb->lkb_status)
2119 			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2120 		else
2121 			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2122 	}
2123 
2124 	lkb->lkb_rqmode = DLM_LOCK_IV;
2125 	lkb->lkb_highbast = 0;
2126 }
2127 
2128 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2129 {
2130 	set_lvb_lock(r, lkb);
2131 	_grant_lock(r, lkb);
2132 }
2133 
2134 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2135 			  struct dlm_message *ms)
2136 {
2137 	set_lvb_lock_pc(r, lkb, ms);
2138 	_grant_lock(r, lkb);
2139 }
2140 
2141 /* called by grant_pending_locks() which means an async grant message must
2142    be sent to the requesting node in addition to granting the lock if the
2143    lkb belongs to a remote node. */
2144 
2145 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2146 {
2147 	grant_lock(r, lkb);
2148 	if (is_master_copy(lkb))
2149 		send_grant(r, lkb);
2150 	else
2151 		queue_cast(r, lkb, 0);
2152 }
2153 
2154 /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2155    change the granted/requested modes.  We're munging things accordingly in
2156    the process copy.
2157    CONVDEADLK: our grmode may have been forced down to NL to resolve a
2158    conversion deadlock
2159    ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2160    compatible with other granted locks */
2161 
2162 static void munge_demoted(struct dlm_lkb *lkb)
2163 {
2164 	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2165 		log_print("munge_demoted %x invalid modes gr %d rq %d",
2166 			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2167 		return;
2168 	}
2169 
2170 	lkb->lkb_grmode = DLM_LOCK_NL;
2171 }
2172 
2173 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2174 {
2175 	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
2176 	    ms->m_type != DLM_MSG_GRANT) {
2177 		log_print("munge_altmode %x invalid reply type %d",
2178 			  lkb->lkb_id, ms->m_type);
2179 		return;
2180 	}
2181 
2182 	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2183 		lkb->lkb_rqmode = DLM_LOCK_PR;
2184 	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2185 		lkb->lkb_rqmode = DLM_LOCK_CW;
2186 	else {
2187 		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2188 		dlm_print_lkb(lkb);
2189 	}
2190 }
2191 
2192 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2193 {
2194 	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2195 					   lkb_statequeue);
2196 	if (lkb->lkb_id == first->lkb_id)
2197 		return 1;
2198 
2199 	return 0;
2200 }
2201 
2202 /* Check if the given lkb conflicts with another lkb on the queue. */
2203 
2204 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2205 {
2206 	struct dlm_lkb *this;
2207 
2208 	list_for_each_entry(this, head, lkb_statequeue) {
2209 		if (this == lkb)
2210 			continue;
2211 		if (!modes_compat(this, lkb))
2212 			return 1;
2213 	}
2214 	return 0;
2215 }
2216 
2217 /*
2218  * "A conversion deadlock arises with a pair of lock requests in the converting
2219  * queue for one resource.  The granted mode of each lock blocks the requested
2220  * mode of the other lock."
2221  *
2222  * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2223  * convert queue from being granted, then deadlk/demote lkb.
2224  *
2225  * Example:
2226  * Granted Queue: empty
2227  * Convert Queue: NL->EX (first lock)
2228  *                PR->EX (second lock)
2229  *
2230  * The first lock can't be granted because of the granted mode of the second
2231  * lock and the second lock can't be granted because it's not first in the
2232  * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2233  * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2234  * flag set and return DEMOTED in the lksb flags.
2235  *
2236  * Originally, this function detected conv-deadlk in a more limited scope:
2237  * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2238  * - if lkb1 was the first entry in the queue (not just earlier), and was
2239  *   blocked by the granted mode of lkb2, and there was nothing on the
2240  *   granted queue preventing lkb1 from being granted immediately, i.e.
2241  *   lkb2 was the only thing preventing lkb1 from being granted.
2242  *
2243  * That second condition meant we'd only say there was conv-deadlk if
2244  * resolving it (by demotion) would lead to the first lock on the convert
2245  * queue being granted right away.  It allowed conversion deadlocks to exist
2246  * between locks on the convert queue while they couldn't be granted anyway.
2247  *
2248  * Now, we detect and take action on conversion deadlocks immediately when
2249  * they're created, even if they may not be immediately consequential.  If
2250  * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2251  * mode that would prevent lkb1's conversion from being granted, we do a
2252  * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2253  * I think this means that the lkb_is_ahead condition below should always
2254  * be zero, i.e. there will never be conv-deadlk between two locks that are
2255  * both already on the convert queue.
2256  */
2257 
2258 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2259 {
2260 	struct dlm_lkb *lkb1;
2261 	int lkb_is_ahead = 0;
2262 
2263 	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2264 		if (lkb1 == lkb2) {
2265 			lkb_is_ahead = 1;
2266 			continue;
2267 		}
2268 
2269 		if (!lkb_is_ahead) {
2270 			if (!modes_compat(lkb2, lkb1))
2271 				return 1;
2272 		} else {
2273 			if (!modes_compat(lkb2, lkb1) &&
2274 			    !modes_compat(lkb1, lkb2))
2275 				return 1;
2276 		}
2277 	}
2278 	return 0;
2279 }
2280 
2281 /*
2282  * Return 1 if the lock can be granted, 0 otherwise.
2283  * Also detect and resolve conversion deadlocks.
2284  *
2285  * lkb is the lock to be granted
2286  *
2287  * now is 1 if the function is being called in the context of the
2288  * immediate request; it is 0 if called later, after the lock has been
2289  * queued.
2290  *
2291  * recover is 1 if dlm_recover_grant() is trying to grant conversions
2292  * after recovery.
2293  *
2294  * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2295  */
2296 
2297 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2298 			   int recover)
2299 {
2300 	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2301 
2302 	/*
2303 	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2304 	 * a new request for a NL mode lock being blocked.
2305 	 *
2306 	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2307 	 * request, then it would be granted.  In essence, the use of this flag
2308 	 * tells the Lock Manager to expedite this request by not considering
2309 	 * what may be in the CONVERTING or WAITING queues...  As of this
2310 	 * writing, the EXPEDITE flag can be used only with new requests for NL
2311 	 * mode locks.  This flag is not valid for conversion requests.
2312 	 *
2313 	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
2314 	 * conversion or used with a non-NL requested mode.  We also know an
2315 	 * EXPEDITE request is always granted immediately, so now must always
2316 	 * be 1.  The full condition to grant an expedite request: (now &&
2317 	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2318 	 * therefore be shortened to just checking the flag.
2319 	 */
2320 
2321 	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2322 		return 1;
2323 
2324 	/*
2325 	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2326 	 * added to the remaining conditions.
2327 	 */
2328 
2329 	if (queue_conflict(&r->res_grantqueue, lkb))
2330 		return 0;
2331 
2332 	/*
2333 	 * 6-3: By default, a conversion request is immediately granted if the
2334 	 * requested mode is compatible with the modes of all other granted
2335 	 * locks
2336 	 */
2337 
2338 	if (queue_conflict(&r->res_convertqueue, lkb))
2339 		return 0;
2340 
2341 	/*
2342 	 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2343 	 * locks for a recovered rsb, on which lkb's have been rebuilt.
2344 	 * The lkb's may have been rebuilt on the queues in a different
2345 	 * order than they were in on the previous master.  So, granting
2346 	 * queued conversions in order after recovery doesn't make sense
2347 	 * since the order hasn't been preserved anyway.  The new order
2348 	 * could also have created a new "in place" conversion deadlock.
2349 	 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2350 	 * After recovery, there would be no granted locks, and possibly
2351 	 * NL->EX, PR->EX, an in-place conversion deadlock.)  So, after
2352 	 * recovery, grant conversions without considering order.
2353 	 */
2354 
2355 	if (conv && recover)
2356 		return 1;
2357 
2358 	/*
2359 	 * 6-5: But the default algorithm for deciding whether to grant or
2360 	 * queue conversion requests does not by itself guarantee that such
2361 	 * requests are serviced on a "first come first serve" basis.  This, in
2362 	 * turn, can lead to a phenomenon known as "indefinite postponement".
2363 	 *
2364 	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2365 	 * the system service employed to request a lock conversion.  This flag
2366 	 * forces certain conversion requests to be queued, even if they are
2367 	 * compatible with the granted modes of other locks on the same
2368 	 * resource.  Thus, the use of this flag results in conversion requests
2369 	 * being ordered on a "first come first serve" basis.
2370 	 *
2371 	 * DCT: This condition is all about new conversions being able to occur
2372 	 * "in place" while the lock remains on the granted queue (assuming
2373 	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
2374 	 * doesn't _have_ to go onto the convert queue where it's processed in
2375 	 * order.  The "now" variable is necessary to distinguish converts
2376 	 * being received and processed for the first time now, because once a
2377 	 * convert is moved to the conversion queue the condition below applies
2378 	 * requiring fifo granting.
2379 	 */
2380 
2381 	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2382 		return 1;
2383 
2384 	/*
2385 	 * Even if the convert is compat with all granted locks,
2386 	 * QUECVT forces it behind other locks on the convert queue.
2387 	 */
2388 
2389 	if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2390 		if (list_empty(&r->res_convertqueue))
2391 			return 1;
2392 		else
2393 			return 0;
2394 	}
2395 
2396 	/*
2397 	 * The NOORDER flag is set to avoid the standard vms rules on grant
2398 	 * order.
2399 	 */
2400 
2401 	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2402 		return 1;
2403 
2404 	/*
2405 	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2406 	 * granted until all other conversion requests ahead of it are granted
2407 	 * and/or canceled.
2408 	 */
2409 
2410 	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2411 		return 1;
2412 
2413 	/*
2414 	 * 6-4: By default, a new request is immediately granted only if all
2415 	 * three of the following conditions are satisfied when the request is
2416 	 * issued:
2417 	 * - The queue of ungranted conversion requests for the resource is
2418 	 *   empty.
2419 	 * - The queue of ungranted new requests for the resource is empty.
2420 	 * - The mode of the new request is compatible with the most
2421 	 *   restrictive mode of all granted locks on the resource.
2422 	 */
2423 
2424 	if (now && !conv && list_empty(&r->res_convertqueue) &&
2425 	    list_empty(&r->res_waitqueue))
2426 		return 1;
2427 
2428 	/*
2429 	 * 6-4: Once a lock request is in the queue of ungranted new requests,
2430 	 * it cannot be granted until the queue of ungranted conversion
2431 	 * requests is empty, all ungranted new requests ahead of it are
2432 	 * granted and/or canceled, and it is compatible with the granted mode
2433 	 * of the most restrictive lock granted on the resource.
2434 	 */
2435 
2436 	if (!now && !conv && list_empty(&r->res_convertqueue) &&
2437 	    first_in_list(lkb, &r->res_waitqueue))
2438 		return 1;
2439 
2440 	return 0;
2441 }
2442 
2443 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2444 			  int recover, int *err)
2445 {
2446 	int rv;
2447 	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2448 	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2449 
2450 	if (err)
2451 		*err = 0;
2452 
2453 	rv = _can_be_granted(r, lkb, now, recover);
2454 	if (rv)
2455 		goto out;
2456 
2457 	/*
2458 	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2459 	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2460 	 * cancels one of the locks.
2461 	 */
2462 
2463 	if (is_convert && can_be_queued(lkb) &&
2464 	    conversion_deadlock_detect(r, lkb)) {
2465 		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2466 			lkb->lkb_grmode = DLM_LOCK_NL;
2467 			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2468 		} else if (err) {
2469 			*err = -EDEADLK;
2470 		} else {
2471 			log_print("can_be_granted deadlock %x now %d",
2472 				  lkb->lkb_id, now);
2473 			dlm_dump_rsb(r);
2474 		}
2475 		goto out;
2476 	}
2477 
2478 	/*
2479 	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2480 	 * to grant a request in a mode other than the normal rqmode.  It's a
2481 	 * simple way to provide a big optimization to applications that can
2482 	 * use them.
2483 	 */
2484 
2485 	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2486 		alt = DLM_LOCK_PR;
2487 	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2488 		alt = DLM_LOCK_CW;
2489 
2490 	if (alt) {
2491 		lkb->lkb_rqmode = alt;
2492 		rv = _can_be_granted(r, lkb, now, 0);
2493 		if (rv)
2494 			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2495 		else
2496 			lkb->lkb_rqmode = rqmode;
2497 	}
2498  out:
2499 	return rv;
2500 }
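
/*
 * Illustrative sketch only (not part of the original source): how a caller
 * might exercise the non-standard ALTPR handling above.  The names "ls",
 * "my_lksb" and "my_ast" are assumptions for the example.  If the EX request
 * cannot be granted but PR can, the lock is granted in PR and DLM_SBF_ALTMODE
 * is set in the lksb flags delivered with the completion ast:
 *
 *	error = dlm_lock(ls, DLM_LOCK_EX, &my_lksb, DLM_LKF_ALTPR,
 *			 "example-res", 11, 0, my_ast, NULL, NULL);
 *	...
 *	if (my_lksb.sb_flags & DLM_SBF_ALTMODE)
 *		pr_info("granted in alternate (PR) mode\n");
 */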
2501 
2502 /* Returns the highest requested mode of all blocked conversions; sets
2503    cw if there's a blocked conversion to DLM_LOCK_CW. */
2504 
2505 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2506 				 unsigned int *count)
2507 {
2508 	struct dlm_lkb *lkb, *s;
2509 	int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2510 	int hi, demoted, quit, grant_restart, demote_restart;
2511 	int deadlk;
2512 
2513 	quit = 0;
2514  restart:
2515 	grant_restart = 0;
2516 	demote_restart = 0;
2517 	hi = DLM_LOCK_IV;
2518 
2519 	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2520 		demoted = is_demoted(lkb);
2521 		deadlk = 0;
2522 
2523 		if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2524 			grant_lock_pending(r, lkb);
2525 			grant_restart = 1;
2526 			if (count)
2527 				(*count)++;
2528 			continue;
2529 		}
2530 
2531 		if (!demoted && is_demoted(lkb)) {
2532 			log_print("WARN: pending demoted %x node %d %s",
2533 				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2534 			demote_restart = 1;
2535 			continue;
2536 		}
2537 
2538 		if (deadlk) {
2539 			/*
2540 			 * If the DLM_LKF_NODLCKWT flag is set and a conversion
2541 			 * deadlock is detected, we queue a blocking AST so the
2542 			 * lock holder can demote (or cancel) the conversion.
2543 			 */
2544 			if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2545 				if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2546 					queue_bast(r, lkb, lkb->lkb_rqmode);
2547 					lkb->lkb_highbast = lkb->lkb_rqmode;
2548 				}
2549 			} else {
2550 				log_print("WARN: pending deadlock %x node %d %s",
2551 					  lkb->lkb_id, lkb->lkb_nodeid,
2552 					  r->res_name);
2553 				dlm_dump_rsb(r);
2554 			}
2555 			continue;
2556 		}
2557 
2558 		hi = max_t(int, lkb->lkb_rqmode, hi);
2559 
2560 		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2561 			*cw = 1;
2562 	}
2563 
2564 	if (grant_restart)
2565 		goto restart;
2566 	if (demote_restart && !quit) {
2567 		quit = 1;
2568 		goto restart;
2569 	}
2570 
2571 	return max_t(int, high, hi);
2572 }
2573 
2574 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2575 			      unsigned int *count)
2576 {
2577 	struct dlm_lkb *lkb, *s;
2578 
2579 	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2580 		if (can_be_granted(r, lkb, 0, 0, NULL)) {
2581 			grant_lock_pending(r, lkb);
2582 			if (count)
2583 				(*count)++;
2584 		} else {
2585 			high = max_t(int, lkb->lkb_rqmode, high);
2586 			if (lkb->lkb_rqmode == DLM_LOCK_CW)
2587 				*cw = 1;
2588 		}
2589 	}
2590 
2591 	return high;
2592 }
2593 
2594 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2595    on either the convert or waiting queue.
2596    high is the largest rqmode of all locks blocked on the convert or
2597    waiting queue. */
2598 
2599 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2600 {
2601 	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2602 		if (gr->lkb_highbast < DLM_LOCK_EX)
2603 			return 1;
2604 		return 0;
2605 	}
2606 
2607 	if (gr->lkb_highbast < high &&
2608 	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2609 		return 1;
2610 	return 0;
2611 }
2612 
2613 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2614 {
2615 	struct dlm_lkb *lkb, *s;
2616 	int high = DLM_LOCK_IV;
2617 	int cw = 0;
2618 
2619 	if (!is_master(r)) {
2620 		log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2621 		dlm_dump_rsb(r);
2622 		return;
2623 	}
2624 
2625 	high = grant_pending_convert(r, high, &cw, count);
2626 	high = grant_pending_wait(r, high, &cw, count);
2627 
2628 	if (high == DLM_LOCK_IV)
2629 		return;
2630 
2631 	/*
2632 	 * If there are locks left on the wait/convert queue then send blocking
2633 	 * ASTs to granted locks based on the largest requested mode (high)
2634 	 * found above.
2635 	 */
2636 
2637 	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2638 		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2639 			if (cw && high == DLM_LOCK_PR &&
2640 			    lkb->lkb_grmode == DLM_LOCK_PR)
2641 				queue_bast(r, lkb, DLM_LOCK_CW);
2642 			else
2643 				queue_bast(r, lkb, high);
2644 			lkb->lkb_highbast = high;
2645 		}
2646 	}
2647 }
2648 
2649 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2650 {
2651 	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2652 	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2653 		if (gr->lkb_highbast < DLM_LOCK_EX)
2654 			return 1;
2655 		return 0;
2656 	}
2657 
2658 	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2659 		return 1;
2660 	return 0;
2661 }
2662 
2663 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2664 			    struct dlm_lkb *lkb)
2665 {
2666 	struct dlm_lkb *gr;
2667 
2668 	list_for_each_entry(gr, head, lkb_statequeue) {
2669 		/* skip self when sending basts to convertqueue */
2670 		if (gr == lkb)
2671 			continue;
2672 		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2673 			queue_bast(r, gr, lkb->lkb_rqmode);
2674 			gr->lkb_highbast = lkb->lkb_rqmode;
2675 		}
2676 	}
2677 }
2678 
2679 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2680 {
2681 	send_bast_queue(r, &r->res_grantqueue, lkb);
2682 }
2683 
2684 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2685 {
2686 	send_bast_queue(r, &r->res_grantqueue, lkb);
2687 	send_bast_queue(r, &r->res_convertqueue, lkb);
2688 }
2689 
2690 /* set_master(r, lkb) -- set the master nodeid of a resource
2691 
2692    The purpose of this function is to set the nodeid field in the given
2693    lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
2694    known, it can just be copied to the lkb and the function will return
2695    0.  If the rsb's nodeid is _not_ known, it needs to be looked up
2696    before it can be copied to the lkb.
2697 
2698    When the rsb nodeid is being looked up remotely, the initial lkb
2699    causing the lookup is kept on the ls_waiters list waiting for the
2700    lookup reply.  Other lkb's waiting for the same rsb lookup are kept
2701    on the rsb's res_lookup list until the master is verified.
2702 
2703    Return values:
2704    0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2705    1: the rsb master is not available and the lkb has been placed on
2706       a wait queue
2707 */
2708 
2709 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2710 {
2711 	int our_nodeid = dlm_our_nodeid();
2712 
2713 	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2714 		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2715 		r->res_first_lkid = lkb->lkb_id;
2716 		lkb->lkb_nodeid = r->res_nodeid;
2717 		return 0;
2718 	}
2719 
2720 	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2721 		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2722 		return 1;
2723 	}
2724 
2725 	if (r->res_master_nodeid == our_nodeid) {
2726 		lkb->lkb_nodeid = 0;
2727 		return 0;
2728 	}
2729 
2730 	if (r->res_master_nodeid) {
2731 		lkb->lkb_nodeid = r->res_master_nodeid;
2732 		return 0;
2733 	}
2734 
2735 	if (dlm_dir_nodeid(r) == our_nodeid) {
2736 		/* This is a somewhat unusual case; find_rsb will usually
2737 		   have set res_master_nodeid when dir nodeid is local, but
2738 		   there are cases where we become the dir node after we've
2739 		   passed find_rsb and go through _request_lock again.
2740 		   confirm_master() or process_lookup_list() needs to be
2741 		   called after this. */
2742 		log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2743 			  lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2744 			  r->res_name);
2745 		r->res_master_nodeid = our_nodeid;
2746 		r->res_nodeid = 0;
2747 		lkb->lkb_nodeid = 0;
2748 		return 0;
2749 	}
2750 
2751 	wait_pending_remove(r);
2752 
2753 	r->res_first_lkid = lkb->lkb_id;
2754 	send_lookup(r, lkb);
2755 	return 1;
2756 }
2757 
2758 static void process_lookup_list(struct dlm_rsb *r)
2759 {
2760 	struct dlm_lkb *lkb, *safe;
2761 
2762 	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2763 		list_del_init(&lkb->lkb_rsb_lookup);
2764 		_request_lock(r, lkb);
2765 		schedule();
2766 	}
2767 }
2768 
2769 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2770 
2771 static void confirm_master(struct dlm_rsb *r, int error)
2772 {
2773 	struct dlm_lkb *lkb;
2774 
2775 	if (!r->res_first_lkid)
2776 		return;
2777 
2778 	switch (error) {
2779 	case 0:
2780 	case -EINPROGRESS:
2781 		r->res_first_lkid = 0;
2782 		process_lookup_list(r);
2783 		break;
2784 
2785 	case -EAGAIN:
2786 	case -EBADR:
2787 	case -ENOTBLK:
2788 		/* the remote request failed and won't be retried (it was
2789 		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2790 		   lkb the first_lkid */
2791 
2792 		r->res_first_lkid = 0;
2793 
2794 		if (!list_empty(&r->res_lookup)) {
2795 			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2796 					 lkb_rsb_lookup);
2797 			list_del_init(&lkb->lkb_rsb_lookup);
2798 			r->res_first_lkid = lkb->lkb_id;
2799 			_request_lock(r, lkb);
2800 		}
2801 		break;
2802 
2803 	default:
2804 		log_error(r->res_ls, "confirm_master unknown error %d", error);
2805 	}
2806 }
2807 
2808 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2809 			 int namelen, unsigned long timeout_cs,
2810 			 void (*ast) (void *astparam),
2811 			 void *astparam,
2812 			 void (*bast) (void *astparam, int mode),
2813 			 struct dlm_args *args)
2814 {
2815 	int rv = -EINVAL;
2816 
2817 	/* check for invalid arg usage */
2818 
2819 	if (mode < 0 || mode > DLM_LOCK_EX)
2820 		goto out;
2821 
2822 	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2823 		goto out;
2824 
2825 	if (flags & DLM_LKF_CANCEL)
2826 		goto out;
2827 
2828 	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2829 		goto out;
2830 
2831 	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2832 		goto out;
2833 
2834 	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2835 		goto out;
2836 
2837 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2838 		goto out;
2839 
2840 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2841 		goto out;
2842 
2843 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2844 		goto out;
2845 
2846 	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2847 		goto out;
2848 
2849 	if (!ast || !lksb)
2850 		goto out;
2851 
2852 	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2853 		goto out;
2854 
2855 	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2856 		goto out;
2857 
2858 	/* these args will be copied to the lkb in validate_lock_args;
2859 	   it cannot be done now because when converting locks, fields in
2860 	   an active lkb cannot be modified before locking the rsb */
2861 
2862 	args->flags = flags;
2863 	args->astfn = ast;
2864 	args->astparam = astparam;
2865 	args->bastfn = bast;
2866 	args->timeout = timeout_cs;
2867 	args->mode = mode;
2868 	args->lksb = lksb;
2869 	rv = 0;
2870  out:
2871 	return rv;
2872 }
2873 
2874 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2875 {
2876 	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2877  		      DLM_LKF_FORCEUNLOCK))
2878 		return -EINVAL;
2879 
2880 	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2881 		return -EINVAL;
2882 
2883 	args->flags = flags;
2884 	args->astparam = astarg;
2885 	return 0;
2886 }
2887 
2888 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2889 			      struct dlm_args *args)
2890 {
2891 	int rv = -EBUSY;
2892 
2893 	if (args->flags & DLM_LKF_CONVERT) {
2894 		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2895 			goto out;
2896 
2897 		if (lkb->lkb_wait_type)
2898 			goto out;
2899 
2900 		if (is_overlap(lkb))
2901 			goto out;
2902 
2903 		rv = -EINVAL;
2904 		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2905 			goto out;
2906 
2907 		if (args->flags & DLM_LKF_QUECVT &&
2908 		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2909 			goto out;
2910 	}
2911 
2912 	lkb->lkb_exflags = args->flags;
2913 	lkb->lkb_sbflags = 0;
2914 	lkb->lkb_astfn = args->astfn;
2915 	lkb->lkb_astparam = args->astparam;
2916 	lkb->lkb_bastfn = args->bastfn;
2917 	lkb->lkb_rqmode = args->mode;
2918 	lkb->lkb_lksb = args->lksb;
2919 	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2920 	lkb->lkb_ownpid = (int) current->pid;
2921 	lkb->lkb_timeout_cs = args->timeout;
2922 	rv = 0;
2923  out:
2924 	if (rv)
2925 		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2926 			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2927 			  lkb->lkb_status, lkb->lkb_wait_type,
2928 			  lkb->lkb_resource->res_name);
2929 	return rv;
2930 }
2931 
2932 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2933    for success */
2934 
2935 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2936    because there may be a lookup in progress and it's valid to do
2937    cancel/unlockf on it */
2938 
2939 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2940 {
2941 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2942 	int rv = -EINVAL;
2943 
2944 	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2945 		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2946 		dlm_print_lkb(lkb);
2947 		goto out;
2948 	}
2949 
2950 	/* an lkb may still exist even though the lock is EOL'ed due to a
2951 	   cancel, unlock or failed noqueue request; an app can't use these
2952 	   locks; return same error as if the lkid had not been found at all */
2953 
2954 	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2955 		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2956 		rv = -ENOENT;
2957 		goto out;
2958 	}
2959 
2960 	/* an lkb may be waiting for an rsb lookup to complete where the
2961 	   lookup was initiated by another lock */
2962 
2963 	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2964 		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2965 			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2966 			list_del_init(&lkb->lkb_rsb_lookup);
2967 			queue_cast(lkb->lkb_resource, lkb,
2968 				   args->flags & DLM_LKF_CANCEL ?
2969 				   -DLM_ECANCEL : -DLM_EUNLOCK);
2970 			unhold_lkb(lkb); /* undoes create_lkb() */
2971 		}
2972 		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2973 		rv = -EBUSY;
2974 		goto out;
2975 	}
2976 
2977 	/* cancel not allowed with another cancel/unlock in progress */
2978 
2979 	if (args->flags & DLM_LKF_CANCEL) {
2980 		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2981 			goto out;
2982 
2983 		if (is_overlap(lkb))
2984 			goto out;
2985 
2986 		/* don't let scand try to do a cancel */
2987 		del_timeout(lkb);
2988 
2989 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2990 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2991 			rv = -EBUSY;
2992 			goto out;
2993 		}
2994 
2995 		/* there's nothing to cancel */
2996 		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2997 		    !lkb->lkb_wait_type) {
2998 			rv = -EBUSY;
2999 			goto out;
3000 		}
3001 
3002 		switch (lkb->lkb_wait_type) {
3003 		case DLM_MSG_LOOKUP:
3004 		case DLM_MSG_REQUEST:
3005 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3006 			rv = -EBUSY;
3007 			goto out;
3008 		case DLM_MSG_UNLOCK:
3009 		case DLM_MSG_CANCEL:
3010 			goto out;
3011 		}
3012 		/* add_to_waiters() will set OVERLAP_CANCEL */
3013 		goto out_ok;
3014 	}
3015 
3016 	/* do we need to allow a force-unlock if there's a normal unlock
3017 	   already in progress?  in what conditions could the normal unlock
3018 	   fail such that we'd want to send a force-unlock to be sure? */
3019 
3020 	if (args->flags & DLM_LKF_FORCEUNLOCK) {
3021 		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3022 			goto out;
3023 
3024 		if (is_overlap_unlock(lkb))
3025 			goto out;
3026 
3027 		/* don't let scand try to do a cancel */
3028 		del_timeout(lkb);
3029 
3030 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
3031 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3032 			rv = -EBUSY;
3033 			goto out;
3034 		}
3035 
3036 		switch (lkb->lkb_wait_type) {
3037 		case DLM_MSG_LOOKUP:
3038 		case DLM_MSG_REQUEST:
3039 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3040 			rv = -EBUSY;
3041 			goto out;
3042 		case DLM_MSG_UNLOCK:
3043 			goto out;
3044 		}
3045 		/* add_to_waiters() will set OVERLAP_UNLOCK */
3046 		goto out_ok;
3047 	}
3048 
3049 	/* normal unlock not allowed if there's any op in progress */
3050 	rv = -EBUSY;
3051 	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3052 		goto out;
3053 
3054  out_ok:
3055 	/* an overlapping op shouldn't blow away exflags from other op */
3056 	lkb->lkb_exflags |= args->flags;
3057 	lkb->lkb_sbflags = 0;
3058 	lkb->lkb_astparam = args->astparam;
3059 	rv = 0;
3060  out:
3061 	if (rv)
3062 		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3063 			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3064 			  args->flags, lkb->lkb_wait_type,
3065 			  lkb->lkb_resource->res_name);
3066 	return rv;
3067 }
3068 
3069 /*
3070  * Four stage 4 varieties:
3071  * do_request(), do_convert(), do_unlock(), do_cancel()
3072  * These are called on the master node for the given lock and
3073  * from the central locking logic.
3074  */
3075 
3076 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3077 {
3078 	int error = 0;
3079 
3080 	if (can_be_granted(r, lkb, 1, 0, NULL)) {
3081 		grant_lock(r, lkb);
3082 		queue_cast(r, lkb, 0);
3083 		goto out;
3084 	}
3085 
3086 	if (can_be_queued(lkb)) {
3087 		error = -EINPROGRESS;
3088 		add_lkb(r, lkb, DLM_LKSTS_WAITING);
3089 		add_timeout(lkb);
3090 		goto out;
3091 	}
3092 
3093 	error = -EAGAIN;
3094 	queue_cast(r, lkb, -EAGAIN);
3095  out:
3096 	return error;
3097 }
3098 
3099 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3100 			       int error)
3101 {
3102 	switch (error) {
3103 	case -EAGAIN:
3104 		if (force_blocking_asts(lkb))
3105 			send_blocking_asts_all(r, lkb);
3106 		break;
3107 	case -EINPROGRESS:
3108 		send_blocking_asts(r, lkb);
3109 		break;
3110 	}
3111 }
3112 
3113 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3114 {
3115 	int error = 0;
3116 	int deadlk = 0;
3117 
3118 	/* changing an existing lock may allow others to be granted */
3119 
3120 	if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3121 		grant_lock(r, lkb);
3122 		queue_cast(r, lkb, 0);
3123 		goto out;
3124 	}
3125 
3126 	/* can_be_granted() detected that this lock would block in a conversion
3127 	   deadlock, so we leave it on the granted queue and return EDEADLK in
3128 	   the ast for the convert. */
3129 
3130 	if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3131 		/* it's left on the granted queue */
3132 		revert_lock(r, lkb);
3133 		queue_cast(r, lkb, -EDEADLK);
3134 		error = -EDEADLK;
3135 		goto out;
3136 	}
3137 
3138 	/* is_demoted() means the can_be_granted() above set the grmode
3139 	   to NL, and left us on the granted queue.  This auto-demotion
3140 	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
3141 	   now grantable.  We have to try to grant other converting locks
3142 	   before we try again to grant this one. */
3143 
3144 	if (is_demoted(lkb)) {
3145 		grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3146 		if (_can_be_granted(r, lkb, 1, 0)) {
3147 			grant_lock(r, lkb);
3148 			queue_cast(r, lkb, 0);
3149 			goto out;
3150 		}
3151 		/* else fall through and move to convert queue */
3152 	}
3153 
3154 	if (can_be_queued(lkb)) {
3155 		error = -EINPROGRESS;
3156 		del_lkb(r, lkb);
3157 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3158 		add_timeout(lkb);
3159 		goto out;
3160 	}
3161 
3162 	error = -EAGAIN;
3163 	queue_cast(r, lkb, -EAGAIN);
3164  out:
3165 	return error;
3166 }
3167 
3168 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3169 			       int error)
3170 {
3171 	switch (error) {
3172 	case 0:
3173 		grant_pending_locks(r, NULL);
3174 		/* grant_pending_locks also sends basts */
3175 		break;
3176 	case -EAGAIN:
3177 		if (force_blocking_asts(lkb))
3178 			send_blocking_asts_all(r, lkb);
3179 		break;
3180 	case -EINPROGRESS:
3181 		send_blocking_asts(r, lkb);
3182 		break;
3183 	}
3184 }
3185 
3186 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3187 {
3188 	remove_lock(r, lkb);
3189 	queue_cast(r, lkb, -DLM_EUNLOCK);
3190 	return -DLM_EUNLOCK;
3191 }
3192 
3193 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3194 			      int error)
3195 {
3196 	grant_pending_locks(r, NULL);
3197 }
3198 
3199 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3200 
3201 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3202 {
3203 	int error;
3204 
3205 	error = revert_lock(r, lkb);
3206 	if (error) {
3207 		queue_cast(r, lkb, -DLM_ECANCEL);
3208 		return -DLM_ECANCEL;
3209 	}
3210 	return 0;
3211 }
3212 
3213 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3214 			      int error)
3215 {
3216 	if (error)
3217 		grant_pending_locks(r, NULL);
3218 }
3219 
3220 /*
3221  * Four stage 3 varieties:
3222  * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3223  */
3224 
3225 /* add a new lkb to a possibly new rsb, called by requesting process */
3226 
3227 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3228 {
3229 	int error;
3230 
3231 	/* set_master: sets lkb nodeid from r */
3232 
3233 	error = set_master(r, lkb);
3234 	if (error < 0)
3235 		goto out;
3236 	if (error) {
3237 		error = 0;
3238 		goto out;
3239 	}
3240 
3241 	if (is_remote(r)) {
3242 		/* receive_request() calls do_request() on remote node */
3243 		error = send_request(r, lkb);
3244 	} else {
3245 		error = do_request(r, lkb);
3246 		/* for remote locks the request_reply is sent
3247 		   between do_request and do_request_effects */
3248 		do_request_effects(r, lkb, error);
3249 	}
3250  out:
3251 	return error;
3252 }
3253 
3254 /* change some property of an existing lkb, e.g. mode */
3255 
3256 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3257 {
3258 	int error;
3259 
3260 	if (is_remote(r)) {
3261 		/* receive_convert() calls do_convert() on remote node */
3262 		error = send_convert(r, lkb);
3263 	} else {
3264 		error = do_convert(r, lkb);
3265 		/* for remote locks the convert_reply is sent
3266 		   between do_convert and do_convert_effects */
3267 		do_convert_effects(r, lkb, error);
3268 	}
3269 
3270 	return error;
3271 }
3272 
3273 /* remove an existing lkb from the granted queue */
3274 
3275 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3276 {
3277 	int error;
3278 
3279 	if (is_remote(r)) {
3280 		/* receive_unlock() calls do_unlock() on remote node */
3281 		error = send_unlock(r, lkb);
3282 	} else {
3283 		error = do_unlock(r, lkb);
3284 		/* for remote locks the unlock_reply is sent
3285 		   between do_unlock and do_unlock_effects */
3286 		do_unlock_effects(r, lkb, error);
3287 	}
3288 
3289 	return error;
3290 }
3291 
3292 /* remove an existing lkb from the convert or wait queue */
3293 
3294 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3295 {
3296 	int error;
3297 
3298 	if (is_remote(r)) {
3299 		/* receive_cancel() calls do_cancel() on remote node */
3300 		error = send_cancel(r, lkb);
3301 	} else {
3302 		error = do_cancel(r, lkb);
3303 		/* for remote locks the cancel_reply is sent
3304 		   between do_cancel and do_cancel_effects */
3305 		do_cancel_effects(r, lkb, error);
3306 	}
3307 
3308 	return error;
3309 }
3310 
3311 /*
3312  * Four stage 2 varieties:
3313  * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3314  */
3315 
3316 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3317 			int len, struct dlm_args *args)
3318 {
3319 	struct dlm_rsb *r;
3320 	int error;
3321 
3322 	error = validate_lock_args(ls, lkb, args);
3323 	if (error)
3324 		return error;
3325 
3326 	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3327 	if (error)
3328 		return error;
3329 
3330 	lock_rsb(r);
3331 
3332 	attach_lkb(r, lkb);
3333 	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3334 
3335 	error = _request_lock(r, lkb);
3336 
3337 	unlock_rsb(r);
3338 	put_rsb(r);
3339 	return error;
3340 }
3341 
3342 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3343 			struct dlm_args *args)
3344 {
3345 	struct dlm_rsb *r;
3346 	int error;
3347 
3348 	r = lkb->lkb_resource;
3349 
3350 	hold_rsb(r);
3351 	lock_rsb(r);
3352 
3353 	error = validate_lock_args(ls, lkb, args);
3354 	if (error)
3355 		goto out;
3356 
3357 	error = _convert_lock(r, lkb);
3358  out:
3359 	unlock_rsb(r);
3360 	put_rsb(r);
3361 	return error;
3362 }
3363 
3364 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3365 		       struct dlm_args *args)
3366 {
3367 	struct dlm_rsb *r;
3368 	int error;
3369 
3370 	r = lkb->lkb_resource;
3371 
3372 	hold_rsb(r);
3373 	lock_rsb(r);
3374 
3375 	error = validate_unlock_args(lkb, args);
3376 	if (error)
3377 		goto out;
3378 
3379 	error = _unlock_lock(r, lkb);
3380  out:
3381 	unlock_rsb(r);
3382 	put_rsb(r);
3383 	return error;
3384 }
3385 
3386 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3387 		       struct dlm_args *args)
3388 {
3389 	struct dlm_rsb *r;
3390 	int error;
3391 
3392 	r = lkb->lkb_resource;
3393 
3394 	hold_rsb(r);
3395 	lock_rsb(r);
3396 
3397 	error = validate_unlock_args(lkb, args);
3398 	if (error)
3399 		goto out;
3400 
3401 	error = _cancel_lock(r, lkb);
3402  out:
3403 	unlock_rsb(r);
3404 	put_rsb(r);
3405 	return error;
3406 }
3407 
3408 /*
3409  * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
3410  */
3411 
3412 int dlm_lock(dlm_lockspace_t *lockspace,
3413 	     int mode,
3414 	     struct dlm_lksb *lksb,
3415 	     uint32_t flags,
3416 	     void *name,
3417 	     unsigned int namelen,
3418 	     uint32_t parent_lkid,
3419 	     void (*ast) (void *astarg),
3420 	     void *astarg,
3421 	     void (*bast) (void *astarg, int mode))
3422 {
3423 	struct dlm_ls *ls;
3424 	struct dlm_lkb *lkb;
3425 	struct dlm_args args;
3426 	int error, convert = flags & DLM_LKF_CONVERT;
3427 
3428 	ls = dlm_find_lockspace_local(lockspace);
3429 	if (!ls)
3430 		return -EINVAL;
3431 
3432 	dlm_lock_recovery(ls);
3433 
3434 	if (convert)
3435 		error = find_lkb(ls, lksb->sb_lkid, &lkb);
3436 	else
3437 		error = create_lkb(ls, &lkb);
3438 
3439 	if (error)
3440 		goto out;
3441 
3442 	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3443 			      astarg, bast, &args);
3444 	if (error)
3445 		goto out_put;
3446 
3447 	if (convert)
3448 		error = convert_lock(ls, lkb, &args);
3449 	else
3450 		error = request_lock(ls, lkb, name, namelen, &args);
3451 
3452 	if (error == -EINPROGRESS)
3453 		error = 0;
3454  out_put:
3455 	if (convert || error)
3456 		__put_lkb(ls, lkb);
3457 	if (error == -EAGAIN || error == -EDEADLK)
3458 		error = 0;
3459  out:
3460 	dlm_unlock_recovery(ls);
3461 	dlm_put_lockspace(ls);
3462 	return error;
3463 }
3464 
3465 int dlm_unlock(dlm_lockspace_t *lockspace,
3466 	       uint32_t lkid,
3467 	       uint32_t flags,
3468 	       struct dlm_lksb *lksb,
3469 	       void *astarg)
3470 {
3471 	struct dlm_ls *ls;
3472 	struct dlm_lkb *lkb;
3473 	struct dlm_args args;
3474 	int error;
3475 
3476 	ls = dlm_find_lockspace_local(lockspace);
3477 	if (!ls)
3478 		return -EINVAL;
3479 
3480 	dlm_lock_recovery(ls);
3481 
3482 	error = find_lkb(ls, lkid, &lkb);
3483 	if (error)
3484 		goto out;
3485 
3486 	error = set_unlock_args(flags, astarg, &args);
3487 	if (error)
3488 		goto out_put;
3489 
3490 	if (flags & DLM_LKF_CANCEL)
3491 		error = cancel_lock(ls, lkb, &args);
3492 	else
3493 		error = unlock_lock(ls, lkb, &args);
3494 
3495 	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3496 		error = 0;
3497 	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3498 		error = 0;
3499  out_put:
3500 	dlm_put_lkb(lkb);
3501  out:
3502 	dlm_unlock_recovery(ls);
3503 	dlm_put_lockspace(ls);
3504 	return error;
3505 }
3506 
3507 /*
3508  * send/receive routines for remote operations and replies
3509  *
3510  * send_args
3511  * send_common
3512  * send_request			receive_request
3513  * send_convert			receive_convert
3514  * send_unlock			receive_unlock
3515  * send_cancel			receive_cancel
3516  * send_grant			receive_grant
3517  * send_bast			receive_bast
3518  * send_lookup			receive_lookup
3519  * send_remove			receive_remove
3520  *
3521  * 				send_common_reply
3522  * receive_request_reply	send_request_reply
3523  * receive_convert_reply	send_convert_reply
3524  * receive_unlock_reply		send_unlock_reply
3525  * receive_cancel_reply		send_cancel_reply
3526  * receive_lookup_reply		send_lookup_reply
3527  */
3528 
3529 static int _create_message(struct dlm_ls *ls, int mb_len,
3530 			   int to_nodeid, int mstype,
3531 			   struct dlm_message **ms_ret,
3532 			   struct dlm_mhandle **mh_ret)
3533 {
3534 	struct dlm_message *ms;
3535 	struct dlm_mhandle *mh;
3536 	char *mb;
3537 
3538 	/* get_buffer gives us a message handle (mh) that we need to
3539 	   pass into lowcomms_commit and a message buffer (mb) that we
3540 	   write our data into */
3541 
3542 	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3543 	if (!mh)
3544 		return -ENOBUFS;
3545 
3546 	memset(mb, 0, mb_len);
3547 
3548 	ms = (struct dlm_message *) mb;
3549 
3550 	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3551 	ms->m_header.h_lockspace = ls->ls_global_id;
3552 	ms->m_header.h_nodeid = dlm_our_nodeid();
3553 	ms->m_header.h_length = mb_len;
3554 	ms->m_header.h_cmd = DLM_MSG;
3555 
3556 	ms->m_type = mstype;
3557 
3558 	*mh_ret = mh;
3559 	*ms_ret = ms;
3560 	return 0;
3561 }
3562 
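/* create_message() sizes the outgoing buffer by message type: lookup,
   request and remove messages carry the resource name after the fixed
   header, while convert/unlock/grant messages and request/convert replies
   may also carry the LVB.  The extra data itself is copied later, by
   send_args() (or by send_remove(), which doesn't use send_args()). */
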
3563 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3564 			  int to_nodeid, int mstype,
3565 			  struct dlm_message **ms_ret,
3566 			  struct dlm_mhandle **mh_ret)
3567 {
3568 	int mb_len = sizeof(struct dlm_message);
3569 
3570 	switch (mstype) {
3571 	case DLM_MSG_REQUEST:
3572 	case DLM_MSG_LOOKUP:
3573 	case DLM_MSG_REMOVE:
3574 		mb_len += r->res_length;
3575 		break;
3576 	case DLM_MSG_CONVERT:
3577 	case DLM_MSG_UNLOCK:
3578 	case DLM_MSG_REQUEST_REPLY:
3579 	case DLM_MSG_CONVERT_REPLY:
3580 	case DLM_MSG_GRANT:
3581 		if (lkb && lkb->lkb_lvbptr)
3582 			mb_len += r->res_ls->ls_lvblen;
3583 		break;
3584 	}
3585 
3586 	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3587 			       ms_ret, mh_ret);
3588 }
3589 
3590 /* further lowcomms enhancements or alternate implementations may make
3591    the return value from this function useful at some point */
3592 
3593 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3594 {
3595 	dlm_message_out(ms);
3596 	dlm_lowcomms_commit_buffer(mh);
3597 	return 0;
3598 }
3599 
3600 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3601 		      struct dlm_message *ms)
3602 {
3603 	ms->m_nodeid   = lkb->lkb_nodeid;
3604 	ms->m_pid      = lkb->lkb_ownpid;
3605 	ms->m_lkid     = lkb->lkb_id;
3606 	ms->m_remid    = lkb->lkb_remid;
3607 	ms->m_exflags  = lkb->lkb_exflags;
3608 	ms->m_sbflags  = lkb->lkb_sbflags;
3609 	ms->m_flags    = lkb->lkb_flags;
3610 	ms->m_lvbseq   = lkb->lkb_lvbseq;
3611 	ms->m_status   = lkb->lkb_status;
3612 	ms->m_grmode   = lkb->lkb_grmode;
3613 	ms->m_rqmode   = lkb->lkb_rqmode;
3614 	ms->m_hash     = r->res_hash;
3615 
3616 	/* m_result and m_bastmode are set from function args,
3617 	   not from lkb fields */
3618 
3619 	if (lkb->lkb_bastfn)
3620 		ms->m_asts |= DLM_CB_BAST;
3621 	if (lkb->lkb_astfn)
3622 		ms->m_asts |= DLM_CB_CAST;
3623 
3624 	/* compare with switch in create_message; send_remove() doesn't
3625 	   use send_args() */
3626 
3627 	switch (ms->m_type) {
3628 	case DLM_MSG_REQUEST:
3629 	case DLM_MSG_LOOKUP:
3630 		memcpy(ms->m_extra, r->res_name, r->res_length);
3631 		break;
3632 	case DLM_MSG_CONVERT:
3633 	case DLM_MSG_UNLOCK:
3634 	case DLM_MSG_REQUEST_REPLY:
3635 	case DLM_MSG_CONVERT_REPLY:
3636 	case DLM_MSG_GRANT:
3637 		if (!lkb->lkb_lvbptr)
3638 			break;
3639 		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3640 		break;
3641 	}
3642 }
3643 
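/* Common path for the four remote stage 3 operations (request, convert,
   unlock, cancel): register the lkb on the waiters list for the expected
   reply type, build and send the message, and take the lkb back off the
   waiters list if anything fails. */
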
3644 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3645 {
3646 	struct dlm_message *ms;
3647 	struct dlm_mhandle *mh;
3648 	int to_nodeid, error;
3649 
3650 	to_nodeid = r->res_nodeid;
3651 
3652 	error = add_to_waiters(lkb, mstype, to_nodeid);
3653 	if (error)
3654 		return error;
3655 
3656 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3657 	if (error)
3658 		goto fail;
3659 
3660 	send_args(r, lkb, ms);
3661 
3662 	error = send_message(mh, ms);
3663 	if (error)
3664 		goto fail;
3665 	return 0;
3666 
3667  fail:
3668 	remove_from_waiters(lkb, msg_reply_type(mstype));
3669 	return error;
3670 }
3671 
3672 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3673 {
3674 	return send_common(r, lkb, DLM_MSG_REQUEST);
3675 }
3676 
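/* Down conversions get no reply from the master, so after sending the
   convert we complete it locally right away, using the lockspace's stub
   message as a fake convert reply. */
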
3677 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3678 {
3679 	int error;
3680 
3681 	error = send_common(r, lkb, DLM_MSG_CONVERT);
3682 
3683 	/* down conversions go without a reply from the master */
3684 	if (!error && down_conversion(lkb)) {
3685 		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3686 		r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3687 		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3688 		r->res_ls->ls_stub_ms.m_result = 0;
3689 		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3690 	}
3691 
3692 	return error;
3693 }
3694 
3695 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3696    MASTER_UNCERTAIN to force the next request on the rsb to confirm
3697    that the master is still correct. */
3698 
3699 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3700 {
3701 	return send_common(r, lkb, DLM_MSG_UNLOCK);
3702 }
3703 
3704 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3705 {
3706 	return send_common(r, lkb, DLM_MSG_CANCEL);
3707 }
3708 
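/* Sent by the master to the node holding the process copy of a lock when
   a request or convert that was waiting on the master is granted; the
   receiving side (receive_grant) grants the lock and queues a completion
   ast for it. */
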
3709 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3710 {
3711 	struct dlm_message *ms;
3712 	struct dlm_mhandle *mh;
3713 	int to_nodeid, error;
3714 
3715 	to_nodeid = lkb->lkb_nodeid;
3716 
3717 	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3718 	if (error)
3719 		goto out;
3720 
3721 	send_args(r, lkb, ms);
3722 
3723 	ms->m_result = 0;
3724 
3725 	error = send_message(mh, ms);
3726  out:
3727 	return error;
3728 }
3729 
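/* Sent by the master to the node holding a granted lock that blocks
   another request; the receiving side (receive_bast) queues a blocking
   ast callback for the lock holder with the requested mode. */
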
3730 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3731 {
3732 	struct dlm_message *ms;
3733 	struct dlm_mhandle *mh;
3734 	int to_nodeid, error;
3735 
3736 	to_nodeid = lkb->lkb_nodeid;
3737 
3738 	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3739 	if (error)
3740 		goto out;
3741 
3742 	send_args(r, lkb, ms);
3743 
3744 	ms->m_bastmode = mode;
3745 
3746 	error = send_message(mh, ms);
3747  out:
3748 	return error;
3749 }
3750 
3751 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3752 {
3753 	struct dlm_message *ms;
3754 	struct dlm_mhandle *mh;
3755 	int to_nodeid, error;
3756 
3757 	to_nodeid = dlm_dir_nodeid(r);
3758 
3759 	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3760 	if (error)
3761 		return error;
3762 
3763 	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3764 	if (error)
3765 		goto fail;
3766 
3767 	send_args(r, lkb, ms);
3768 
3769 	error = send_message(mh, ms);
3770 	if (error)
3771 		goto fail;
3772 	return 0;
3773 
3774  fail:
3775 	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3776 	return error;
3777 }
3778 
3779 static int send_remove(struct dlm_rsb *r)
3780 {
3781 	struct dlm_message *ms;
3782 	struct dlm_mhandle *mh;
3783 	int to_nodeid, error;
3784 
3785 	to_nodeid = dlm_dir_nodeid(r);
3786 
3787 	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3788 	if (error)
3789 		goto out;
3790 
3791 	memcpy(ms->m_extra, r->res_name, r->res_length);
3792 	ms->m_hash = r->res_hash;
3793 
3794 	error = send_message(mh, ms);
3795  out:
3796 	return error;
3797 }
3798 
3799 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3800 			     int mstype, int rv)
3801 {
3802 	struct dlm_message *ms;
3803 	struct dlm_mhandle *mh;
3804 	int to_nodeid, error;
3805 
3806 	to_nodeid = lkb->lkb_nodeid;
3807 
3808 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3809 	if (error)
3810 		goto out;
3811 
3812 	send_args(r, lkb, ms);
3813 
3814 	ms->m_result = rv;
3815 
3816 	error = send_message(mh, ms);
3817  out:
3818 	return error;
3819 }
3820 
3821 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3822 {
3823 	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3824 }
3825 
3826 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3827 {
3828 	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3829 }
3830 
3831 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3832 {
3833 	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3834 }
3835 
3836 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3837 {
3838 	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3839 }
3840 
3841 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3842 			     int ret_nodeid, int rv)
3843 {
3844 	struct dlm_rsb *r = &ls->ls_stub_rsb;
3845 	struct dlm_message *ms;
3846 	struct dlm_mhandle *mh;
3847 	int error, nodeid = ms_in->m_header.h_nodeid;
3848 
3849 	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3850 	if (error)
3851 		goto out;
3852 
3853 	ms->m_lkid = ms_in->m_lkid;
3854 	ms->m_result = rv;
3855 	ms->m_nodeid = ret_nodeid;
3856 
3857 	error = send_message(mh, ms);
3858  out:
3859 	return error;
3860 }
3861 
3862 /* which args we save from a received message depends heavily on the type
3863    of message, unlike the send side where we can safely send everything about
3864    the lkb for any type of message */
3865 
3866 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3867 {
3868 	lkb->lkb_exflags = ms->m_exflags;
3869 	lkb->lkb_sbflags = ms->m_sbflags;
3870 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3871 		         (ms->m_flags & 0x0000FFFF);
3872 }
3873 
3874 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3875 {
3876 	if (ms->m_flags == DLM_IFL_STUB_MS)
3877 		return;
3878 
3879 	lkb->lkb_sbflags = ms->m_sbflags;
3880 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3881 		         (ms->m_flags & 0x0000FFFF);
3882 }
3883 
3884 static int receive_extralen(struct dlm_message *ms)
3885 {
3886 	return (ms->m_header.h_length - sizeof(struct dlm_message));
3887 }
3888 
3889 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3890 		       struct dlm_message *ms)
3891 {
3892 	int len;
3893 
3894 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3895 		if (!lkb->lkb_lvbptr)
3896 			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3897 		if (!lkb->lkb_lvbptr)
3898 			return -ENOMEM;
3899 		len = receive_extralen(ms);
3900 		if (len > ls->ls_lvblen)
3901 			len = ls->ls_lvblen;
3902 		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3903 	}
3904 	return 0;
3905 }
3906 
3907 static void fake_bastfn(void *astparam, int mode)
3908 {
3909 	log_print("fake_bastfn should not be called");
3910 }
3911 
3912 static void fake_astfn(void *astparam)
3913 {
3914 	log_print("fake_astfn should not be called");
3915 }
3916 
3917 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3918 				struct dlm_message *ms)
3919 {
3920 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3921 	lkb->lkb_ownpid = ms->m_pid;
3922 	lkb->lkb_remid = ms->m_lkid;
3923 	lkb->lkb_grmode = DLM_LOCK_IV;
3924 	lkb->lkb_rqmode = ms->m_rqmode;
3925 
3926 	lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3927 	lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3928 
3929 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3930 		/* lkb was just created so there won't be an lvb yet */
3931 		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3932 		if (!lkb->lkb_lvbptr)
3933 			return -ENOMEM;
3934 	}
3935 
3936 	return 0;
3937 }
3938 
3939 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3940 				struct dlm_message *ms)
3941 {
3942 	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3943 		return -EBUSY;
3944 
3945 	if (receive_lvb(ls, lkb, ms))
3946 		return -ENOMEM;
3947 
3948 	lkb->lkb_rqmode = ms->m_rqmode;
3949 	lkb->lkb_lvbseq = ms->m_lvbseq;
3950 
3951 	return 0;
3952 }
3953 
3954 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3955 			       struct dlm_message *ms)
3956 {
3957 	if (receive_lvb(ls, lkb, ms))
3958 		return -ENOMEM;
3959 	return 0;
3960 }
3961 
3962 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3963    uses to send a reply and that the remote end uses to process the reply. */
3964 
3965 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3966 {
3967 	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3968 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3969 	lkb->lkb_remid = ms->m_lkid;
3970 }
3971 
3972 /* This is called after the rsb is locked so that we can safely inspect
3973    fields in the lkb. */
3974 
3975 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3976 {
3977 	int from = ms->m_header.h_nodeid;
3978 	int error = 0;
3979 
3980 	/* currently, mixing user and kernel locks is not supported */
3981 	if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
3982 		log_error(lkb->lkb_resource->res_ls,
3983 			  "got user dlm message for a kernel lock");
3984 		error = -EINVAL;
3985 		goto out;
3986 	}
3987 
3988 	switch (ms->m_type) {
3989 	case DLM_MSG_CONVERT:
3990 	case DLM_MSG_UNLOCK:
3991 	case DLM_MSG_CANCEL:
3992 		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3993 			error = -EINVAL;
3994 		break;
3995 
3996 	case DLM_MSG_CONVERT_REPLY:
3997 	case DLM_MSG_UNLOCK_REPLY:
3998 	case DLM_MSG_CANCEL_REPLY:
3999 	case DLM_MSG_GRANT:
4000 	case DLM_MSG_BAST:
4001 		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
4002 			error = -EINVAL;
4003 		break;
4004 
4005 	case DLM_MSG_REQUEST_REPLY:
4006 		if (!is_process_copy(lkb))
4007 			error = -EINVAL;
4008 		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4009 			error = -EINVAL;
4010 		break;
4011 
4012 	default:
4013 		error = -EINVAL;
4014 	}
4015 
4016 out:
4017 	if (error)
4018 		log_error(lkb->lkb_resource->res_ls,
4019 			  "ignore invalid message %d from %d %x %x %x %d",
4020 			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4021 			  lkb->lkb_flags, lkb->lkb_nodeid);
4022 	return error;
4023 }
4024 
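/* We received a request for an rsb we have no record of (-EBADR in
   receive_request below).  Resend a directory remove for the name in case
   our earlier send_remove() was lost or hasn't been processed yet, so the
   dir node stops directing other nodes to us. */
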
4025 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4026 {
4027 	char name[DLM_RESNAME_MAXLEN + 1];
4028 	struct dlm_message *ms;
4029 	struct dlm_mhandle *mh;
4030 	struct dlm_rsb *r;
4031 	uint32_t hash, b;
4032 	int rv, dir_nodeid;
4033 
4034 	memset(name, 0, sizeof(name));
4035 	memcpy(name, ms_name, len);
4036 
4037 	hash = jhash(name, len, 0);
4038 	b = hash & (ls->ls_rsbtbl_size - 1);
4039 
4040 	dir_nodeid = dlm_hash2nodeid(ls, hash);
4041 
4042 	log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4043 
4044 	spin_lock(&ls->ls_rsbtbl[b].lock);
4045 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4046 	if (!rv) {
4047 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4048 		log_error(ls, "repeat_remove on keep %s", name);
4049 		return;
4050 	}
4051 
4052 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4053 	if (!rv) {
4054 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4055 		log_error(ls, "repeat_remove on toss %s", name);
4056 		return;
4057 	}
4058 
4059 	/* use ls->remove_name2 to avoid conflict with shrink? */
4060 
4061 	spin_lock(&ls->ls_remove_spin);
4062 	ls->ls_remove_len = len;
4063 	memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4064 	spin_unlock(&ls->ls_remove_spin);
4065 	spin_unlock(&ls->ls_rsbtbl[b].lock);
4066 
4067 	rv = _create_message(ls, sizeof(struct dlm_message) + len,
4068 			     dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4069 	if (rv)
4070 		goto out;
4071 
4072 	memcpy(ms->m_extra, name, len);
4073 	ms->m_hash = hash;
4074 
4075 	send_message(mh, ms);
4076 
4077 out:
4078 	spin_lock(&ls->ls_remove_spin);
4079 	ls->ls_remove_len = 0;
4080 	memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4081 	spin_unlock(&ls->ls_remove_spin);
4082 }
4083 
4084 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4085 {
4086 	struct dlm_lkb *lkb;
4087 	struct dlm_rsb *r;
4088 	int from_nodeid;
4089 	int error, namelen = 0;
4090 
4091 	from_nodeid = ms->m_header.h_nodeid;
4092 
4093 	error = create_lkb(ls, &lkb);
4094 	if (error)
4095 		goto fail;
4096 
4097 	receive_flags(lkb, ms);
4098 	lkb->lkb_flags |= DLM_IFL_MSTCPY;
4099 	error = receive_request_args(ls, lkb, ms);
4100 	if (error) {
4101 		__put_lkb(ls, lkb);
4102 		goto fail;
4103 	}
4104 
4105 	/* The dir node is the authority on whether we are the master
4106 	   for this rsb or not, so if the master sends us a request, we should
4107 	   recreate the rsb if we've destroyed it.   This race happens when we
4108 	   send a remove message to the dir node at the same time that the dir
4109 	   node sends us a request for the rsb. */
4110 
4111 	namelen = receive_extralen(ms);
4112 
4113 	error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4114 			 R_RECEIVE_REQUEST, &r);
4115 	if (error) {
4116 		__put_lkb(ls, lkb);
4117 		goto fail;
4118 	}
4119 
4120 	lock_rsb(r);
4121 
4122 	if (r->res_master_nodeid != dlm_our_nodeid()) {
4123 		error = validate_master_nodeid(ls, r, from_nodeid);
4124 		if (error) {
4125 			unlock_rsb(r);
4126 			put_rsb(r);
4127 			__put_lkb(ls, lkb);
4128 			goto fail;
4129 		}
4130 	}
4131 
4132 	attach_lkb(r, lkb);
4133 	error = do_request(r, lkb);
4134 	send_request_reply(r, lkb, error);
4135 	do_request_effects(r, lkb, error);
4136 
4137 	unlock_rsb(r);
4138 	put_rsb(r);
4139 
4140 	if (error == -EINPROGRESS)
4141 		error = 0;
4142 	if (error)
4143 		dlm_put_lkb(lkb);
4144 	return 0;
4145 
4146  fail:
4147 	/* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4148 	   and do this receive_request again from process_lookup_list once
4149 	   we get the lookup reply.  This would avoid many repeated
4150 	   ENOTBLK request failures when the lookup reply designating us
4151 	   as master is delayed. */
4152 
4153 	/* We could repeatedly return -EBADR here if our send_remove() is
4154 	   delayed in being sent/arriving/being processed on the dir node.
4155 	   Another node would repeatedly look up the master, and the dir
4156 	   node would continue returning our nodeid until our send_remove
4157 	   took effect.
4158 
4159 	   We send another remove message in case our previous send_remove
4160 	   was lost/ignored/missed somehow. */
4161 
4162 	if (error != -ENOTBLK) {
4163 		log_limit(ls, "receive_request %x from %d %d",
4164 			  ms->m_lkid, from_nodeid, error);
4165 	}
4166 
4167 	if (namelen && error == -EBADR) {
4168 		send_repeat_remove(ls, ms->m_extra, namelen);
4169 		msleep(1000);
4170 	}
4171 
4172 	setup_stub_lkb(ls, ms);
4173 	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4174 	return error;
4175 }
4176 
4177 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4178 {
4179 	struct dlm_lkb *lkb;
4180 	struct dlm_rsb *r;
4181 	int error, reply = 1;
4182 
4183 	error = find_lkb(ls, ms->m_remid, &lkb);
4184 	if (error)
4185 		goto fail;
4186 
4187 	if (lkb->lkb_remid != ms->m_lkid) {
4188 		log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4189 			  "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4190 			  (unsigned long long)lkb->lkb_recover_seq,
4191 			  ms->m_header.h_nodeid, ms->m_lkid);
4192 		error = -ENOENT;
4193 		dlm_put_lkb(lkb);
4194 		goto fail;
4195 	}
4196 
4197 	r = lkb->lkb_resource;
4198 
4199 	hold_rsb(r);
4200 	lock_rsb(r);
4201 
4202 	error = validate_message(lkb, ms);
4203 	if (error)
4204 		goto out;
4205 
4206 	receive_flags(lkb, ms);
4207 
4208 	error = receive_convert_args(ls, lkb, ms);
4209 	if (error) {
4210 		send_convert_reply(r, lkb, error);
4211 		goto out;
4212 	}
4213 
4214 	reply = !down_conversion(lkb);
4215 
4216 	error = do_convert(r, lkb);
4217 	if (reply)
4218 		send_convert_reply(r, lkb, error);
4219 	do_convert_effects(r, lkb, error);
4220  out:
4221 	unlock_rsb(r);
4222 	put_rsb(r);
4223 	dlm_put_lkb(lkb);
4224 	return 0;
4225 
4226  fail:
4227 	setup_stub_lkb(ls, ms);
4228 	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4229 	return error;
4230 }
4231 
4232 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4233 {
4234 	struct dlm_lkb *lkb;
4235 	struct dlm_rsb *r;
4236 	int error;
4237 
4238 	error = find_lkb(ls, ms->m_remid, &lkb);
4239 	if (error)
4240 		goto fail;
4241 
4242 	if (lkb->lkb_remid != ms->m_lkid) {
4243 		log_error(ls, "receive_unlock %x remid %x remote %d %x",
4244 			  lkb->lkb_id, lkb->lkb_remid,
4245 			  ms->m_header.h_nodeid, ms->m_lkid);
4246 		error = -ENOENT;
4247 		dlm_put_lkb(lkb);
4248 		goto fail;
4249 	}
4250 
4251 	r = lkb->lkb_resource;
4252 
4253 	hold_rsb(r);
4254 	lock_rsb(r);
4255 
4256 	error = validate_message(lkb, ms);
4257 	if (error)
4258 		goto out;
4259 
4260 	receive_flags(lkb, ms);
4261 
4262 	error = receive_unlock_args(ls, lkb, ms);
4263 	if (error) {
4264 		send_unlock_reply(r, lkb, error);
4265 		goto out;
4266 	}
4267 
4268 	error = do_unlock(r, lkb);
4269 	send_unlock_reply(r, lkb, error);
4270 	do_unlock_effects(r, lkb, error);
4271  out:
4272 	unlock_rsb(r);
4273 	put_rsb(r);
4274 	dlm_put_lkb(lkb);
4275 	return 0;
4276 
4277  fail:
4278 	setup_stub_lkb(ls, ms);
4279 	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4280 	return error;
4281 }
4282 
4283 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4284 {
4285 	struct dlm_lkb *lkb;
4286 	struct dlm_rsb *r;
4287 	int error;
4288 
4289 	error = find_lkb(ls, ms->m_remid, &lkb);
4290 	if (error)
4291 		goto fail;
4292 
4293 	receive_flags(lkb, ms);
4294 
4295 	r = lkb->lkb_resource;
4296 
4297 	hold_rsb(r);
4298 	lock_rsb(r);
4299 
4300 	error = validate_message(lkb, ms);
4301 	if (error)
4302 		goto out;
4303 
4304 	error = do_cancel(r, lkb);
4305 	send_cancel_reply(r, lkb, error);
4306 	do_cancel_effects(r, lkb, error);
4307  out:
4308 	unlock_rsb(r);
4309 	put_rsb(r);
4310 	dlm_put_lkb(lkb);
4311 	return 0;
4312 
4313  fail:
4314 	setup_stub_lkb(ls, ms);
4315 	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4316 	return error;
4317 }
4318 
4319 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4320 {
4321 	struct dlm_lkb *lkb;
4322 	struct dlm_rsb *r;
4323 	int error;
4324 
4325 	error = find_lkb(ls, ms->m_remid, &lkb);
4326 	if (error)
4327 		return error;
4328 
4329 	r = lkb->lkb_resource;
4330 
4331 	hold_rsb(r);
4332 	lock_rsb(r);
4333 
4334 	error = validate_message(lkb, ms);
4335 	if (error)
4336 		goto out;
4337 
4338 	receive_flags_reply(lkb, ms);
4339 	if (is_altmode(lkb))
4340 		munge_altmode(lkb, ms);
4341 	grant_lock_pc(r, lkb, ms);
4342 	queue_cast(r, lkb, 0);
4343  out:
4344 	unlock_rsb(r);
4345 	put_rsb(r);
4346 	dlm_put_lkb(lkb);
4347 	return 0;
4348 }
4349 
4350 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4351 {
4352 	struct dlm_lkb *lkb;
4353 	struct dlm_rsb *r;
4354 	int error;
4355 
4356 	error = find_lkb(ls, ms->m_remid, &lkb);
4357 	if (error)
4358 		return error;
4359 
4360 	r = lkb->lkb_resource;
4361 
4362 	hold_rsb(r);
4363 	lock_rsb(r);
4364 
4365 	error = validate_message(lkb, ms);
4366 	if (error)
4367 		goto out;
4368 
4369 	queue_bast(r, lkb, ms->m_bastmode);
4370 	lkb->lkb_highbast = ms->m_bastmode;
4371  out:
4372 	unlock_rsb(r);
4373 	put_rsb(r);
4374 	dlm_put_lkb(lkb);
4375 	return 0;
4376 }
4377 
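/* A lookup sent to us as dir node.  If the lookup resolves to ourself,
   handle it directly as a request (the sender then gets a request_reply
   instead of a lookup_reply); otherwise reply with the master nodeid. */
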
4378 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4379 {
4380 	int len, error, ret_nodeid, from_nodeid, our_nodeid;
4381 
4382 	from_nodeid = ms->m_header.h_nodeid;
4383 	our_nodeid = dlm_our_nodeid();
4384 
4385 	len = receive_extralen(ms);
4386 
4387 	error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4388 				  &ret_nodeid, NULL);
4389 
4390 	/* Optimization: we're master so treat lookup as a request */
4391 	if (!error && ret_nodeid == our_nodeid) {
4392 		receive_request(ls, ms);
4393 		return;
4394 	}
4395 	send_lookup_reply(ls, ms, ret_nodeid, error);
4396 }
4397 
4398 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4399 {
4400 	char name[DLM_RESNAME_MAXLEN+1];
4401 	struct dlm_rsb *r;
4402 	uint32_t hash, b;
4403 	int rv, len, dir_nodeid, from_nodeid;
4404 
4405 	from_nodeid = ms->m_header.h_nodeid;
4406 
4407 	len = receive_extralen(ms);
4408 
4409 	if (len > DLM_RESNAME_MAXLEN) {
4410 		log_error(ls, "receive_remove from %d bad len %d",
4411 			  from_nodeid, len);
4412 		return;
4413 	}
4414 
4415 	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4416 	if (dir_nodeid != dlm_our_nodeid()) {
4417 		log_error(ls, "receive_remove from %d bad nodeid %d",
4418 			  from_nodeid, dir_nodeid);
4419 		return;
4420 	}
4421 
4422 	/* Look for name on rsbtbl.toss, if it's there, kill it.
4423 	   If it's on rsbtbl.keep, it's being used, and we should ignore this
4424 	   message.  This is an expected race between the dir node sending a
4425 	   request to the master node at the same time as the master node sends
4426 	   a remove to the dir node.  The resolution to that race is for the
4427 	   dir node to ignore the remove message, and the master node to
4428 	   recreate the master rsb when it gets a request from the dir node for
4429 	   an rsb it doesn't have. */
4430 
4431 	memset(name, 0, sizeof(name));
4432 	memcpy(name, ms->m_extra, len);
4433 
4434 	hash = jhash(name, len, 0);
4435 	b = hash & (ls->ls_rsbtbl_size - 1);
4436 
4437 	spin_lock(&ls->ls_rsbtbl[b].lock);
4438 
4439 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4440 	if (rv) {
4441 		/* verify the rsb is on keep list per comment above */
4442 		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4443 		if (rv) {
4444 			/* should not happen */
4445 			log_error(ls, "receive_remove from %d not found %s",
4446 				  from_nodeid, name);
4447 			spin_unlock(&ls->ls_rsbtbl[b].lock);
4448 			return;
4449 		}
4450 		if (r->res_master_nodeid != from_nodeid) {
4451 			/* should not happen */
4452 			log_error(ls, "receive_remove keep from %d master %d",
4453 				  from_nodeid, r->res_master_nodeid);
4454 			dlm_print_rsb(r);
4455 			spin_unlock(&ls->ls_rsbtbl[b].lock);
4456 			return;
4457 		}
4458 
4459 		log_debug(ls, "receive_remove from %d master %d first %x %s",
4460 			  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4461 			  name);
4462 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4463 		return;
4464 	}
4465 
4466 	if (r->res_master_nodeid != from_nodeid) {
4467 		log_error(ls, "receive_remove toss from %d master %d",
4468 			  from_nodeid, r->res_master_nodeid);
4469 		dlm_print_rsb(r);
4470 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4471 		return;
4472 	}
4473 
4474 	if (kref_put(&r->res_ref, kill_rsb)) {
4475 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4476 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4477 		dlm_free_rsb(r);
4478 	} else {
4479 		log_error(ls, "receive_remove from %d rsb ref error",
4480 			  from_nodeid);
4481 		dlm_print_rsb(r);
4482 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4483 	}
4484 }
4485 
4486 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4487 {
4488 	do_purge(ls, ms->m_nodeid, ms->m_pid);
4489 }
4490 
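/* Reply from the master to our earlier request (or to a lookup that the
   dir node, also being the master, handled as a request).  The result
   from do_request() on the master decides whether we grant the lock,
   leave it waiting, or retry (possibly after redoing the lookup). */
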
4491 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4492 {
4493 	struct dlm_lkb *lkb;
4494 	struct dlm_rsb *r;
4495 	int error, mstype, result;
4496 	int from_nodeid = ms->m_header.h_nodeid;
4497 
4498 	error = find_lkb(ls, ms->m_remid, &lkb);
4499 	if (error)
4500 		return error;
4501 
4502 	r = lkb->lkb_resource;
4503 	hold_rsb(r);
4504 	lock_rsb(r);
4505 
4506 	error = validate_message(lkb, ms);
4507 	if (error)
4508 		goto out;
4509 
4510 	mstype = lkb->lkb_wait_type;
4511 	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4512 	if (error) {
4513 		log_error(ls, "receive_request_reply %x remote %d %x result %d",
4514 			  lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4515 		dlm_dump_rsb(r);
4516 		goto out;
4517 	}
4518 
4519 	/* Optimization: the dir node was also the master, so it took our
4520 	   lookup as a request and sent request reply instead of lookup reply */
4521 	if (mstype == DLM_MSG_LOOKUP) {
4522 		r->res_master_nodeid = from_nodeid;
4523 		r->res_nodeid = from_nodeid;
4524 		lkb->lkb_nodeid = from_nodeid;
4525 	}
4526 
4527 	/* this is the value returned from do_request() on the master */
4528 	result = ms->m_result;
4529 
4530 	switch (result) {
4531 	case -EAGAIN:
4532 		/* request would block (be queued) on remote master */
4533 		queue_cast(r, lkb, -EAGAIN);
4534 		confirm_master(r, -EAGAIN);
4535 		unhold_lkb(lkb); /* undoes create_lkb() */
4536 		break;
4537 
4538 	case -EINPROGRESS:
4539 	case 0:
4540 		/* request was queued or granted on remote master */
4541 		receive_flags_reply(lkb, ms);
4542 		lkb->lkb_remid = ms->m_lkid;
4543 		if (is_altmode(lkb))
4544 			munge_altmode(lkb, ms);
4545 		if (result) {
4546 			add_lkb(r, lkb, DLM_LKSTS_WAITING);
4547 			add_timeout(lkb);
4548 		} else {
4549 			grant_lock_pc(r, lkb, ms);
4550 			queue_cast(r, lkb, 0);
4551 		}
4552 		confirm_master(r, result);
4553 		break;
4554 
4555 	case -EBADR:
4556 	case -ENOTBLK:
4557 		/* find_rsb failed to find rsb or rsb wasn't master */
4558 		log_limit(ls, "receive_request_reply %x from %d %d "
4559 			  "master %d dir %d first %x %s", lkb->lkb_id,
4560 			  from_nodeid, result, r->res_master_nodeid,
4561 			  r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4562 
4563 		if (r->res_dir_nodeid != dlm_our_nodeid() &&
4564 		    r->res_master_nodeid != dlm_our_nodeid()) {
4565 			/* cause _request_lock->set_master->send_lookup */
4566 			r->res_master_nodeid = 0;
4567 			r->res_nodeid = -1;
4568 			lkb->lkb_nodeid = -1;
4569 		}
4570 
4571 		if (is_overlap(lkb)) {
4572 			/* we'll ignore error in cancel/unlock reply */
4573 			queue_cast_overlap(r, lkb);
4574 			confirm_master(r, result);
4575 			unhold_lkb(lkb); /* undoes create_lkb() */
4576 		} else {
4577 			_request_lock(r, lkb);
4578 
4579 			if (r->res_master_nodeid == dlm_our_nodeid())
4580 				confirm_master(r, 0);
4581 		}
4582 		break;
4583 
4584 	default:
4585 		log_error(ls, "receive_request_reply %x error %d",
4586 			  lkb->lkb_id, result);
4587 	}
4588 
4589 	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4590 		log_debug(ls, "receive_request_reply %x result %d unlock",
4591 			  lkb->lkb_id, result);
4592 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4593 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4594 		send_unlock(r, lkb);
4595 	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4596 		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4597 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4598 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4599 		send_cancel(r, lkb);
4600 	} else {
4601 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4602 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4603 	}
4604  out:
4605 	unlock_rsb(r);
4606 	put_rsb(r);
4607 	dlm_put_lkb(lkb);
4608 	return 0;
4609 }
4610 
4611 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4612 				    struct dlm_message *ms)
4613 {
4614 	/* this is the value returned from do_convert() on the master */
4615 	switch (ms->m_result) {
4616 	case -EAGAIN:
4617 		/* convert would block (be queued) on remote master */
4618 		queue_cast(r, lkb, -EAGAIN);
4619 		break;
4620 
4621 	case -EDEADLK:
4622 		receive_flags_reply(lkb, ms);
4623 		revert_lock_pc(r, lkb);
4624 		queue_cast(r, lkb, -EDEADLK);
4625 		break;
4626 
4627 	case -EINPROGRESS:
4628 		/* convert was queued on remote master */
4629 		receive_flags_reply(lkb, ms);
4630 		if (is_demoted(lkb))
4631 			munge_demoted(lkb);
4632 		del_lkb(r, lkb);
4633 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4634 		add_timeout(lkb);
4635 		break;
4636 
4637 	case 0:
4638 		/* convert was granted on remote master */
4639 		receive_flags_reply(lkb, ms);
4640 		if (is_demoted(lkb))
4641 			munge_demoted(lkb);
4642 		grant_lock_pc(r, lkb, ms);
4643 		queue_cast(r, lkb, 0);
4644 		break;
4645 
4646 	default:
4647 		log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4648 			  lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4649 			  ms->m_result);
4650 		dlm_print_rsb(r);
4651 		dlm_print_lkb(lkb);
4652 	}
4653 }
4654 
4655 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4656 {
4657 	struct dlm_rsb *r = lkb->lkb_resource;
4658 	int error;
4659 
4660 	hold_rsb(r);
4661 	lock_rsb(r);
4662 
4663 	error = validate_message(lkb, ms);
4664 	if (error)
4665 		goto out;
4666 
4667 	/* stub reply can happen with waiters_mutex held */
4668 	error = remove_from_waiters_ms(lkb, ms);
4669 	if (error)
4670 		goto out;
4671 
4672 	__receive_convert_reply(r, lkb, ms);
4673  out:
4674 	unlock_rsb(r);
4675 	put_rsb(r);
4676 }
4677 
4678 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4679 {
4680 	struct dlm_lkb *lkb;
4681 	int error;
4682 
4683 	error = find_lkb(ls, ms->m_remid, &lkb);
4684 	if (error)
4685 		return error;
4686 
4687 	_receive_convert_reply(lkb, ms);
4688 	dlm_put_lkb(lkb);
4689 	return 0;
4690 }
4691 
4692 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4693 {
4694 	struct dlm_rsb *r = lkb->lkb_resource;
4695 	int error;
4696 
4697 	hold_rsb(r);
4698 	lock_rsb(r);
4699 
4700 	error = validate_message(lkb, ms);
4701 	if (error)
4702 		goto out;
4703 
4704 	/* stub reply can happen with waiters_mutex held */
4705 	error = remove_from_waiters_ms(lkb, ms);
4706 	if (error)
4707 		goto out;
4708 
4709 	/* this is the value returned from do_unlock() on the master */
4710 
4711 	switch (ms->m_result) {
4712 	case -DLM_EUNLOCK:
4713 		receive_flags_reply(lkb, ms);
4714 		remove_lock_pc(r, lkb);
4715 		queue_cast(r, lkb, -DLM_EUNLOCK);
4716 		break;
4717 	case -ENOENT:
4718 		break;
4719 	default:
4720 		log_error(r->res_ls, "receive_unlock_reply %x error %d",
4721 			  lkb->lkb_id, ms->m_result);
4722 	}
4723  out:
4724 	unlock_rsb(r);
4725 	put_rsb(r);
4726 }
4727 
4728 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4729 {
4730 	struct dlm_lkb *lkb;
4731 	int error;
4732 
4733 	error = find_lkb(ls, ms->m_remid, &lkb);
4734 	if (error)
4735 		return error;
4736 
4737 	_receive_unlock_reply(lkb, ms);
4738 	dlm_put_lkb(lkb);
4739 	return 0;
4740 }
4741 
4742 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4743 {
4744 	struct dlm_rsb *r = lkb->lkb_resource;
4745 	int error;
4746 
4747 	hold_rsb(r);
4748 	lock_rsb(r);
4749 
4750 	error = validate_message(lkb, ms);
4751 	if (error)
4752 		goto out;
4753 
4754 	/* stub reply can happen with waiters_mutex held */
4755 	error = remove_from_waiters_ms(lkb, ms);
4756 	if (error)
4757 		goto out;
4758 
4759 	/* this is the value returned from do_cancel() on the master */
4760 
4761 	switch (ms->m_result) {
4762 	case -DLM_ECANCEL:
4763 		receive_flags_reply(lkb, ms);
4764 		revert_lock_pc(r, lkb);
4765 		queue_cast(r, lkb, -DLM_ECANCEL);
4766 		break;
4767 	case 0:
4768 		break;
4769 	default:
4770 		log_error(r->res_ls, "receive_cancel_reply %x error %d",
4771 			  lkb->lkb_id, ms->m_result);
4772 	}
4773  out:
4774 	unlock_rsb(r);
4775 	put_rsb(r);
4776 }
4777 
4778 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4779 {
4780 	struct dlm_lkb *lkb;
4781 	int error;
4782 
4783 	error = find_lkb(ls, ms->m_remid, &lkb);
4784 	if (error)
4785 		return error;
4786 
4787 	_receive_cancel_reply(lkb, ms);
4788 	dlm_put_lkb(lkb);
4789 	return 0;
4790 }
4791 
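/* Reply from the dir node to our earlier lookup.  Record the master
   nodeid it returned (possibly ourself) and restart the request, or clear
   the master and redo the lookup if the remote node disclaimed being the
   dir node. */
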
4792 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4793 {
4794 	struct dlm_lkb *lkb;
4795 	struct dlm_rsb *r;
4796 	int error, ret_nodeid;
4797 	int do_lookup_list = 0;
4798 
4799 	error = find_lkb(ls, ms->m_lkid, &lkb);
4800 	if (error) {
4801 		log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4802 		return;
4803 	}
4804 
4805 	/* ms->m_result is the value returned by dlm_master_lookup on dir node
4806 	   FIXME: will a non-zero error ever be returned? */
4807 
4808 	r = lkb->lkb_resource;
4809 	hold_rsb(r);
4810 	lock_rsb(r);
4811 
4812 	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4813 	if (error)
4814 		goto out;
4815 
4816 	ret_nodeid = ms->m_nodeid;
4817 
4818 	/* We sometimes receive a request from the dir node for this
4819 	   rsb before we've received the dir node's lookup_reply for it.
4820 	   The request from the dir node implies we're the master, so we set
4821 	   ourself as master in receive_request_reply, and verify here that
4822 	   we are indeed the master. */
4823 
4824 	if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4825 		/* This should never happen */
4826 		log_error(ls, "receive_lookup_reply %x from %d ret %d "
4827 			  "master %d dir %d our %d first %x %s",
4828 			  lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4829 			  r->res_master_nodeid, r->res_dir_nodeid,
4830 			  dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4831 	}
4832 
4833 	if (ret_nodeid == dlm_our_nodeid()) {
4834 		r->res_master_nodeid = ret_nodeid;
4835 		r->res_nodeid = 0;
4836 		do_lookup_list = 1;
4837 		r->res_first_lkid = 0;
4838 	} else if (ret_nodeid == -1) {
4839 		/* the remote node doesn't believe it's the dir node */
4840 		log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4841 			  lkb->lkb_id, ms->m_header.h_nodeid);
4842 		r->res_master_nodeid = 0;
4843 		r->res_nodeid = -1;
4844 		lkb->lkb_nodeid = -1;
4845 	} else {
4846 		/* set_master() will set lkb_nodeid from r */
4847 		r->res_master_nodeid = ret_nodeid;
4848 		r->res_nodeid = ret_nodeid;
4849 	}
4850 
4851 	if (is_overlap(lkb)) {
4852 		log_debug(ls, "receive_lookup_reply %x unlock %x",
4853 			  lkb->lkb_id, lkb->lkb_flags);
4854 		queue_cast_overlap(r, lkb);
4855 		unhold_lkb(lkb); /* undoes create_lkb() */
4856 		goto out_list;
4857 	}
4858 
4859 	_request_lock(r, lkb);
4860 
4861  out_list:
4862 	if (do_lookup_list)
4863 		process_lookup_list(r);
4864  out:
4865 	unlock_rsb(r);
4866 	put_rsb(r);
4867 	dlm_put_lkb(lkb);
4868 }
4869 
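/* Dispatch a single message by type.  ENOENT from find_lkb() is expected
   for cancel, grant and bast (the lock may already be gone) and is only
   logged at debug level; for other message types it is an error. */
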
4870 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4871 			     uint32_t saved_seq)
4872 {
4873 	int error = 0, noent = 0;
4874 
4875 	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4876 		log_limit(ls, "receive %d from non-member %d %x %x %d",
4877 			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4878 			  ms->m_remid, ms->m_result);
4879 		return;
4880 	}
4881 
4882 	switch (ms->m_type) {
4883 
4884 	/* messages sent to a master node */
4885 
4886 	case DLM_MSG_REQUEST:
4887 		error = receive_request(ls, ms);
4888 		break;
4889 
4890 	case DLM_MSG_CONVERT:
4891 		error = receive_convert(ls, ms);
4892 		break;
4893 
4894 	case DLM_MSG_UNLOCK:
4895 		error = receive_unlock(ls, ms);
4896 		break;
4897 
4898 	case DLM_MSG_CANCEL:
4899 		noent = 1;
4900 		error = receive_cancel(ls, ms);
4901 		break;
4902 
4903 	/* messages sent from a master node (replies to above) */
4904 
4905 	case DLM_MSG_REQUEST_REPLY:
4906 		error = receive_request_reply(ls, ms);
4907 		break;
4908 
4909 	case DLM_MSG_CONVERT_REPLY:
4910 		error = receive_convert_reply(ls, ms);
4911 		break;
4912 
4913 	case DLM_MSG_UNLOCK_REPLY:
4914 		error = receive_unlock_reply(ls, ms);
4915 		break;
4916 
4917 	case DLM_MSG_CANCEL_REPLY:
4918 		error = receive_cancel_reply(ls, ms);
4919 		break;
4920 
4921 	/* messages sent from a master node (only two types of async msg) */
4922 
4923 	case DLM_MSG_GRANT:
4924 		noent = 1;
4925 		error = receive_grant(ls, ms);
4926 		break;
4927 
4928 	case DLM_MSG_BAST:
4929 		noent = 1;
4930 		error = receive_bast(ls, ms);
4931 		break;
4932 
4933 	/* messages sent to a dir node */
4934 
4935 	case DLM_MSG_LOOKUP:
4936 		receive_lookup(ls, ms);
4937 		break;
4938 
4939 	case DLM_MSG_REMOVE:
4940 		receive_remove(ls, ms);
4941 		break;
4942 
4943 	/* messages sent from a dir node (remove has no reply) */
4944 
4945 	case DLM_MSG_LOOKUP_REPLY:
4946 		receive_lookup_reply(ls, ms);
4947 		break;
4948 
4949 	/* other messages */
4950 
4951 	case DLM_MSG_PURGE:
4952 		receive_purge(ls, ms);
4953 		break;
4954 
4955 	default:
4956 		log_error(ls, "unknown message type %d", ms->m_type);
4957 	}
4958 
4959 	/*
4960 	 * When checking for ENOENT, we're checking the result of
4961 	 * find_lkb(m_remid):
4962 	 *
4963 	 * The lock id referenced in the message wasn't found.  This may
4964 	 * happen in normal usage for the async messages and cancel, so
4965 	 * only use log_debug for them.
4966 	 *
4967 	 * Some errors are expected and normal.
4968 	 */
4969 
4970 	if (error == -ENOENT && noent) {
4971 		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4972 			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4973 			  ms->m_lkid, saved_seq);
4974 	} else if (error == -ENOENT) {
4975 		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4976 			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4977 			  ms->m_lkid, saved_seq);
4978 
4979 		if (ms->m_type == DLM_MSG_CONVERT)
4980 			dlm_dump_rsb_hash(ls, ms->m_hash);
4981 	}
4982 
4983 	if (error == -EINVAL) {
4984 		log_error(ls, "receive %d inval from %d lkid %x remid %x "
4985 			  "saved_seq %u",
4986 			  ms->m_type, ms->m_header.h_nodeid,
4987 			  ms->m_lkid, ms->m_remid, saved_seq);
4988 	}
4989 }
4990 
4991 /* If the lockspace is in recovery mode (locking stopped), then normal
4992    messages are saved on the requestqueue for processing after recovery is
4993    done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4994    messages off the requestqueue before we process new ones. This occurs right
4995    after recovery completes when we transition from saving all messages on
4996    requestqueue, to processing all the saved messages, to processing new
4997    messages as they arrive. */
4998 
4999 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
5000 				int nodeid)
5001 {
5002 	if (dlm_locking_stopped(ls)) {
5003 		/* If we were a member of this lockspace, left, and rejoined,
5004 		   other nodes may still be sending us messages from the
5005 		   lockspace generation before we left. */
5006 		if (!ls->ls_generation) {
5007 			log_limit(ls, "receive %d from %d ignore old gen",
5008 				  ms->m_type, nodeid);
5009 			return;
5010 		}
5011 
5012 		dlm_add_requestqueue(ls, nodeid, ms);
5013 	} else {
5014 		dlm_wait_requestqueue(ls);
5015 		_receive_message(ls, ms, 0);
5016 	}
5017 }
5018 
5019 /* This is called by dlm_recoverd to process messages that were saved on
5020    the requestqueue. */
5021 
5022 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5023 			       uint32_t saved_seq)
5024 {
5025 	_receive_message(ls, ms, saved_seq);
5026 }
5027 
5028 /* This is called by the midcomms layer when something is received for
5029    the lockspace.  It could be either a MSG (normal message sent as part of
5030    standard locking activity) or an RCOM (recovery message sent as part of
5031    lockspace recovery). */
5032 
5033 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5034 {
5035 	struct dlm_header *hd = &p->header;
5036 	struct dlm_ls *ls;
5037 	int type = 0;
5038 
5039 	switch (hd->h_cmd) {
5040 	case DLM_MSG:
5041 		dlm_message_in(&p->message);
5042 		type = p->message.m_type;
5043 		break;
5044 	case DLM_RCOM:
5045 		dlm_rcom_in(&p->rcom);
5046 		type = p->rcom.rc_type;
5047 		break;
5048 	default:
5049 		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5050 		return;
5051 	}
5052 
5053 	if (hd->h_nodeid != nodeid) {
5054 		log_print("invalid h_nodeid %d from %d lockspace %x",
5055 			  hd->h_nodeid, nodeid, hd->h_lockspace);
5056 		return;
5057 	}
5058 
5059 	ls = dlm_find_lockspace_global(hd->h_lockspace);
5060 	if (!ls) {
5061 		if (dlm_config.ci_log_debug) {
5062 			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5063 				"%u from %d cmd %d type %d\n",
5064 				hd->h_lockspace, nodeid, hd->h_cmd, type);
5065 		}
5066 
5067 		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5068 			dlm_send_ls_not_ready(nodeid, &p->rcom);
5069 		return;
5070 	}
5071 
5072 	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5073 	   be inactive (in this ls) before transitioning to recovery mode */
5074 
5075 	down_read(&ls->ls_recv_active);
5076 	if (hd->h_cmd == DLM_MSG)
5077 		dlm_receive_message(ls, &p->message, nodeid);
5078 	else
5079 		dlm_receive_rcom(ls, &p->rcom, nodeid);
5080 	up_read(&ls->ls_recv_active);
5081 
5082 	dlm_put_lockspace(ls);
5083 }
5084 
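/* Part of dlm_recover_waiters_pre(): a conversion waiting on a failed
   master is completed locally with a stub -EINPROGRESS reply if it is a
   PR<->CW ("middle") conversion, and flagged for resend if it is an
   up-conversion; down-conversions never wait for a reply. */
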
5085 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5086 				   struct dlm_message *ms_stub)
5087 {
5088 	if (middle_conversion(lkb)) {
5089 		hold_lkb(lkb);
5090 		memset(ms_stub, 0, sizeof(struct dlm_message));
5091 		ms_stub->m_flags = DLM_IFL_STUB_MS;
5092 		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5093 		ms_stub->m_result = -EINPROGRESS;
5094 		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5095 		_receive_convert_reply(lkb, ms_stub);
5096 
5097 		/* Same special case as in receive_rcom_lock_args() */
5098 		lkb->lkb_grmode = DLM_LOCK_IV;
5099 		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5100 		unhold_lkb(lkb);
5101 
5102 	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5103 		lkb->lkb_flags |= DLM_IFL_RESEND;
5104 	}
5105 
5106 	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5107 	   conversions are async; there's no reply from the remote master */
5108 }
5109 
5110 /* A waiting lkb needs recovery if the master node has failed, or
5111    the master node is changing (only when no directory is used) */
5112 
5113 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5114 				 int dir_nodeid)
5115 {
5116 	if (dlm_no_directory(ls))
5117 		return 1;
5118 
5119 	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5120 		return 1;
5121 
5122 	return 0;
5123 }
5124 
5125 /* Recovery for locks that are waiting for replies from nodes that are now
5126    gone.  We can just complete unlocks and cancels by faking a reply from the
5127    dead node.  Requests and up-conversions we flag to be resent after
5128    recovery.  Down-conversions can just be completed with a fake reply like
5129    unlocks.  Conversions between PR and CW need special attention. */
5130 
5131 void dlm_recover_waiters_pre(struct dlm_ls *ls)
5132 {
5133 	struct dlm_lkb *lkb, *safe;
5134 	struct dlm_message *ms_stub;
5135 	int wait_type, stub_unlock_result, stub_cancel_result;
5136 	int dir_nodeid;
5137 
5138 	ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
5139 	if (!ms_stub)
5140 		return;
5141 
5142 	mutex_lock(&ls->ls_waiters_mutex);
5143 
5144 	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5145 
5146 		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5147 
5148 		/* exclude debug messages about unlocks because there can be so
5149 		   many and they aren't very interesting */
5150 
5151 		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5152 			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5153 				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5154 				  lkb->lkb_id,
5155 				  lkb->lkb_remid,
5156 				  lkb->lkb_wait_type,
5157 				  lkb->lkb_resource->res_nodeid,
5158 				  lkb->lkb_nodeid,
5159 				  lkb->lkb_wait_nodeid,
5160 				  dir_nodeid);
5161 		}
5162 
5163 		/* all outstanding lookups, regardless of destination, will be
5164 		   resent after recovery is done */
5165 
5166 		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5167 			lkb->lkb_flags |= DLM_IFL_RESEND;
5168 			continue;
5169 		}
5170 
5171 		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5172 			continue;
5173 
5174 		wait_type = lkb->lkb_wait_type;
5175 		stub_unlock_result = -DLM_EUNLOCK;
5176 		stub_cancel_result = -DLM_ECANCEL;
5177 
5178 		/* Main reply may have been received leaving a zero wait_type,
5179 		   but a reply for the overlapping op may not have been
5180 		   received.  In that case we need to fake the appropriate
5181 		   reply for the overlap op. */
5182 
5183 		if (!wait_type) {
5184 			if (is_overlap_cancel(lkb)) {
5185 				wait_type = DLM_MSG_CANCEL;
5186 				if (lkb->lkb_grmode == DLM_LOCK_IV)
5187 					stub_cancel_result = 0;
5188 			}
5189 			if (is_overlap_unlock(lkb)) {
5190 				wait_type = DLM_MSG_UNLOCK;
5191 				if (lkb->lkb_grmode == DLM_LOCK_IV)
5192 					stub_unlock_result = -ENOENT;
5193 			}
5194 
5195 			log_debug(ls, "rwpre overlap %x %x %d %d %d",
5196 				  lkb->lkb_id, lkb->lkb_flags, wait_type,
5197 				  stub_cancel_result, stub_unlock_result);
5198 		}
5199 
5200 		switch (wait_type) {
5201 
5202 		case DLM_MSG_REQUEST:
5203 			lkb->lkb_flags |= DLM_IFL_RESEND;
5204 			break;
5205 
5206 		case DLM_MSG_CONVERT:
5207 			recover_convert_waiter(ls, lkb, ms_stub);
5208 			break;
5209 
5210 		case DLM_MSG_UNLOCK:
5211 			hold_lkb(lkb);
5212 			memset(ms_stub, 0, sizeof(struct dlm_message));
5213 			ms_stub->m_flags = DLM_IFL_STUB_MS;
5214 			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5215 			ms_stub->m_result = stub_unlock_result;
5216 			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5217 			_receive_unlock_reply(lkb, ms_stub);
5218 			dlm_put_lkb(lkb);
5219 			break;
5220 
5221 		case DLM_MSG_CANCEL:
5222 			hold_lkb(lkb);
5223 			memset(ms_stub, 0, sizeof(struct dlm_message));
5224 			ms_stub->m_flags = DLM_IFL_STUB_MS;
5225 			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5226 			ms_stub->m_result = stub_cancel_result;
5227 			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5228 			_receive_cancel_reply(lkb, ms_stub);
5229 			dlm_put_lkb(lkb);
5230 			break;
5231 
5232 		default:
5233 			log_error(ls, "invalid lkb wait_type %d %d",
5234 				  lkb->lkb_wait_type, wait_type);
5235 		}
5236 		schedule();
5237 	}
5238 	mutex_unlock(&ls->ls_waiters_mutex);
5239 	kfree(ms_stub);
5240 }
5241 
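/* Return the first waiter flagged for resend, with an extra reference
   taken by hold_lkb(), or NULL if none remain. */
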
5242 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5243 {
5244 	struct dlm_lkb *lkb;
5245 	int found = 0;
5246 
5247 	mutex_lock(&ls->ls_waiters_mutex);
5248 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5249 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
5250 			hold_lkb(lkb);
5251 			found = 1;
5252 			break;
5253 		}
5254 	}
5255 	mutex_unlock(&ls->ls_waiters_mutex);
5256 
5257 	if (!found)
5258 		lkb = NULL;
5259 	return lkb;
5260 }
5261 
5262 /* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
5263    master or dir-node for r.  Processing the lkb may result in it being placed
5264    back on waiters. */
5265 
5266 /* We do this after normal locking has been enabled and any saved messages
5267    (in requestqueue) have been processed.  We should be confident that at
5268    this point we won't get or process a reply to any of these waiting
5269    operations.  But, new ops may be coming in on the rsbs/locks here from
5270    userspace or remotely. */
5271 
5272 /* There may have been an overlap unlock/cancel prior to recovery or after
5273    recovery.  If before, the lkb may still have a positive wait_count; if
5274    after, the overlap flag would just have been set and nothing new sent.
5275    We can be confident here that any replies to either the initial op or
5276    overlap ops prior to recovery have been received. */
5277 
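/* Summary of the actions taken below for each resend waiter (derived
   from the two switch statements in this function):

     wait_type        overlap unlock/cancel           no overlap
     ---------        ---------------------           ----------
     LOOKUP/REQUEST   cast -DLM_EUNLOCK or            resend request
                      -DLM_ECANCEL and drop the       (_request_lock)
                      create_lkb() reference
     CONVERT          cancel: cast -DLM_ECANCEL       resend convert
                      unlock: force unlock            (_convert_lock)
*/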
5278 int dlm_recover_waiters_post(struct dlm_ls *ls)
5279 {
5280 	struct dlm_lkb *lkb;
5281 	struct dlm_rsb *r;
5282 	int error = 0, mstype, err, oc, ou;
5283 
5284 	while (1) {
5285 		if (dlm_locking_stopped(ls)) {
5286 			log_debug(ls, "recover_waiters_post aborted");
5287 			error = -EINTR;
5288 			break;
5289 		}
5290 
5291 		lkb = find_resend_waiter(ls);
5292 		if (!lkb)
5293 			break;
5294 
5295 		r = lkb->lkb_resource;
5296 		hold_rsb(r);
5297 		lock_rsb(r);
5298 
5299 		mstype = lkb->lkb_wait_type;
5300 		oc = is_overlap_cancel(lkb);
5301 		ou = is_overlap_unlock(lkb);
5302 		err = 0;
5303 
5304 		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5305 			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5306 			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5307 			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5308 			  dlm_dir_nodeid(r), oc, ou);
5309 
5310 		/* At this point we assume that we won't get a reply to any
5311 		   previous op or overlap op on this lock.  First, do a big
5312 		   remove_from_waiters() for all previous ops. */
5313 
5314 		lkb->lkb_flags &= ~DLM_IFL_RESEND;
5315 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5316 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5317 		lkb->lkb_wait_type = 0;
5318 		/* drop all wait_count references; we still
5319 		 * hold a reference for this iteration.
5320 		 */
5321 		while (lkb->lkb_wait_count) {
5322 			lkb->lkb_wait_count--;
5323 			unhold_lkb(lkb);
5324 		}
5325 		mutex_lock(&ls->ls_waiters_mutex);
5326 		list_del_init(&lkb->lkb_wait_reply);
5327 		mutex_unlock(&ls->ls_waiters_mutex);
5328 
5329 		if (oc || ou) {
5330 			/* do an unlock or cancel instead of resending */
5331 			switch (mstype) {
5332 			case DLM_MSG_LOOKUP:
5333 			case DLM_MSG_REQUEST:
5334 				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5335 							-DLM_ECANCEL);
5336 				unhold_lkb(lkb); /* undoes create_lkb() */
5337 				break;
5338 			case DLM_MSG_CONVERT:
5339 				if (oc) {
5340 					queue_cast(r, lkb, -DLM_ECANCEL);
5341 				} else {
5342 					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5343 					_unlock_lock(r, lkb);
5344 				}
5345 				break;
5346 			default:
5347 				err = 1;
5348 			}
5349 		} else {
5350 			switch (mstype) {
5351 			case DLM_MSG_LOOKUP:
5352 			case DLM_MSG_REQUEST:
5353 				_request_lock(r, lkb);
5354 				if (is_master(r))
5355 					confirm_master(r, 0);
5356 				break;
5357 			case DLM_MSG_CONVERT:
5358 				_convert_lock(r, lkb);
5359 				break;
5360 			default:
5361 				err = 1;
5362 			}
5363 		}
5364 
5365 		if (err) {
5366 			log_error(ls, "waiter %x msg %d r_nodeid %d "
5367 				  "dir_nodeid %d overlap %d %d",
5368 				  lkb->lkb_id, mstype, r->res_nodeid,
5369 				  dlm_dir_nodeid(r), oc, ou);
5370 		}
5371 		unlock_rsb(r);
5372 		put_rsb(r);
5373 		dlm_put_lkb(lkb);
5374 	}
5375 
5376 	return error;
5377 }
5378 
5379 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5380 			      struct list_head *list)
5381 {
5382 	struct dlm_lkb *lkb, *safe;
5383 
5384 	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5385 		if (!is_master_copy(lkb))
5386 			continue;
5387 
5388 		/* don't purge lkbs we've added in recover_master_copy for
5389 		   the current recovery seq */
5390 
5391 		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5392 			continue;
5393 
5394 		del_lkb(r, lkb);
5395 
5396 		/* this put should free the lkb */
5397 		if (!dlm_put_lkb(lkb))
5398 			log_error(ls, "purged mstcpy lkb not released");
5399 	}
5400 }
5401 
5402 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5403 {
5404 	struct dlm_ls *ls = r->res_ls;
5405 
5406 	purge_mstcpy_list(ls, r, &r->res_grantqueue);
5407 	purge_mstcpy_list(ls, r, &r->res_convertqueue);
5408 	purge_mstcpy_list(ls, r, &r->res_waitqueue);
5409 }
5410 
5411 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5412 			    struct list_head *list,
5413 			    int nodeid_gone, unsigned int *count)
5414 {
5415 	struct dlm_lkb *lkb, *safe;
5416 
5417 	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5418 		if (!is_master_copy(lkb))
5419 			continue;
5420 
5421 		if ((lkb->lkb_nodeid == nodeid_gone) ||
5422 		    dlm_is_removed(ls, lkb->lkb_nodeid)) {
5423 
5424 			/* tell recover_lvb to invalidate the lvb
5425 			   because a node holding EX/PW failed */
5426 			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5427 			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5428 				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5429 			}
5430 
5431 			del_lkb(r, lkb);
5432 
5433 			/* this put should free the lkb */
5434 			if (!dlm_put_lkb(lkb))
5435 				log_error(ls, "purged dead lkb not released");
5436 
5437 			rsb_set_flag(r, RSB_RECOVER_GRANT);
5438 
5439 			(*count)++;
5440 		}
5441 	}
5442 }
5443 
5444 /* Get rid of locks held by nodes that are gone. */
5445 
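/* Only master-copy lkbs from departed nodes are removed here (see
   purge_dead_list above).  When locks are purged from an rsb, the
   RSB_RECOVER_GRANT flag is set so that dlm_recover_grant() below will
   revisit that rsb and grant anything that has become grantable. */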
5446 void dlm_recover_purge(struct dlm_ls *ls)
5447 {
5448 	struct dlm_rsb *r;
5449 	struct dlm_member *memb;
5450 	int nodes_count = 0;
5451 	int nodeid_gone = 0;
5452 	unsigned int lkb_count = 0;
5453 
5454 	/* cache one removed nodeid to optimize the common
5455 	   case of a single node removed */
5456 
5457 	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5458 		nodes_count++;
5459 		nodeid_gone = memb->nodeid;
5460 	}
5461 
5462 	if (!nodes_count)
5463 		return;
5464 
5465 	down_write(&ls->ls_root_sem);
5466 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5467 		hold_rsb(r);
5468 		lock_rsb(r);
5469 		if (is_master(r)) {
5470 			purge_dead_list(ls, r, &r->res_grantqueue,
5471 					nodeid_gone, &lkb_count);
5472 			purge_dead_list(ls, r, &r->res_convertqueue,
5473 					nodeid_gone, &lkb_count);
5474 			purge_dead_list(ls, r, &r->res_waitqueue,
5475 					nodeid_gone, &lkb_count);
5476 		}
5477 		unlock_rsb(r);
5478 		unhold_rsb(r);
5479 		cond_resched();
5480 	}
5481 	up_write(&ls->ls_root_sem);
5482 
5483 	if (lkb_count)
5484 		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5485 			  lkb_count, nodes_count);
5486 }
5487 
5488 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5489 {
5490 	struct rb_node *n;
5491 	struct dlm_rsb *r;
5492 
5493 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
5494 	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5495 		r = rb_entry(n, struct dlm_rsb, res_hashnode);
5496 
5497 		if (!rsb_flag(r, RSB_RECOVER_GRANT))
5498 			continue;
5499 		if (!is_master(r)) {
5500 			rsb_clear_flag(r, RSB_RECOVER_GRANT);
5501 			continue;
5502 		}
5503 		hold_rsb(r);
5504 		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5505 		return r;
5506 	}
5507 	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5508 	return NULL;
5509 }
5510 
5511 /*
5512  * Attempt to grant locks on resources that we are the master of.
5513  * Locks may have become grantable during recovery because locks
5514  * from departed nodes have been purged (or not rebuilt), allowing
5515  * previously blocked locks to now be granted.  The subset of rsb's
5516  * we are interested in are those with lkb's on either the convert or
5517  * waiting queues.
5518  *
5519  * Simplest would be to go through each master rsb and check for non-empty
5520  * convert or waiting queues, and attempt to grant on those rsbs.
5521  * Checking the queues requires lock_rsb, though, for which we'd need
5522  * to release the rsbtbl lock.  This would make iterating through all
5523  * rsb's very inefficient.  So, we rely on earlier recovery routines
5524  * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5525  * locks for.
5526  */
5527 
5528 void dlm_recover_grant(struct dlm_ls *ls)
5529 {
5530 	struct dlm_rsb *r;
5531 	int bucket = 0;
5532 	unsigned int count = 0;
5533 	unsigned int rsb_count = 0;
5534 	unsigned int lkb_count = 0;
5535 
5536 	while (1) {
5537 		r = find_grant_rsb(ls, bucket);
5538 		if (!r) {
5539 			if (bucket == ls->ls_rsbtbl_size - 1)
5540 				break;
5541 			bucket++;
5542 			continue;
5543 		}
5544 		rsb_count++;
5545 		count = 0;
5546 		lock_rsb(r);
5547 		/* the RECOVER_GRANT flag is checked in the grant path */
5548 		grant_pending_locks(r, &count);
5549 		rsb_clear_flag(r, RSB_RECOVER_GRANT);
5550 		lkb_count += count;
5551 		confirm_master(r, 0);
5552 		unlock_rsb(r);
5553 		put_rsb(r);
5554 		cond_resched();
5555 	}
5556 
5557 	if (lkb_count)
5558 		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5559 			  lkb_count, rsb_count);
5560 }
5561 
5562 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5563 					 uint32_t remid)
5564 {
5565 	struct dlm_lkb *lkb;
5566 
5567 	list_for_each_entry(lkb, head, lkb_statequeue) {
5568 		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5569 			return lkb;
5570 	}
5571 	return NULL;
5572 }
5573 
5574 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5575 				    uint32_t remid)
5576 {
5577 	struct dlm_lkb *lkb;
5578 
5579 	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5580 	if (lkb)
5581 		return lkb;
5582 	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5583 	if (lkb)
5584 		return lkb;
5585 	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5586 	if (lkb)
5587 		return lkb;
5588 	return NULL;
5589 }
5590 
5591 /* needs at least dlm_rcom + rcom_lock */
5592 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5593 				  struct dlm_rsb *r, struct dlm_rcom *rc)
5594 {
5595 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5596 
5597 	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5598 	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5599 	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5600 	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5601 	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5602 	lkb->lkb_flags |= DLM_IFL_MSTCPY;
5603 	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5604 	lkb->lkb_rqmode = rl->rl_rqmode;
5605 	lkb->lkb_grmode = rl->rl_grmode;
5606 	/* don't set lkb_status because add_lkb wants to set it itself */
5607 
5608 	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5609 	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5610 
5611 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5612 		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5613 			 sizeof(struct rcom_lock);
5614 		if (lvblen > ls->ls_lvblen)
5615 			return -EINVAL;
5616 		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5617 		if (!lkb->lkb_lvbptr)
5618 			return -ENOMEM;
5619 		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5620 	}
5621 
5622 	/* Conversions between PR and CW (middle modes) need special handling.
5623 	   The real granted mode of these converting locks cannot be determined
5624 	   until all locks have been rebuilt on the rsb (recover_conversion) */
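	/* Background note: PR and CW are incompatible with each other while
	   both are compatible with NL and CR, so whether a PR<->CW
	   conversion had actually been granted on the old master depends on
	   the other locks on the rsb, which may not have been rebuilt yet.
	   Setting grmode to IV and RSB_RECOVER_CONVERT below defers that
	   decision to recover_conversion(). */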
5625 
5626 	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5627 	    middle_conversion(lkb)) {
5628 		rl->rl_status = DLM_LKSTS_CONVERT;
5629 		lkb->lkb_grmode = DLM_LOCK_IV;
5630 		rsb_set_flag(r, RSB_RECOVER_CONVERT);
5631 	}
5632 
5633 	return 0;
5634 }
5635 
5636 /* This lkb may have been recovered in a previous aborted recovery so we need
5637    to check if the rsb already has an lkb with the given remote nodeid/lkid.
5638    If so we just send back a standard reply.  If not, we create a new lkb with
5639    the given values and send back our lkid.  We send back our lkid by sending
5640    back the rcom_lock struct we got but with the remid field filled in. */
5641 
5642 /* needs at least dlm_rcom + rcom_lock */
5643 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5644 {
5645 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5646 	struct dlm_rsb *r;
5647 	struct dlm_lkb *lkb;
5648 	uint32_t remid = 0;
5649 	int from_nodeid = rc->rc_header.h_nodeid;
5650 	int error;
5651 
5652 	if (rl->rl_parent_lkid) {
5653 		error = -EOPNOTSUPP;
5654 		goto out;
5655 	}
5656 
5657 	remid = le32_to_cpu(rl->rl_lkid);
5658 
5659 	/* In general we expect the rsb returned to be R_MASTER, but we don't
5660 	   have to require it.  Recovery of masters on one node can overlap
5661 	   recovery of locks on another node, so one node can send us MSTCPY
5662 	   locks before we've made ourselves master of this rsb.  We can still
5663 	   add new MSTCPY locks that we receive here without any harm; when
5664 	   we make ourselves master, dlm_recover_masters() won't touch the
5665 	   MSTCPY locks we've received early. */
5666 
5667 	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5668 			 from_nodeid, R_RECEIVE_RECOVER, &r);
5669 	if (error)
5670 		goto out;
5671 
5672 	lock_rsb(r);
5673 
5674 	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5675 		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5676 			  from_nodeid, remid);
5677 		error = -EBADR;
5678 		goto out_unlock;
5679 	}
5680 
5681 	lkb = search_remid(r, from_nodeid, remid);
5682 	if (lkb) {
5683 		error = -EEXIST;
5684 		goto out_remid;
5685 	}
5686 
5687 	error = create_lkb(ls, &lkb);
5688 	if (error)
5689 		goto out_unlock;
5690 
5691 	error = receive_rcom_lock_args(ls, lkb, r, rc);
5692 	if (error) {
5693 		__put_lkb(ls, lkb);
5694 		goto out_unlock;
5695 	}
5696 
5697 	attach_lkb(r, lkb);
5698 	add_lkb(r, lkb, rl->rl_status);
5699 	error = 0;
5700 	ls->ls_recover_locks_in++;
5701 
5702 	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5703 		rsb_set_flag(r, RSB_RECOVER_GRANT);
5704 
5705  out_remid:
5706 	/* this is the new value returned to the lock holder for
5707 	   saving in its process-copy lkb */
5708 	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5709 
5710 	lkb->lkb_recover_seq = ls->ls_recover_seq;
5711 
5712  out_unlock:
5713 	unlock_rsb(r);
5714 	put_rsb(r);
5715  out:
5716 	if (error && error != -EEXIST)
5717 		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5718 			  from_nodeid, remid, error);
5719 	rl->rl_result = cpu_to_le32(error);
5720 	return error;
5721 }
5722 
5723 /* needs at least dlm_rcom + rcom_lock */
5724 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5725 {
5726 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5727 	struct dlm_rsb *r;
5728 	struct dlm_lkb *lkb;
5729 	uint32_t lkid, remid;
5730 	int error, result;
5731 
5732 	lkid = le32_to_cpu(rl->rl_lkid);
5733 	remid = le32_to_cpu(rl->rl_remid);
5734 	result = le32_to_cpu(rl->rl_result);
5735 
5736 	error = find_lkb(ls, lkid, &lkb);
5737 	if (error) {
5738 		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5739 			  lkid, rc->rc_header.h_nodeid, remid, result);
5740 		return error;
5741 	}
5742 
5743 	r = lkb->lkb_resource;
5744 	hold_rsb(r);
5745 	lock_rsb(r);
5746 
5747 	if (!is_process_copy(lkb)) {
5748 		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5749 			  lkid, rc->rc_header.h_nodeid, remid, result);
5750 		dlm_dump_rsb(r);
5751 		unlock_rsb(r);
5752 		put_rsb(r);
5753 		dlm_put_lkb(lkb);
5754 		return -EINVAL;
5755 	}
5756 
5757 	switch (result) {
5758 	case -EBADR:
5759 		/* There's a chance the new master received our lock before
5760 		   dlm_recover_master_reply(); this wouldn't happen if we did
5761 		   a barrier between recover_masters and recover_locks. */
5762 
5763 		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5764 			  lkid, rc->rc_header.h_nodeid, remid, result);
5765 
5766 		dlm_send_rcom_lock(r, lkb);
5767 		goto out;
5768 	case -EEXIST:
5769 	case 0:
5770 		lkb->lkb_remid = remid;
5771 		break;
5772 	default:
5773 		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5774 			  lkid, rc->rc_header.h_nodeid, remid, result);
5775 	}
5776 
5777 	/* an ack for dlm_recover_locks() which waits for replies from
5778 	   all the locks it sends to new masters */
5779 	dlm_recovered_lock(r);
5780  out:
5781 	unlock_rsb(r);
5782 	put_rsb(r);
5783 	dlm_put_lkb(lkb);
5784 
5785 	return 0;
5786 }
5787 
5788 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5789 		     int mode, uint32_t flags, void *name, unsigned int namelen,
5790 		     unsigned long timeout_cs)
5791 {
5792 	struct dlm_lkb *lkb;
5793 	struct dlm_args args;
5794 	int error;
5795 
5796 	dlm_lock_recovery(ls);
5797 
5798 	error = create_lkb(ls, &lkb);
5799 	if (error) {
5800 		kfree(ua);
5801 		goto out;
5802 	}
5803 
5804 	if (flags & DLM_LKF_VALBLK) {
5805 		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5806 		if (!ua->lksb.sb_lvbptr) {
5807 			kfree(ua);
5808 			__put_lkb(ls, lkb);
5809 			error = -ENOMEM;
5810 			goto out;
5811 		}
5812 	}
5813 	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5814 			      fake_astfn, ua, fake_bastfn, &args);
5815 	if (error) {
5816 		kfree(ua->lksb.sb_lvbptr);
5817 		ua->lksb.sb_lvbptr = NULL;
5818 		kfree(ua);
5819 		__put_lkb(ls, lkb);
5820 		goto out;
5821 	}
5822 
5823 	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
5824 	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
5825 	   lock and that lkb_astparam is the dlm_user_args structure. */
5826 	lkb->lkb_flags |= DLM_IFL_USER;
5827 	error = request_lock(ls, lkb, name, namelen, &args);
5828 
5829 	switch (error) {
5830 	case 0:
5831 		break;
5832 	case -EINPROGRESS:
5833 		error = 0;
5834 		break;
5835 	case -EAGAIN:
5836 		error = 0;
5837 		fallthrough;
5838 	default:
5839 		__put_lkb(ls, lkb);
5840 		goto out;
5841 	}
5842 
5843 	/* add this new lkb to the per-process list of locks */
5844 	spin_lock(&ua->proc->locks_spin);
5845 	hold_lkb(lkb);
5846 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5847 	spin_unlock(&ua->proc->locks_spin);
5848  out:
5849 	dlm_unlock_recovery(ls);
5850 	return error;
5851 }
5852 
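/*
 * For context: dlm_user_request() and the other dlm_user_* functions in
 * this file are not called by applications directly; they are reached
 * from the lockspace character device write path in user.c, normally
 * driven by libdlm.  The sketch below is illustrative userspace code
 * only (not part of this file); it assumes the libdlm API roughly as
 * documented in dlm_ls_lock_wait(3), so the exact prototypes should be
 * checked against libdlm.h.
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#include <string.h>
#include <libdlm.h>

static int example_take_lock(void)
{
	dlm_lshandle_t ls;
	struct dlm_lksb lksb;
	int rv;

	/* creating (or opening) a lockspace */
	ls = dlm_create_lockspace("example", 0600);
	if (!ls)
		return -1;

	/* a blocking EX request on resource "res1"; the device write this
	   generates is what ultimately calls dlm_user_request() above */
	memset(&lksb, 0, sizeof(lksb));
	rv = dlm_ls_lock_wait(ls, LKM_EXMODE, &lksb, LKF_NOQUEUE,
			      "res1", strlen("res1"), 0, NULL, NULL, NULL);
	if (rv)
		return rv;

	/* release it; this reaches dlm_user_unlock() below */
	rv = dlm_ls_unlock_wait(ls, lksb.sb_lkid, 0, &lksb);
	dlm_release_lockspace("example", ls, 1);
	return rv;
}
#endif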
5853 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5854 		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5855 		     unsigned long timeout_cs)
5856 {
5857 	struct dlm_lkb *lkb;
5858 	struct dlm_args args;
5859 	struct dlm_user_args *ua;
5860 	int error;
5861 
5862 	dlm_lock_recovery(ls);
5863 
5864 	error = find_lkb(ls, lkid, &lkb);
5865 	if (error)
5866 		goto out;
5867 
5868 	/* user can change the params on its lock when it converts it, or
5869 	   add an lvb that didn't exist before */
5870 
5871 	ua = lkb->lkb_ua;
5872 
5873 	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5874 		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5875 		if (!ua->lksb.sb_lvbptr) {
5876 			error = -ENOMEM;
5877 			goto out_put;
5878 		}
5879 	}
5880 	if (lvb_in && ua->lksb.sb_lvbptr)
5881 		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5882 
5883 	ua->xid = ua_tmp->xid;
5884 	ua->castparam = ua_tmp->castparam;
5885 	ua->castaddr = ua_tmp->castaddr;
5886 	ua->bastparam = ua_tmp->bastparam;
5887 	ua->bastaddr = ua_tmp->bastaddr;
5888 	ua->user_lksb = ua_tmp->user_lksb;
5889 
5890 	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5891 			      fake_astfn, ua, fake_bastfn, &args);
5892 	if (error)
5893 		goto out_put;
5894 
5895 	error = convert_lock(ls, lkb, &args);
5896 
5897 	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5898 		error = 0;
5899  out_put:
5900 	dlm_put_lkb(lkb);
5901  out:
5902 	dlm_unlock_recovery(ls);
5903 	kfree(ua_tmp);
5904 	return error;
5905 }
5906 
5907 /*
5908  * The caller asks for an orphan lock on a given resource with a given mode.
5909  * If a matching lock exists, it's moved to the owner's list of locks and
5910  * the lkid is returned.
5911  */
5912 
5913 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5914 		     int mode, uint32_t flags, void *name, unsigned int namelen,
5915 		     unsigned long timeout_cs, uint32_t *lkid)
5916 {
5917 	struct dlm_lkb *lkb;
5918 	struct dlm_user_args *ua;
5919 	int found_other_mode = 0;
5920 	int found = 0;
5921 	int rv = 0;
5922 
5923 	mutex_lock(&ls->ls_orphans_mutex);
5924 	list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
5925 		if (lkb->lkb_resource->res_length != namelen)
5926 			continue;
5927 		if (memcmp(lkb->lkb_resource->res_name, name, namelen))
5928 			continue;
5929 		if (lkb->lkb_grmode != mode) {
5930 			found_other_mode = 1;
5931 			continue;
5932 		}
5933 
5934 		found = 1;
5935 		list_del_init(&lkb->lkb_ownqueue);
5936 		lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
5937 		*lkid = lkb->lkb_id;
5938 		break;
5939 	}
5940 	mutex_unlock(&ls->ls_orphans_mutex);
5941 
5942 	if (!found && found_other_mode) {
5943 		rv = -EAGAIN;
5944 		goto out;
5945 	}
5946 
5947 	if (!found) {
5948 		rv = -ENOENT;
5949 		goto out;
5950 	}
5951 
5952 	lkb->lkb_exflags = flags;
5953 	lkb->lkb_ownpid = (int) current->pid;
5954 
5955 	ua = lkb->lkb_ua;
5956 
5957 	ua->proc = ua_tmp->proc;
5958 	ua->xid = ua_tmp->xid;
5959 	ua->castparam = ua_tmp->castparam;
5960 	ua->castaddr = ua_tmp->castaddr;
5961 	ua->bastparam = ua_tmp->bastparam;
5962 	ua->bastaddr = ua_tmp->bastaddr;
5963 	ua->user_lksb = ua_tmp->user_lksb;
5964 
5965 	/*
5966 	 * The lkb reference from the ls_orphans list was not
5967 	 * removed above, and is now considered the reference
5968 	 * for the proc locks list.
5969 	 */
5970 
5971 	spin_lock(&ua->proc->locks_spin);
5972 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5973 	spin_unlock(&ua->proc->locks_spin);
5974  out:
5975 	kfree(ua_tmp);
5976 	return rv;
5977 }
5978 
5979 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5980 		    uint32_t flags, uint32_t lkid, char *lvb_in)
5981 {
5982 	struct dlm_lkb *lkb;
5983 	struct dlm_args args;
5984 	struct dlm_user_args *ua;
5985 	int error;
5986 
5987 	dlm_lock_recovery(ls);
5988 
5989 	error = find_lkb(ls, lkid, &lkb);
5990 	if (error)
5991 		goto out;
5992 
5993 	ua = lkb->lkb_ua;
5994 
5995 	if (lvb_in && ua->lksb.sb_lvbptr)
5996 		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5997 	if (ua_tmp->castparam)
5998 		ua->castparam = ua_tmp->castparam;
5999 	ua->user_lksb = ua_tmp->user_lksb;
6000 
6001 	error = set_unlock_args(flags, ua, &args);
6002 	if (error)
6003 		goto out_put;
6004 
6005 	error = unlock_lock(ls, lkb, &args);
6006 
6007 	if (error == -DLM_EUNLOCK)
6008 		error = 0;
6009 	/* from validate_unlock_args() */
6010 	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
6011 		error = 0;
6012 	if (error)
6013 		goto out_put;
6014 
6015 	spin_lock(&ua->proc->locks_spin);
6016 	/* dlm_user_add_cb() may have already taken lkb off the proc list */
6017 	if (!list_empty(&lkb->lkb_ownqueue))
6018 		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6019 	spin_unlock(&ua->proc->locks_spin);
6020  out_put:
6021 	dlm_put_lkb(lkb);
6022  out:
6023 	dlm_unlock_recovery(ls);
6024 	kfree(ua_tmp);
6025 	return error;
6026 }
6027 
6028 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6029 		    uint32_t flags, uint32_t lkid)
6030 {
6031 	struct dlm_lkb *lkb;
6032 	struct dlm_args args;
6033 	struct dlm_user_args *ua;
6034 	int error;
6035 
6036 	dlm_lock_recovery(ls);
6037 
6038 	error = find_lkb(ls, lkid, &lkb);
6039 	if (error)
6040 		goto out;
6041 
6042 	ua = lkb->lkb_ua;
6043 	if (ua_tmp->castparam)
6044 		ua->castparam = ua_tmp->castparam;
6045 	ua->user_lksb = ua_tmp->user_lksb;
6046 
6047 	error = set_unlock_args(flags, ua, &args);
6048 	if (error)
6049 		goto out_put;
6050 
6051 	error = cancel_lock(ls, lkb, &args);
6052 
6053 	if (error == -DLM_ECANCEL)
6054 		error = 0;
6055 	/* from validate_unlock_args() */
6056 	if (error == -EBUSY)
6057 		error = 0;
6058  out_put:
6059 	dlm_put_lkb(lkb);
6060  out:
6061 	dlm_unlock_recovery(ls);
6062 	kfree(ua_tmp);
6063 	return error;
6064 }
6065 
6066 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6067 {
6068 	struct dlm_lkb *lkb;
6069 	struct dlm_args args;
6070 	struct dlm_user_args *ua;
6071 	struct dlm_rsb *r;
6072 	int error;
6073 
6074 	dlm_lock_recovery(ls);
6075 
6076 	error = find_lkb(ls, lkid, &lkb);
6077 	if (error)
6078 		goto out;
6079 
6080 	ua = lkb->lkb_ua;
6081 
6082 	error = set_unlock_args(flags, ua, &args);
6083 	if (error)
6084 		goto out_put;
6085 
6086 	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6087 
6088 	r = lkb->lkb_resource;
6089 	hold_rsb(r);
6090 	lock_rsb(r);
6091 
6092 	error = validate_unlock_args(lkb, &args);
6093 	if (error)
6094 		goto out_r;
6095 	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6096 
6097 	error = _cancel_lock(r, lkb);
6098  out_r:
6099 	unlock_rsb(r);
6100 	put_rsb(r);
6101 
6102 	if (error == -DLM_ECANCEL)
6103 		error = 0;
6104 	/* from validate_unlock_args() */
6105 	if (error == -EBUSY)
6106 		error = 0;
6107  out_put:
6108 	dlm_put_lkb(lkb);
6109  out:
6110 	dlm_unlock_recovery(ls);
6111 	return error;
6112 }
6113 
6114 /* lkb's that are removed from the waiters list by revert are just left on the
6115    orphans list with the granted orphan locks, to be freed by purge */
6116 
6117 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6118 {
6119 	struct dlm_args args;
6120 	int error;
6121 
6122 	hold_lkb(lkb); /* reference for the ls_orphans list */
6123 	mutex_lock(&ls->ls_orphans_mutex);
6124 	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6125 	mutex_unlock(&ls->ls_orphans_mutex);
6126 
6127 	set_unlock_args(0, lkb->lkb_ua, &args);
6128 
6129 	error = cancel_lock(ls, lkb, &args);
6130 	if (error == -DLM_ECANCEL)
6131 		error = 0;
6132 	return error;
6133 }
6134 
6135 /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6136    granted.  Regardless of what rsb queue the lock is on, it's removed and
6137    freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
6138    if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
6139 
6140 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6141 {
6142 	struct dlm_args args;
6143 	int error;
6144 
6145 	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6146 			lkb->lkb_ua, &args);
6147 
6148 	error = unlock_lock(ls, lkb, &args);
6149 	if (error == -DLM_EUNLOCK)
6150 		error = 0;
6151 	return error;
6152 }
6153 
6154 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6155    (which does lock_rsb) due to deadlock with receiving a message that does
6156    lock_rsb followed by dlm_user_add_cb() */
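/* Roughly, the ordering problem (pieced together from the comments here
   and below): this path would hold ls_clear_proc_locks and then take
   lock_rsb inside unlock_proc_lock(), while the receive path takes
   lock_rsb first and then dlm_user_add_cb() needs ls_clear_proc_locks;
   taking the two in opposite orders could deadlock, so the mutex is
   dropped before unlock_proc_lock() is called. */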
6157 
6158 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6159 				     struct dlm_user_proc *proc)
6160 {
6161 	struct dlm_lkb *lkb = NULL;
6162 
6163 	mutex_lock(&ls->ls_clear_proc_locks);
6164 	if (list_empty(&proc->locks))
6165 		goto out;
6166 
6167 	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6168 	list_del_init(&lkb->lkb_ownqueue);
6169 
6170 	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6171 		lkb->lkb_flags |= DLM_IFL_ORPHAN;
6172 	else
6173 		lkb->lkb_flags |= DLM_IFL_DEAD;
6174  out:
6175 	mutex_unlock(&ls->ls_clear_proc_locks);
6176 	return lkb;
6177 }
6178 
6179 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6180    1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6181    which we clear here. */
6182 
6183 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6184    list, and no more device_writes should add lkb's to proc->locks list; so we
6185    shouldn't need to take asts_spin or locks_spin here.  this assumes that
6186    device reads/writes/closes are serialized -- FIXME: we may need to serialize
6187    them ourselves. */
6188 
6189 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6190 {
6191 	struct dlm_lkb *lkb, *safe;
6192 
6193 	dlm_lock_recovery(ls);
6194 
6195 	while (1) {
6196 		lkb = del_proc_lock(ls, proc);
6197 		if (!lkb)
6198 			break;
6199 		del_timeout(lkb);
6200 		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6201 			orphan_proc_lock(ls, lkb);
6202 		else
6203 			unlock_proc_lock(ls, lkb);
6204 
6205 		/* this removes the reference for the proc->locks list
6206 		   added by dlm_user_request; it may result in the lkb
6207 		   being freed */
6208 
6209 		dlm_put_lkb(lkb);
6210 	}
6211 
6212 	mutex_lock(&ls->ls_clear_proc_locks);
6213 
6214 	/* in-progress unlocks */
6215 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6216 		list_del_init(&lkb->lkb_ownqueue);
6217 		lkb->lkb_flags |= DLM_IFL_DEAD;
6218 		dlm_put_lkb(lkb);
6219 	}
6220 
6221 	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6222 		memset(&lkb->lkb_callbacks, 0,
6223 		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6224 		list_del_init(&lkb->lkb_cb_list);
6225 		dlm_put_lkb(lkb);
6226 	}
6227 
6228 	mutex_unlock(&ls->ls_clear_proc_locks);
6229 	dlm_unlock_recovery(ls);
6230 }
6231 
6232 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6233 {
6234 	struct dlm_lkb *lkb, *safe;
6235 
6236 	while (1) {
6237 		lkb = NULL;
6238 		spin_lock(&proc->locks_spin);
6239 		if (!list_empty(&proc->locks)) {
6240 			lkb = list_entry(proc->locks.next, struct dlm_lkb,
6241 					 lkb_ownqueue);
6242 			list_del_init(&lkb->lkb_ownqueue);
6243 		}
6244 		spin_unlock(&proc->locks_spin);
6245 
6246 		if (!lkb)
6247 			break;
6248 
6249 		lkb->lkb_flags |= DLM_IFL_DEAD;
6250 		unlock_proc_lock(ls, lkb);
6251 		dlm_put_lkb(lkb); /* ref from proc->locks list */
6252 	}
6253 
6254 	spin_lock(&proc->locks_spin);
6255 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6256 		list_del_init(&lkb->lkb_ownqueue);
6257 		lkb->lkb_flags |= DLM_IFL_DEAD;
6258 		dlm_put_lkb(lkb);
6259 	}
6260 	spin_unlock(&proc->locks_spin);
6261 
6262 	spin_lock(&proc->asts_spin);
6263 	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6264 		memset(&lkb->lkb_callbacks, 0,
6265 		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6266 		list_del_init(&lkb->lkb_cb_list);
6267 		dlm_put_lkb(lkb);
6268 	}
6269 	spin_unlock(&proc->asts_spin);
6270 }
6271 
6272 /* pid of 0 means purge all orphans */
6273 
6274 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6275 {
6276 	struct dlm_lkb *lkb, *safe;
6277 
6278 	mutex_lock(&ls->ls_orphans_mutex);
6279 	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6280 		if (pid && lkb->lkb_ownpid != pid)
6281 			continue;
6282 		unlock_proc_lock(ls, lkb);
6283 		list_del_init(&lkb->lkb_ownqueue);
6284 		dlm_put_lkb(lkb);
6285 	}
6286 	mutex_unlock(&ls->ls_orphans_mutex);
6287 }
6288 
6289 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6290 {
6291 	struct dlm_message *ms;
6292 	struct dlm_mhandle *mh;
6293 	int error;
6294 
6295 	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6296 				DLM_MSG_PURGE, &ms, &mh);
6297 	if (error)
6298 		return error;
6299 	ms->m_nodeid = nodeid;
6300 	ms->m_pid = pid;
6301 
6302 	return send_message(mh, ms);
6303 }
6304 
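/* dlm_user_purge() below either purges locally or, for another node,
   sends DLM_MSG_PURGE via send_purge(); the corresponding receive
   handler on that node then calls do_purge() with the same nodeid/pid
   (see the message receive paths earlier in this file). */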
6305 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6306 		   int nodeid, int pid)
6307 {
6308 	int error = 0;
6309 
6310 	if (nodeid && (nodeid != dlm_our_nodeid())) {
6311 		error = send_purge(ls, nodeid, pid);
6312 	} else {
6313 		dlm_lock_recovery(ls);
6314 		if (pid == current->pid)
6315 			purge_proc_locks(ls, proc);
6316 		else
6317 			do_purge(ls, nodeid, pid);
6318 		dlm_unlock_recovery(ls);
6319 	}
6320 	return error;
6321 }
6322 
6323