/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI (0xf000000)
#define DLM_HB_NODE_UP_PRI (0x8000000)

#define DLM_LOCKID_NAME_MAX 32

#define DLM_DOMAIN_NAME_MAX_LEN 255
#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL 5	// flush everything every 5 passes
#define DLM_THREAD_MS 200		// flush at least every 200 ms

#define DLM_HASH_SIZE_DEFAULT (1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES 1
#else
# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
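
/*
 * Worked example (assuming 4 KiB pages and an 8-byte struct hlist_head, as
 * on a typical 64-bit build): DLM_HASH_SIZE_DEFAULT of 1 << 17 (128 KiB)
 * gives DLM_HASH_PAGES = 32 and DLM_BUCKETS_PER_PAGE = 512, so
 * DLM_HASH_BUCKETS = 32 * 512 = 16384 hash buckets.
 */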

/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)

enum dlm_mle_type {
	DLM_MLE_BLOCK = 0,
	DLM_MLE_MASTER = 1,
	DLM_MLE_MIGRATION = 2,
	DLM_MLE_NUM_TYPES = 3,
};

struct dlm_master_list_entry {
	struct hlist_node master_hash_node;
	struct list_head hb_events;
	struct dlm_ctxt *dlm;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;
	int inuse;
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;
	u8 new_master;
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];
	unsigned int mnamelen;
	unsigned int mnamehash;
};

enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST = 1,
	DLM_ASTUNLOCK = 2,
};


#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME "$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN 9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
	    memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
		return 1;
	return 0;
}

#define DLM_RECO_STATE_ACTIVE 0x0001
#define DLM_RECO_STATE_FINALIZE 0x0002

struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head node_data;
	u8 new_master;
	u8 dead_node;
	u16 state;
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED = 1,
	DLM_CTXT_IN_SHUTDOWN = 2,
	DLM_CTXT_LEAVING = 3,
};

struct dlm_ctxt
{
	struct list_head list;
	struct hlist_head **lockres_hash;
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	spinlock_t track_lock;
	char *name;
	u8 node_num;
	u32 key;
	u8 joining_node;
	wait_queue_head_t dlm_join_events;
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct hlist_head **master_hash;
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dlm_debug_ctxt *dlm_debug_ctxt;
	struct dentry *dlm_debugfs_subroot;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;

	/* The filesystem specifies this at domain registration.  We
	 * cache it here to know what to tell other nodes. */
	struct dlm_protocol_version fs_locking_proto;
	/* This is the inter-dlm communication version */
	struct dlm_protocol_version dlm_locking_proto;
};

static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
{
	return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
}

static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
						 unsigned i)
{
	return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
			(i % DLM_BUCKETS_PER_PAGE);
}
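
/*
 * Illustrative sketch only (not compiled): a lookup is expected to hash the
 * lock name once with dlm_lockid_hash() and then pick a bucket from the
 * paged table with dlm_lockres_hash().  The real lookups
 * (__dlm_lookup_lockres*() declared below) also walk the bucket and compare
 * name, length and hash with dlm->spinlock held.
 */
#if 0
static struct hlist_head *dlm_example_bucket(struct dlm_ctxt *dlm,
					     const char *name,
					     unsigned int len)
{
	unsigned int hash = dlm_lockid_hash(name, len);

	return dlm_lockres_hash(dlm, hash);
}
#endif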

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};

struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};

struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};

struct dlm_work_item
{
	struct list_head list;
	dlm_workfunc_t *func;
	struct dlm_ctxt *dlm;
	void *data;
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	i->func = f;
	INIT_LIST_HEAD(&i->list);
	i->data = data;
	i->dlm = dlm;	/* must have already done a dlm_grab on this! */
}
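
/*
 * Illustrative sketch only (not compiled): a handler is expected to fill in
 * a work item with dlm_init_work_item() and hand it to the domain's
 * workqueue roughly as below.  The real dispatch sites live in the dlm .c
 * files and carry their own error handling; this only shows the pattern
 * implied by work_lock, work_list and dispatched_work above.
 */
#if 0
static void dlm_example_dispatch(struct dlm_ctxt *dlm,
				 struct dlm_work_item *item,
				 dlm_workfunc_t *func, void *data)
{
	/* the caller must already hold a reference from dlm_grab() */
	dlm_init_work_item(dlm, item, func, data);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	/* dlm_dispatch_work() drains dlm->work_list and calls item->func */
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
}
#endif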



static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED 0x00000001
#define DLM_LOCK_RES_RECOVERING 0x00000002
#define DLM_LOCK_RES_READY 0x00000004
#define DLM_LOCK_RES_DIRTY 0x00000008
#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
#define DLM_LOCK_RES_MIGRATING 0x00000020
#define DLM_LOCK_RES_DROPPING_REF 0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY 0x00001000
#define DLM_LOCK_RES_SETREF_INPROG 0x00002000

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

#define DLM_PURGE_INTERVAL_MS (8 * 1000)

struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;
	struct qstr lockname;
	struct kref refs;

	/*
	 * Please keep granted, converting, and blocked in this order,
	 * as some funcs want to iterate over all lists.
	 *
	 * All four lists are protected by the hash's reference.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/*
	 * These two lists require you to hold an additional reference
	 * while they are on the list.
	 */
	struct list_head dirty;
	struct list_head recovering;	// dlm_recovery_ctxt.resources list

	/* Added during init and removed during release */
	struct list_head tracking;	/* dlm->tracking_list */

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	unsigned long last_used;

	struct dlm_ctxt *dlm;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8 owner;	// node which owns the lock resource, or unknown
	u16 state;
	char lvb[DLM_LVB_LEN];
	unsigned int inflight_locks;
	unsigned int inflight_assert_workers;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;	// 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;
	s8 convert_type;
	s8 highest_blocked;
	u8 node;
};	// 16 bytes

struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;
	struct list_head ast_list;
	struct list_head bast_list;
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};


#define DLM_LKSB_UNUSED1 0x01
#define DLM_LKSB_PUT_LVB 0x02
#define DLM_LKSB_GET_LVB 0x04
#define DLM_LKSB_UNUSED2 0x08
#define DLM_LKSB_UNUSED3 0x10
#define DLM_LKSB_UNUSED4 0x20
#define DLM_LKSB_UNUSED5 0x40
#define DLM_LKSB_UNUSED6 0x80


enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST = 1,
	DLM_BLOCKED_LIST = 2,
};

static inline int dlm_lvb_is_empty(char *lvb)
{
	int i;
	for (i = 0; i < DLM_LVB_LEN; i++)
		if (lvb[i])
			return 0;
	return 1;
}

static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
{
	if (idx == DLM_GRANTED_LIST)
		return "granted";
	else if (idx == DLM_CONVERTING_LIST)
		return "converting";
	else if (idx == DLM_BLOCKED_LIST)
		return "blocked";
	else
		return "unknown";
}

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
	struct list_head *ret = NULL;
	if (idx == DLM_GRANTED_LIST)
		ret = &res->granted;
	else if (idx == DLM_CONVERTING_LIST)
		ret = &res->converting;
	else if (idx == DLM_BLOCKED_LIST)
		ret = &res->blocked;
	else
		BUG();
	return ret;
}




struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};


enum {
	DLM_MASTER_REQUEST_MSG = 500,
	DLM_UNUSED_MSG1 = 501,
	DLM_ASSERT_MASTER_MSG = 502,
	DLM_CREATE_LOCK_MSG = 503,
	DLM_CONVERT_LOCK_MSG = 504,
	DLM_PROXY_AST_MSG = 505,
	DLM_UNLOCK_LOCK_MSG = 506,
	DLM_DEREF_LOCKRES_MSG = 507,
	DLM_MIGRATE_REQUEST_MSG = 508,
	DLM_MIG_LOCKRES_MSG = 509,
	DLM_QUERY_JOIN_MSG = 510,
	DLM_ASSERT_JOINED_MSG = 511,
	DLM_CANCEL_JOIN_MSG = 512,
	DLM_EXIT_DOMAIN_MSG = 513,
	DLM_MASTER_REQUERY_MSG = 514,
	DLM_LOCK_REQUEST_MSG = 515,
	DLM_RECO_DATA_DONE_MSG = 516,
	DLM_BEGIN_RECO_MSG = 517,
	DLM_FINALIZE_RECO_MSG = 518,
	DLM_QUERY_REGION = 519,
	DLM_QUERY_NODEINFO = 520,
	DLM_BEGIN_EXIT_DOMAIN_MSG = 521,
};

struct dlm_reco_node_data
{
	int state;
	u8 node_num;
	struct list_head list;
};

enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING = 1,
	DLM_RECO_NODE_DATA_REQUESTED = 2,
	DLM_RECO_NODE_DATA_RECEIVING = 3,
	DLM_RECO_NODE_DATA_DONE = 4,
	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};


enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES = 1,
	DLM_MASTER_RESP_MAYBE = 2,
	DLM_MASTER_RESP_ERROR = 3,
};


struct dlm_master_request
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_RESPONSE_REASSERT 0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF 0x00000002

#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
#define DLM_ASSERT_MASTER_REQUERY 0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
struct dlm_assert_master
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MIGRATE_RESPONSE_MASTERY_REF 0x00000001

struct dlm_migrate_request
{
	u8 master;
	u8 new_master;
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY 0x01
#define DLM_MRES_MIGRATION 0x02
#define DLM_MRES_ALL_DONE 0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 *	dlm_migratable_lockres mres;
 *	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *	NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg))
 *	(roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *	(DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *	sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *	NET_MAX_PAYLOAD_BYTES
 *	(240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS 240

struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;
	u8 num_locks;		// locks sent in this structure
	u8 flags;
	__be32 total_locks;	// locks to be sent for this migration cookie
	__be64 mig_cookie;	// cookie for this lockres migration
				// or zero if not needed
	// 16 bytes
	u8 lockname[DLM_LOCKID_NAME_MAX];
	// 48 bytes
	u8 lvb[DLM_LVB_LEN];
	// 112 bytes
	struct dlm_migratable_lock ml[0];	// 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN \
	(sizeof(struct dlm_migratable_lockres) + \
	 (sizeof(struct dlm_migratable_lock) * \
	  DLM_MAX_MIGRATABLE_LOCKS) )

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED (NET_MAX_PAYLOAD_BYTES - \
				  DLM_MIG_LOCKRES_MAX_LEN)
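
/*
 * Worked out with the sizes quoted in the comment above:
 * DLM_MIG_LOCKRES_MAX_LEN = 112 + (240 * 16) = 3952 bytes, which leaves
 * DLM_MIG_LOCKRES_RESERVED = 4080 - 3952 = 128 bytes of a single
 * NET_MAX_PAYLOAD_BYTES message unused.
 */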

struct dlm_create_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_convert_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)

struct dlm_unlock_lock
{
	__be64 cookie;

	__be32 flags;
	__be16 pad1;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)

struct dlm_proxy_ast
{
	__be64 cookie;

	__be32 flags;
	u8 node_idx;
	u8 type;
	u8 blocked_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)

#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK = 1,
	JOIN_OK_NO_MAP = 2,
	JOIN_PROTOCOL_MISMATCH = 3,
};

struct dlm_query_join_packet {
	u8 code;	/* Response code.  dlm_minor and fs_minor
			   are only valid if this is JOIN_OK */
	u8 dlm_minor;	/* The minor version of the protocol the
			   dlm is speaking. */
	u8 fs_minor;	/* The minor version of the protocol the
			   filesystem is speaking. */
	u8 reserved;
};

union dlm_query_join_response {
	__be32 intval;
	struct dlm_query_join_packet packet;
};

struct dlm_lock_request
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

struct dlm_reco_data_done
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;

	/* unused for now */
	/* eventually we can use this to attempt
	 * lvb recovery based on each node's info */
	u8 reco_lvb[DLM_LVB_LEN];
};

struct dlm_begin_reco
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};


#define BITS_PER_BYTE 8
#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
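
/*
 * Worked example (assuming O2NM_MAX_NODES is 255, as in the o2 nodemanager):
 * BITS_TO_BYTES(255) = (255 + 7) / 8 = 32, so the packed node_map carried in
 * dlm_query_join_request below is 32 bytes on the wire.
 */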

struct dlm_query_join_request
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	struct dlm_protocol_version dlm_proto;
	struct dlm_protocol_version fs_proto;
	u8 domain[O2NM_MAX_NAME_LEN];
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
};

struct dlm_assert_joined
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_cancel_join
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_query_region {
	u8 qr_node;
	u8 qr_numregions;
	u8 qr_namelen;
	u8 pad1;
	u8 qr_domain[O2NM_MAX_NAME_LEN];
	u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};

struct dlm_node_info {
	u8 ni_nodenum;
	u8 pad1;
	__be16 ni_ipv4_port;
	__be32 ni_ipv4_address;
};

struct dlm_query_nodeinfo {
	u8 qn_nodenum;
	u8 qn_numnodes;
	u8 qn_namelen;
	u8 pad1;
	u8 qn_domain[O2NM_MAX_NAME_LEN];
	struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};

struct dlm_exit_domain
{
	u8 node_idx;
	u8 pad1[3];
};

struct dlm_finalize_reco
{
	u8 node_idx;
	u8 dead_node;
	u8 flags;
	u8 pad1;
	__be32 pad2;
};

struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
	enum dlm_status status = DLM_NORMAL;

	assert_spin_locked(&res->spinlock);

	if (res->state & DLM_LOCK_RES_RECOVERING)
		status = DLM_RECOVERING;
	else if (res->state & DLM_LOCK_RES_MIGRATING)
		status = DLM_MIGRATING;
	else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
		status = DLM_FORWARD;

	return status;
}

static inline u8 dlm_get_lock_cookie_node(u64 cookie)
{
	u8 ret;
	cookie >>= 56;
	ret = (u8)(cookie & 0xffULL);
	return ret;
}

static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
	unsigned long long ret;
	ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
	return ret;
}
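
/*
 * Illustrative sketch only (not compiled): the two accessors above imply
 * that a lock cookie packs the owning node number into the top 8 bits and a
 * per-node sequence number into the low 56 bits.  Composing one would look
 * roughly like this; the real encoding is done where locks are created.
 */
#if 0
static inline u64 dlm_example_make_cookie(u8 node, u64 seq)
{
	return ((u64)node << 56) | (seq & 0x00ffffffffffffffULL);
}
#endif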

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data);

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* This is called on every lookup, so it might be worth
	 * inlining. */
	kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);

int dlm_is_host_down(int errno);

struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);

void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit);
void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit);

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);
void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);

void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res);

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}

static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}

void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master);


int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);


int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);

/* will exit holding res->spinlock, but may drop in function */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_MIGRATING));
}

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);

/* create/destroy slab caches */
int dlm_init_master_caches(void);
void dlm_destroy_master_caches(void);

int dlm_init_lock_cache(void);
void dlm_destroy_lock_cache(void);

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);

static inline const char * dlm_lock_mode_name(int mode)
{
	switch (mode) {
	case LKM_EXMODE:
		return "EX";
	case LKM_PRMODE:
		return "PR";
	case LKM_NLMODE:
		return "NL";
	}
	return "UNKNOWN";
}


static inline int dlm_lock_compatible(int existing, int request)
{
	/* NO_LOCK compatible with all */
	if (request == LKM_NLMODE ||
	    existing == LKM_NLMODE)
		return 1;

	/* EX incompatible with all non-NO_LOCK */
	if (request == LKM_EXMODE)
		return 0;

	/* request must be PR, which is compatible with PR */
	if (existing == LKM_PRMODE)
		return 1;

	return 0;
}
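
/*
 * For reference, the compatibility matrix implied by the checks above
 * (1 = compatible), with existing mode in rows and requested mode in columns:
 *
 *			NL	PR	EX
 *	existing NL	1	1	1
 *	existing PR	1	1	0
 *	existing EX	1	0	0
 */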

static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct dlm_lock *tmplock;

	list_for_each_entry(tmplock, head, list) {
		if (tmplock == lock)
			return 1;
	}
	return 0;
}


static inline enum dlm_status dlm_err_to_dlm_status(int err)
{
	enum dlm_status ret;
	if (err == -ENOMEM)
		ret = DLM_SYSERR;
	else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
		ret = DLM_NOLOCKMGR;
	else if (err == -EINVAL)
		ret = DLM_BADPARAM;
	else if (err == -ENAMETOOLONG)
		ret = DLM_IVBUFLEN;
	else
		ret = DLM_BADARGS;
	return ret;
}


static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit;
	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}
	iter->curnode = bit;
	return bit;
}
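
/*
 * Illustrative sketch only (not compiled): typical iteration over a node
 * bitmap such as the domain map.  The snapshot is taken under dlm->spinlock,
 * and dlm_node_iter_next() returns -ENOENT once the map is exhausted.
 */
#if 0
static void dlm_example_walk_domain(struct dlm_ctxt *dlm)
{
	struct dlm_node_iter iter;
	int node;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((node = dlm_node_iter_next(&iter)) >= 0) {
		/* 'node' is the next member of the domain */
	}
}
#endif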

static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}

static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res,
					    u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner != res->owner)
		dlm_set_lockres_owner(dlm, res, owner);
}

#endif /* DLMCOMMON_H */