// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"
#include "util.h"

struct rq_entry {
	struct list_head list;
	uint32_t recover_seq;
	int nodeid;
	struct dlm_message request;
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid,
			  const struct dlm_message *ms)
{
	struct rq_entry *e;
	int length = le16_to_cpu(ms->m_header.h_length) -
		sizeof(struct dlm_message);

	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory len %d", length);
		return;
	}

	e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
	e->nodeid = nodeid;
	memcpy(&e->request, ms, sizeof(*ms));
	memcpy(&e->request.m_extra, ms->m_extra, length);

	atomic_inc(&ls->ls_requestqueue_cnt);
	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}
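
/*
 * Usage sketch (an illustration, not the verbatim caller; the real dispatch
 * lives in lock.c and its helper names may differ): the receive path checks
 * whether locking is stopped and either saves the message here or drains the
 * saved queue before handling the new message directly:
 *
 *	if (dlm_locking_stopped(ls))
 *		dlm_add_requestqueue(ls, nodeid, ms);
 *	else {
 *		dlm_wait_requestqueue(ls);
 *		dlm_receive_message(ls, ms, nodeid);
 *	}
 */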

/*
 * Called by dlm_recoverd to process normal messages saved while recovery was
 * happening.  Normal locking has been enabled before this is called.  dlm_recv,
 * upon receiving a message, will wait for all saved messages to be drained
 * here before processing the message it got.  If a new dlm_ls_stop() arrives
 * while we're processing these saved messages, it may block trying to suspend
 * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue.  In that
 * case, we don't abort since locking_stopped is still 0.  If dlm_recv is not
 * waiting for us, then this processing may be aborted due to locking_stopped.
 */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_message *ms;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		mutex_unlock(&ls->ls_requestqueue_mutex);

		ms = &e->request;

		log_limit(ls, "dlm_process_requestqueue msg %d from %d "
			  "lkid %x remid %x result %d seq %u",
			  le32_to_cpu(ms->m_type),
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
			  from_dlm_errno(le32_to_cpu(ms->m_result)),
			  e->recover_seq);

		dlm_receive_message_saved(ls, &e->request, e->recover_seq);

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
			wake_up(&ls->ls_requestqueue_wait);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}
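
/*
 * Sketch of the dlm_recoverd side (an illustration, not the verbatim code in
 * recoverd.c): once recovery has re-enabled locking, the saved messages are
 * replayed, and a new dlm_ls_stop() arriving mid-replay surfaces as -EINTR,
 * telling recoverd that another recovery pass is needed:
 *
 *	error = dlm_process_requestqueue(ls);
 *	if (error == -EINTR)
 *		... abandon this pass; a new recovery will run ...
 */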

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recv.  At
 * the same time, dlm_recv will start receiving new requests from remote nodes.
 * We want to delay dlm_recv processing new requests until dlm_recoverd has
 * finished processing the old saved requests.  We don't check for locking
 * stopped here because dlm_ls_stop won't stop locking until it's suspended us
 * (dlm_recv).
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	wait_event(ls->ls_requestqueue_wait,
		   atomic_read(&ls->ls_requestqueue_cnt) == 0);
}
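
/*
 * This wait pairs with the atomic_dec_and_test()/wake_up() calls in
 * dlm_process_requestqueue() and dlm_purge_requestqueue(): removing the last
 * queued entry drops ls_requestqueue_cnt to zero and wakes any dlm_recv
 * thread parked here.
 */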

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	__le32 type = ms->m_type;

	/* the ls is being cleaned up and freed by release_lockspace */
	if (!atomic_read(&ls->ls_count))
		return 1;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == cpu_to_le32(DLM_MSG_REMOVE) ||
	    type == cpu_to_le32(DLM_MSG_LOOKUP) ||
	    type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY))
		return 1;

	/* with a resource directory, remaining messages are kept; without
	   one, everything saved here is purged since requests are redone
	   after recovery */

	if (!dlm_no_directory(ls))
		return 0;

	return 1;
}

void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = &e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
				wake_up(&ls->ls_requestqueue_wait);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}
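
/*
 * Putting the pieces together (a hedged sketch; the exact call sites live in
 * member.c and recoverd.c and may differ): recovery purges stale saved
 * messages once the new member list is known, then replays the remainder
 * after locking is re-enabled:
 *
 *	dlm_purge_requestqueue(ls);            (drop stale entries)
 *	... rebuild directory, recover masters and locks ...
 *	error = dlm_process_requestqueue(ls);  (replay what remains)
 */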