/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"

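/* State changes are committed to device->state under the resource's
 * req_lock; the heavyweight follow-up actions run later in worker
 * context (after_state_ch()).  This struct carries the old/new state
 * pair and the state-change snapshot over to that deferred work. */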
struct after_state_chg_work {
	struct drbd_work w;
	struct drbd_device *device;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
	struct drbd_state_change *state_change;
};

enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};

static void count_objects(struct drbd_resource *resource,
			  unsigned int *n_devices,
			  unsigned int *n_connections)
{
	struct drbd_device *device;
	struct drbd_connection *connection;
	int vnr;

	*n_devices = 0;
	*n_connections = 0;

	idr_for_each_entry(&resource->devices, device, vnr)
		(*n_devices)++;
	for_each_connection(connection, resource)
		(*n_connections)++;
}

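/* Layout of the single allocation made below (illustrative sketch):
 *
 *   [struct drbd_state_change]
 *   [n_devices x struct drbd_device_state_change]
 *   [n_connections x struct drbd_connection_state_change]
 *   [n_devices * n_connections x struct drbd_peer_device_state_change]
 *
 * The peer_device entries are filled device-major, one entry per
 * connection in connection order (see remember_old_state()). */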
static struct drbd_state_change *alloc_state_change(unsigned int n_devices, unsigned int n_connections, gfp_t gfp)
{
	struct drbd_state_change *state_change;
	unsigned int size, n;

	size = sizeof(struct drbd_state_change) +
	       n_devices * sizeof(struct drbd_device_state_change) +
	       n_connections * sizeof(struct drbd_connection_state_change) +
	       n_devices * n_connections * sizeof(struct drbd_peer_device_state_change);
	state_change = kmalloc(size, gfp);
	if (!state_change)
		return NULL;
	state_change->n_devices = n_devices;
	state_change->n_connections = n_connections;
	state_change->devices = (void *)(state_change + 1);
	state_change->connections = (void *)&state_change->devices[n_devices];
	state_change->peer_devices = (void *)&state_change->connections[n_connections];
	state_change->resource->resource = NULL;
	for (n = 0; n < n_devices; n++)
		state_change->devices[n].device = NULL;
	for (n = 0; n < n_connections; n++)
		state_change->connections[n].connection = NULL;
	return state_change;
}

struct drbd_state_change *remember_old_state(struct drbd_resource *resource, gfp_t gfp)
{
	struct drbd_state_change *state_change;
	struct drbd_device *device;
	unsigned int n_devices;
	struct drbd_connection *connection;
	unsigned int n_connections;
	int vnr;

	struct drbd_device_state_change *device_state_change;
	struct drbd_peer_device_state_change *peer_device_state_change;
	struct drbd_connection_state_change *connection_state_change;

	/* Caller holds the req_lock spinlock.
	 * No state, no device IDR, no connection lists can change. */
	count_objects(resource, &n_devices, &n_connections);
	state_change = alloc_state_change(n_devices, n_connections, gfp);
	if (!state_change)
		return NULL;

	kref_get(&resource->kref);
	state_change->resource->resource = resource;
	state_change->resource->role[OLD] =
		conn_highest_role(first_connection(resource));
	state_change->resource->susp[OLD] = resource->susp;
	state_change->resource->susp_nod[OLD] = resource->susp_nod;
	state_change->resource->susp_fen[OLD] = resource->susp_fen;

	connection_state_change = state_change->connections;
	for_each_connection(connection, resource) {
		kref_get(&connection->kref);
		connection_state_change->connection = connection;
		connection_state_change->cstate[OLD] =
			connection->cstate;
		connection_state_change->peer_role[OLD] =
			conn_highest_peer(connection);
		connection_state_change++;
	}

	device_state_change = state_change->devices;
	peer_device_state_change = state_change->peer_devices;
	idr_for_each_entry(&resource->devices, device, vnr) {
		kref_get(&device->kref);
		device_state_change->device = device;
		device_state_change->disk_state[OLD] = device->state.disk;

		/* The peer_devices for each device have to be enumerated in
		   the order of the connections. We may not use for_each_peer_device() here. */
		for_each_connection(connection, resource) {
			struct drbd_peer_device *peer_device;

			peer_device = conn_peer_device(connection, device->vnr);
			peer_device_state_change->peer_device = peer_device;
			peer_device_state_change->disk_state[OLD] =
				device->state.pdsk;
			peer_device_state_change->repl_state[OLD] =
				max_t(enum drbd_conns,
				      C_WF_REPORT_PARAMS, device->state.conn);
			peer_device_state_change->resync_susp_user[OLD] =
				device->state.user_isp;
			peer_device_state_change->resync_susp_peer[OLD] =
				device->state.peer_isp;
			peer_device_state_change->resync_susp_dependency[OLD] =
				device->state.aftr_isp;
			peer_device_state_change++;
		}
		device_state_change++;
	}

	return state_change;
}

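/* Typical lifecycle of a snapshot (sketch based on _drbd_set_state()):
 * remember_old_state() under req_lock, commit the new state to
 * device->state, remember_new_state() to fill in the [NEW] slots, then
 * hand the snapshot to the after-state-change work, which finally drops
 * the references via forget_state_change(). */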
static void remember_new_state(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change;
	struct drbd_resource *resource;
	unsigned int n;

	if (!state_change)
		return;

	resource_state_change = &state_change->resource[0];
	resource = resource_state_change->resource;

	resource_state_change->role[NEW] =
		conn_highest_role(first_connection(resource));
	resource_state_change->susp[NEW] = resource->susp;
	resource_state_change->susp_nod[NEW] = resource->susp_nod;
	resource_state_change->susp_fen[NEW] = resource->susp_fen;

	for (n = 0; n < state_change->n_devices; n++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n];
		struct drbd_device *device = device_state_change->device;

		device_state_change->disk_state[NEW] = device->state.disk;
	}

	for (n = 0; n < state_change->n_connections; n++) {
		struct drbd_connection_state_change *connection_state_change =
			&state_change->connections[n];
		struct drbd_connection *connection =
			connection_state_change->connection;

		connection_state_change->cstate[NEW] = connection->cstate;
		connection_state_change->peer_role[NEW] =
			conn_highest_peer(connection);
	}

	for (n = 0; n < state_change->n_devices * state_change->n_connections; n++) {
		struct drbd_peer_device_state_change *peer_device_state_change =
			&state_change->peer_devices[n];
		struct drbd_device *device =
			peer_device_state_change->peer_device->device;
		union drbd_dev_state state = device->state;

		peer_device_state_change->disk_state[NEW] = state.pdsk;
		peer_device_state_change->repl_state[NEW] =
			max_t(enum drbd_conns, C_WF_REPORT_PARAMS, state.conn);
		peer_device_state_change->resync_susp_user[NEW] =
			state.user_isp;
		peer_device_state_change->resync_susp_peer[NEW] =
			state.peer_isp;
		peer_device_state_change->resync_susp_dependency[NEW] =
			state.aftr_isp;
	}
}

void copy_old_to_new_state_change(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;

#define OLD_TO_NEW(x) \
	(x[NEW] = x[OLD])

	OLD_TO_NEW(resource_state_change->role);
	OLD_TO_NEW(resource_state_change->susp);
	OLD_TO_NEW(resource_state_change->susp_nod);
	OLD_TO_NEW(resource_state_change->susp_fen);

	for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
		struct drbd_connection_state_change *connection_state_change =
				&state_change->connections[n_connection];

		OLD_TO_NEW(connection_state_change->peer_role);
		OLD_TO_NEW(connection_state_change->cstate);
	}

	for (n_device = 0; n_device < state_change->n_devices; n_device++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n_device];

		OLD_TO_NEW(device_state_change->disk_state);
	}

	n_peer_devices = state_change->n_devices * state_change->n_connections;
	for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
		struct drbd_peer_device_state_change *p =
			&state_change->peer_devices[n_peer_device];

		OLD_TO_NEW(p->disk_state);
		OLD_TO_NEW(p->repl_state);
		OLD_TO_NEW(p->resync_susp_user);
		OLD_TO_NEW(p->resync_susp_peer);
		OLD_TO_NEW(p->resync_susp_dependency);
	}

#undef OLD_TO_NEW
}

void forget_state_change(struct drbd_state_change *state_change)
{
	unsigned int n;

	if (!state_change)
		return;

	if (state_change->resource->resource)
		kref_put(&state_change->resource->resource->kref, drbd_destroy_resource);
	for (n = 0; n < state_change->n_devices; n++) {
		struct drbd_device *device = state_change->devices[n].device;

		if (device)
			kref_put(&device->kref, drbd_destroy_device);
	}
	for (n = 0; n < state_change->n_connections; n++) {
		struct drbd_connection *connection =
			state_change->connections[n].connection;

		if (connection)
			kref_put(&connection->kref, drbd_destroy_connection);
	}
	kfree(state_change);
}

static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_device *device, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags,
			   struct drbd_state_change *);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn);

static inline bool is_susp(union drbd_state s)
{
	return s.susp || s.susp_nod || s.susp_fen;
}

bool conn_all_vols_unconf(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = true;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.disk != D_DISKLESS ||
		    device->state.conn != C_STANDALONE ||
		    device->state.role != R_SECONDARY) {
			rv = false;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

/* Unfortunately the states were not correctly ordered when they
   were defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}

static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}

enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
	enum drbd_role role = R_UNKNOWN;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		role = max_role(role, device->state.role);
	}
	rcu_read_unlock();

	return role;
}

enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		peer = max_role(peer, device->state.peer);
	}
	rcu_read_unlock();

	return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{
	enum drbd_conns conn = C_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		conn = min_t(enum drbd_conns, conn, device->state.conn);
	}
	rcu_read_unlock();

	return conn;
}

static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;
	bool rv = true;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
			rv = false;
			break;
		}
	rcu_read_unlock();

	return rv;
}

static void wake_up_all_devices(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		wake_up(&peer_device->device->state_wait);
	rcu_read_unlock();
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @device:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_device *device,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
		(os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
}

static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}

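/* Illustrative: callers build the (mask, val) pair with the NS() macro,
 * e.g. NS(conn, C_CONNECTED) selects only the conn field in the mask and
 * carries the new value in val, so apply_mask_val() leaves all other
 * state fields untouched. */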
enum drbd_state_rv
drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	ns = apply_mask_val(drbd_read_state(device), mask, val);
	rv = _drbd_set_state(device, ns, f, NULL);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_device *device,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(device, CS_HARD, mask, val);
}

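/* Typical use (sketch): a caller recording a fact, e.g. a failed local
 * disk, would call drbd_force_state(device, NS(disk, D_FAILED));
 * CS_HARD bypasses the soft-transition checks in _drbd_set_state(). */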
static enum drbd_state_rv
_req_st_cond(struct drbd_device *device, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv >= SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* cont waiting, otherwise fail. */

	if (!cl_wide_st_chg(device, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(device, ns);
		if (rv >= SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
			if (rv >= SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

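/* How the cluster-wide handshake resolves (sketch): drbd_req_state()
 * sends the request to the peer and then waits on state_wait with
 * _req_st_cond() as the condition.  The receiver sets CL_ST_CHG_SUCCESS
 * or CL_ST_CHG_FAIL once the peer's reply arrives; until then
 * _req_st_cond() keeps returning SS_UNKNOWN_ERROR (== 0), i.e. "keep
 * waiting", as long as the transition would still be valid. */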
/**
 * drbd_req_state() - Perform a possibly cluster-wide state change
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_device *device, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(device->state_mutex);

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(device, os, ns)) {
		rv = is_valid_state(device, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(first_peer_device(device), mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		wait_event(device->state_wait,
			(rv = _req_st_cond(device, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&device->resource->req_lock, flags);
		ns = apply_mask_val(drbd_read_state(device), mask, val);
		rv = _drbd_set_state(device, ns, f, &done);
	} else {
		rv = _drbd_set_state(device, ns, f, &done);
	}

	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(device->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_device *device, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(device->state_wait,
		   (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}

enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	BUG_ON(f & CS_SERIALIZE);

	wait_event_cmd(device->state_wait,
		       (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE,
		       mutex_unlock(device->state_mutex),
		       mutex_lock(device->state_mutex));

	return rv;
}

static void print_st(struct drbd_device *device, const char *name, union drbd_state ns)
{
	drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}

void print_st_err(struct drbd_device *device, union drbd_state os,
	          union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(device, " state", os);
	print_st(device, "wanted", ns);
}

static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
			       enum chg_state_flags flags)
{
	char *pbp;
	pbp = pb;
	*pbp = 0;

	if (ns.role != os.role && flags & CS_DC_ROLE)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer && flags & CS_DC_PEER)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn && flags & CS_DC_CONN)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk && flags & CS_DC_DISK)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));

	return pbp - pb;
}

static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);

	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);

	if (pbp != pb)
		drbd_info(device, "%s\n", pb);
}

static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags);

	if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));

	if (pbp != pb)
		drbd_info(connection, "%s\n", pb);
}

/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @device:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_device *device, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;
	struct net_conf *nc;

	rcu_read_lock();
	fp = FP_DONT_CARE;
	if (get_ldev(device)) {
		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
		put_ldev(device);
	}

	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (nc) {
		if (!nc->two_primaries && ns.role == R_PRIMARY) {
			if (ns.peer == R_PRIMARY)
				rv = SS_TWO_PRIMARIES;
			else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
				rv = SS_O_VOL_PEER_PRI;
		}
	}

	if (rv <= 0)
		goto out; /* already found a reason to abort */
	else if (ns.role == R_SECONDARY && device->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (nc->verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  first_peer_device(device)->connection->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
		 ns.pdsk == D_UNKNOWN)
		rv = SS_NEED_CONNECTION;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

out:
	rcu_read_unlock();

	return rv;
}

/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * This function limits state transitions that may be declined by DRBD,
 * i.e. user requests (aka soft transitions).
 * @os:		old state.
 * @ns:		new state.
 * @connection:	DRBD connection.
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	/* While establishing a connection only allow cstate to change.
	   Delay/refuse role changes, detach, attach etc... (they do not touch cstate) */
	if (test_bit(STATE_SENT, &connection->flags) &&
	    !((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) ||
	      (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
		rv = SS_IN_TRANSIENT_STATE;

	/* Do not promote during resync handshake triggered by "force primary".
	 * This is a hack. It should really be rejected by the peer during the
	 * cluster wide state change request. */
	if (os.role != R_PRIMARY && ns.role == R_PRIMARY
		&& ns.pdsk == D_UP_TO_DATE
		&& ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS
		&& (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn))
			rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
	    os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
		rv = SS_OUTDATE_WO_CONN;

	return rv;
}

static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	/* no change -> nothing to do, at least for the connection part */
	if (oc == nc)
		return SS_NOTHING_TO_DO;

	/* disconnect of an unconfigured connection does not make sense */
	if (oc == C_STANDALONE && nc == C_DISCONNECTING)
		return SS_ALREADY_STANDALONE;

	/* from C_STANDALONE, we start with C_UNCONNECTED */
	if (oc == C_STANDALONE && nc != C_UNCONNECTED)
		return SS_NEED_CONNECTION;

	/* When establishing a connection we need to go through WF_REPORT_PARAMS!
	   Necessary to do the right thing upon invalidate-remote on a disconnected resource */
	if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
		return SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		return SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		return SS_IN_TRANSIENT_STATE;

	return SS_SUCCESS;
}

/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * This limits hard state transitions. Hard state transitions are facts that are
 * imposed on DRBD by the environment. E.g. disk broke or network broke down.
 * But those hard state transitions are still not allowed to do everything.
 * @os:		old state.
 * @ns:		new state.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	return rv;
}

static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn)
{
	static const char *msg_table[] = {
		[NO_WARNING] = "",
		[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
		[ABORTED_RESYNC] = "Resync aborted.",
		[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
		[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
		[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
	};

	if (warn != NO_WARNING)
		drbd_warn(device, "%s\n", msg_table[warn]);
}

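/* Worked example for the min/max clamping below (illustrative):
 * requesting C_SYNC_TARGET while the local disk is D_UP_TO_DATE caps
 * ns.disk down to disk_max == D_INCONSISTENT, since a sync target's
 * data is by definition inconsistent until the resync finishes.
 * Raising a too-low disk state to disk_min/pdsk_min additionally emits
 * an IMPLICITLY_UPGRADED_* warning; downgrades are silent. */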
/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @device:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn:	output: which sanitize_state_warnings warning to emit, if any.
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	if (warn)
		*warn = NO_WARNING;

	fp = FP_DONT_CARE;
	if (get_ldev(device)) {
		rcu_read_lock();
		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
		rcu_read_unlock();
		put_ldev(device);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn)
			*warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(device, D_NEGOTIATING)) {
		if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = device->new_state_tmp.disk;
			ns.pdsk = device->new_state_tmp.pdsk;
		} else {
			if (warn)
				*warn = CONNECTION_LOST_NEGOTIATING;
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(device);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_DISK;
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_PDSK;
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}

void drbd_resume_al(struct drbd_device *device)
{
	if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
		drbd_info(device, "Resumed AL updates\n");
}

/* helper for _drbd_set_state */
static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
{
	if (first_peer_device(device)->connection->agreed_pro_version < 90)
		device->ov_start_sector = 0;
	device->rs_total = drbd_bm_bits(device);
	device->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		device->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector);
		if (bit >= device->rs_total) {
			device->ov_start_sector =
				BM_BIT_TO_SECT(device->rs_total - 1);
			device->rs_total = 1;
		} else
			device->rs_total -= bit;
		device->ov_position = device->ov_start_sector;
	}
	device->ov_left = device->rs_total;
}

/**
 * _drbd_set_state() - Set a new DRBD state
 * @device:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after after_state_ch() has finished
 *
 * Caller needs to hold req_lock. Do not call directly.
 */
enum drbd_state_rv
_drbd_set_state(struct drbd_device *device, union drbd_state ns,
	        enum chg_state_flags flags, struct completion *done)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	enum sanitize_state_warnings ssw;
	struct after_state_chg_work *ascw;
	struct drbd_state_change *state_change;

	os = drbd_read_state(device);

	ns = sanitize_state(device, os, ns, &ssw);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/*  pre-state-change checks ; only look at ns  */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(device, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(device, os) == rv)
				rv = is_valid_soft_transition(os, ns, connection);
		} else
			rv = is_valid_soft_transition(os, ns, connection);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(device, os, ns, rv);
		return rv;
	}

	print_sanitize_warnings(device, ssw);

	drbd_pr_state_change(device, os, ns, flags);

	/* Display changes to the susp* flags that were caused by the call to
	   sanitize_state(). Only display it here if we were not called from
	   _conn_request_state() */
	if (!(flags & CS_DC_SUSP))
		conn_pr_state_change(connection, os, ns,
				     (flags & ~CS_DC_MASK) | CS_DC_SUSP);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&device->local_cnt);

	if (!is_sync_state(os.conn) && is_sync_state(ns.conn))
		clear_bit(RS_DONE, &device->flags);

	/* FIXME: Have any flags been set earlier in this function already? */
	state_change = remember_old_state(device->resource, GFP_ATOMIC);

	/* changes to local_cnt and device flags should be visible before
	 * changes to state, which again should be visible before anything else
	 * depending on that change happens. */
	smp_wmb();
	device->state.i = ns.i;
	device->resource->susp = ns.susp;
	device->resource->susp_nod = ns.susp_nod;
	device->resource->susp_fen = ns.susp_fen;
	smp_wmb();

	remember_new_state(state_change);

	/* put replicated vs not-replicated requests in separate epochs */
	if (drbd_should_do_remote((union drbd_dev_state)os.i) !=
	    drbd_should_do_remote((union drbd_dev_state)ns.i))
		start_new_tl_epoch(connection);

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(device, "attached to UUIDs");

	/* Wake up role changes, that were delayed because of connection establishing */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
	    no_peer_wf_report_params(connection)) {
		clear_bit(STATE_SENT, &connection->flags);
		wake_up_all_devices(connection);
	}

	wake_up(&device->misc_wait);
	wake_up(&device->state_wait);
	wake_up(&connection->ping_wait);

	/* Aborted verify run, or we reached the stop sector.
	 * Log the last position, unless end-of-device. */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn <= C_CONNECTED) {
		device->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
		if (device->ov_left)
			drbd_info(device, "Online Verify reached sector %llu\n",
				(unsigned long long)device->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		drbd_info(device, "Syncer continues.\n");
		device->rs_paused += (long)jiffies
				  -(long)device->rs_mark_time[device->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&device->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		drbd_info(device, "Resync suspended\n");
		device->rs_mark_time[device->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(device, ns.conn);
		device->rs_start = now;
		device->rs_last_sect_ev = 0;
		device->ov_last_oos_size = 0;
		device->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			device->rs_mark_left[i] = device->ov_left;
			device->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(device);

		if (ns.conn == C_VERIFY_S) {
			drbd_info(device, "Starting Online Verify from sector %llu\n",
					(unsigned long long)device->ov_position);
			mod_timer(&device->resync_timer, jiffies);
		}
	}

	if (get_ldev(device)) {
		u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		mdf &= ~MDF_AL_CLEAN;
		if (test_bit(CRASHED_PRIMARY, &device->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (device->state.role == R_PRIMARY ||
		    (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (device->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (device->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (device->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != device->ldev->md.flags) {
			device->ldev->md.flags = mdf;
			drbd_md_mark_dirty(device);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
		put_ldev(device);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &device->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&connection->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&connection->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_WF_CONNECTION &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&connection->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
		drbd_resume_al(device);
		connection->connect_cnt++;
	}

	/* remember last attach time so request_timer_fn() won't
	 * kill newly established sessions while we are still trying to thaw
	 * previously frozen IO */
	if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
	    ns.disk > D_NEGOTIATING)
		device->last_reattach_jif = jiffies;

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->device = device;
		ascw->done = done;
		ascw->state_change = state_change;
		drbd_queue_work(&connection->sender_work,
				&ascw->w);
	} else {
		drbd_err(device, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_device *device = ascw->device;

	after_state_ch(device, ascw->os, ascw->ns, ascw->flags, ascw->state_change);
	forget_state_change(ascw->state_change);
	if (ascw->flags & CS_WAIT_COMPLETE)
		complete(ascw->done);
	kfree(ascw);

	return 0;
}

static void abw_start_sync(struct drbd_device *device, int rv)
{
	if (rv) {
		drbd_err(device, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (device->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(device, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	/* open coded non-blocking drbd_suspend_io(device); */
	atomic_inc(&device->suspend_cnt);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	drbd_resume_io(device);

	return rv;
}

void notify_resource_state_change(struct sk_buff *skb,
				  unsigned int seq,
				  struct drbd_resource_state_change *resource_state_change,
				  enum drbd_notification_type type)
{
	struct drbd_resource *resource = resource_state_change->resource;
	struct resource_info resource_info = {
		.res_role = resource_state_change->role[NEW],
		.res_susp = resource_state_change->susp[NEW],
		.res_susp_nod = resource_state_change->susp_nod[NEW],
		.res_susp_fen = resource_state_change->susp_fen[NEW],
	};

	notify_resource_state(skb, seq, resource, &resource_info, type);
}

void notify_connection_state_change(struct sk_buff *skb,
				    unsigned int seq,
				    struct drbd_connection_state_change *connection_state_change,
				    enum drbd_notification_type type)
{
	struct drbd_connection *connection = connection_state_change->connection;
	struct connection_info connection_info = {
		.conn_connection_state = connection_state_change->cstate[NEW],
		.conn_role = connection_state_change->peer_role[NEW],
	};

	notify_connection_state(skb, seq, connection, &connection_info, type);
}

void notify_device_state_change(struct sk_buff *skb,
				unsigned int seq,
				struct drbd_device_state_change *device_state_change,
				enum drbd_notification_type type)
{
	struct drbd_device *device = device_state_change->device;
	struct device_info device_info = {
		.dev_disk_state = device_state_change->disk_state[NEW],
	};

	notify_device_state(skb, seq, device, &device_info, type);
}

void notify_peer_device_state_change(struct sk_buff *skb,
				     unsigned int seq,
				     struct drbd_peer_device_state_change *p,
				     enum drbd_notification_type type)
{
	struct drbd_peer_device *peer_device = p->peer_device;
	struct peer_device_info peer_device_info = {
		.peer_repl_state = p->repl_state[NEW],
		.peer_disk_state = p->disk_state[NEW],
		.peer_resync_susp_user = p->resync_susp_user[NEW],
		.peer_resync_susp_peer = p->resync_susp_peer[NEW],
		.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
	};

	notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
}

static void broadcast_state_change(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
	bool resource_state_has_changed;
	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
	void (*last_func)(struct sk_buff *, unsigned int, void *,
			  enum drbd_notification_type) = NULL;
	void *uninitialized_var(last_arg);

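/* Notifications are emitted with a one-entry delay: each
 * REMEMBER_STATE_CHANGE() first flushes the previously remembered
 * notification with NOTIFY_CONTINUES set, then records the new one.
 * The trailing FINAL_STATE_CHANGE() at the bottom of this function thus
 * sends the last notification without NOTIFY_CONTINUES, marking the end
 * of this group of related state change events. */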
#define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
#define FINAL_STATE_CHANGE(type) \
	({ if (last_func) \
		last_func(NULL, 0, last_arg, type); \
	})
#define REMEMBER_STATE_CHANGE(func, arg, type) \
	({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \
	   last_func = (typeof(last_func))func; \
	   last_arg = arg; \
	 })

	mutex_lock(&notification_mutex);

	resource_state_has_changed =
	    HAS_CHANGED(resource_state_change->role) ||
	    HAS_CHANGED(resource_state_change->susp) ||
	    HAS_CHANGED(resource_state_change->susp_nod) ||
	    HAS_CHANGED(resource_state_change->susp_fen);

	if (resource_state_has_changed)
		REMEMBER_STATE_CHANGE(notify_resource_state_change,
				      resource_state_change, NOTIFY_CHANGE);

	for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
		struct drbd_connection_state_change *connection_state_change =
				&state_change->connections[n_connection];

		if (HAS_CHANGED(connection_state_change->peer_role) ||
		    HAS_CHANGED(connection_state_change->cstate))
			REMEMBER_STATE_CHANGE(notify_connection_state_change,
					      connection_state_change, NOTIFY_CHANGE);
	}

	for (n_device = 0; n_device < state_change->n_devices; n_device++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n_device];

		if (HAS_CHANGED(device_state_change->disk_state))
			REMEMBER_STATE_CHANGE(notify_device_state_change,
					      device_state_change, NOTIFY_CHANGE);
	}

	n_peer_devices = state_change->n_devices * state_change->n_connections;
	for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
		struct drbd_peer_device_state_change *p =
			&state_change->peer_devices[n_peer_device];

		if (HAS_CHANGED(p->disk_state) ||
		    HAS_CHANGED(p->repl_state) ||
		    HAS_CHANGED(p->resync_susp_user) ||
		    HAS_CHANGED(p->resync_susp_peer) ||
		    HAS_CHANGED(p->resync_susp_dependency))
			REMEMBER_STATE_CHANGE(notify_peer_device_state_change,
					      p, NOTIFY_CHANGE);
	}

	FINAL_STATE_CHANGE(NOTIFY_CHANGE);
	mutex_unlock(&notification_mutex);

#undef HAS_CHANGED
#undef FINAL_STATE_CHANGE
#undef REMEMBER_STATE_CHANGE
}

/* takes old and new peer disk state */
static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns)
{
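	/* The peer disk held current data (Inconsistent or better, and
	 * neither Unknown nor Outdated), and according to the new state
	 * it no longer does. */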
	if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED)
	&&  (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED))
		return true;

	/* Scenario, starting with normal operation
	 * Connected Primary/Secondary UpToDate/UpToDate
	 * NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen)
	 * ...
	 * Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!)
	 */
	if (os == D_UNKNOWN
	&&  (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED))
		return true;

	return false;
}

/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @device:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	state change flags.
 * @state_change:	state change to broadcast as notifications.
 */
static void after_state_ch(struct drbd_device *device, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags,
			   struct drbd_state_change *state_change)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	struct sib_info sib;

	broadcast_state_change(state_change);

	sib.sib_reason = SIB_STATE_CHANGE;
	sib.os = os;
	sib.ns = ns;

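	/* Both our disk and the peer's are UpToDate again: a crashed-primary
	 * episode is over, so clear our CRASHED_PRIMARY flag and the peer's
	 * crashed-primary bit in its UUID flags. */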
	if ((os.disk != D_UP_TO_DATE || os.pdsk != D_UP_TO_DATE)
	&&  (ns.disk == D_UP_TO_DATE && ns.pdsk == D_UP_TO_DATE)) {
		clear_bit(CRASHED_PRIMARY, &device->flags);
		if (device->p_uuid)
			device->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	/* Inform userspace about the change... */
	drbd_bcast_event(device, &sib);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(device, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

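	/* IO was suspended because of no data (susp_nod).  If the connection
	 * came back, resend the transfer log; if the local disk finished
	 * attaching and is UpToDate again, restart the frozen disk IO. */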
	if (ns.susp_nod) {
		enum drbd_req_event what = NOTHING;

		spin_lock_irq(&device->resource->req_lock);
		if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
			what = RESEND;

		if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
		    conn_lowest_disk(connection) == D_UP_TO_DATE)
			what = RESTART_FROZEN_DISK_IO;

		if (resource->susp_nod && what != NOTHING) {
			_tl_restart(connection, what);
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_nod = 1 } },
					    (union drbd_state) { { .susp_nod = 0 } },
					    CS_VERBOSE);
		}
		spin_unlock_irq(&device->resource->req_lock);
	}

	if (ns.susp_fen) {
		spin_lock_irq(&device->resource->req_lock);
		if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
			/* case2: The connection was established again: */
			struct drbd_peer_device *peer_device;
			int vnr;

			rcu_read_lock();
			idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
				clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
			rcu_read_unlock();

			/* We should actively create a new uuid, _before_
			 * we resume/resend, if the peer is diskless
			 * (recovery from a multiple error scenario).
			 * Currently, this happens with a slight delay
			 * below when checking lost_contact_to_peer_data() ...
			 */
			_tl_restart(connection, RESEND);
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE);
		}
		spin_unlock_irq(&device->resource->req_lock);
	}

	/* Became sync source.  With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    connection->agreed_pro_version >= 96 && get_ldev(device)) {
		drbd_gen_and_send_sync_uuid(peer_device);
		put_ldev(device);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS &&
	    ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) {      /* attach on the peer */
		/* we probably will start a resync soon.
		 * make sure those things are properly reset. */
		device->rs_total = 0;
		device->rs_failed = 0;
		atomic_set(&device->rs_pending_cnt, 0);
		drbd_rs_cancel_all(device);

		drbd_send_uuids(peer_device);
		drbd_send_state(peer_device, ns);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    device->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) {
		if (get_ldev(device)) {
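			/* If either node is Primary, our data is UpToDate and
			 * we have not diverged yet (no bitmap UUID), create a
			 * new current UUID now; while IO is suspended, only
			 * note the intent and do it on resume. */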
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (drbd_suspended(device)) {
					set_bit(NEW_CUR_UUID, &device->flags);
				} else {
					drbd_uuid_new_current(device);
					drbd_send_uuids(peer_device);
				}
			}
			put_ldev(device);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) {
		if (os.peer != R_PRIMARY && ns.peer == R_PRIMARY &&
		    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
			drbd_uuid_new_current(device);
			drbd_send_uuids(peer_device);
		}
		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(device, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(device);
	}

	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
		device->state.conn <= C_CONNECTED && get_ldev(device)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm would be done if it did change. */
		drbd_bitmap_io_from_worker(device, &drbd_bm_write,
				"demote", BM_LOCKED_TEST_ALLOWED);
		put_ldev(device);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(peer_device, 0, 0);  /* to start sync... */
		drbd_send_uuids(peer_device);
		drbd_send_state(peer_device, ns);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(peer_device, ns);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(device);

	/* Make sure the peer gets informed about possible state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(peer_device, ns);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(peer_device, ns);

	/* We are in the process of starting a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(device,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh = EP_PASS_ON;
		int was_io_error = 0;
		/* corresponding get_ldev was in _drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS.
		 * But it is still not safe to dereference ldev here, since
		 * we might come from a failed Attach before ldev was set. */
		if (device->ldev) {
			rcu_read_lock();
			eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
			rcu_read_unlock();

			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);

			/* Intentionally call this handler first, before drbd_send_state().
			 * See: 2932204 drbd: call local-io-error handler early
			 * People may choose to hard-reset the box from this handler.
			 * It is useful if this looks like a "regular node crash". */
			if (was_io_error && eh == EP_CALL_HELPER)
				drbd_khelper(device, "local-io-error");

			/* Immediately allow completion of all application IO
			 * that waits for completion from the local disk,
			 * if this was a force-detach due to disk_timeout
			 * or administrator request (drbdsetup detach --force).
			 * Do NOT abort otherwise.
			 * Aborting local requests may cause serious problems,
			 * if requests are completed to upper layers already,
			 * and then later the already submitted local bio completes.
			 * This can cause DMA into former bio pages that meanwhile
			 * have been re-used for other things.
			 * So aborting local requests may cause crashes,
			 * or even worse, silent data corruption.
			 */
			if (test_and_clear_bit(FORCE_DETACH, &device->flags))
				tl_abort_disk_io(device);

			/* current state still has to be D_FAILED,
			 * there is only one way out: to D_DISKLESS,
			 * and that may only happen after our put_ldev below. */
			if (device->state.disk != D_FAILED)
				drbd_err(device,
					"ASSERT FAILED: disk is %s during detach\n",
					drbd_disk_str(device->state.disk));

			if (ns.conn >= C_CONNECTED)
				drbd_send_state(peer_device, ns);

			drbd_rs_cancel_all(device);

			/* In case we want to get something to stable storage still,
			 * this may be the last chance.
			 * Following put_ldev may transition to D_DISKLESS. */
			drbd_md_sync(device);
		}
		put_ldev(device);
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (device->state.disk != D_DISKLESS)
			drbd_err(device,
				 "ASSERT FAILED: disk is %s while going diskless\n",
				 drbd_disk_str(device->state.disk));

		if (ns.conn >= C_CONNECTED)
			drbd_send_state(peer_device, ns);
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(device);
	}

	/* Notify peer that I had a local IO error and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
		drbd_send_state(peer_device, ns);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(device);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(device);

	/* sync target done with resync.  Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(peer_device, ns);

	/* Verify finished, or reached stop sector.  Peer did not know about
	 * the stop sector, and we may even have changed the stop sector during
	 * verify to interrupt/stop early.  Send the new state. */
	if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
	&& verify_can_do_stop_sector(device))
		drbd_send_state(peer_device, ns);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or on transition from resync back to AHEAD/BEHIND.
	 *
	 * Connection loss is handled in drbd_disconnected() by the receiver.
	 *
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 *
	 * No harm done if some bits change during this phase.
	 */
	if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) &&
	    (ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) {
		drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
			"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
		put_ldev(device);
	}

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(device);
	}

	drbd_md_sync(device);
}

struct after_conn_state_chg_work {
	struct drbd_work w;
	enum drbd_conns oc;
	union drbd_state ns_min;
	union drbd_state ns_max; /* new, max state, over all devices */
	enum chg_state_flags flags;
	struct drbd_connection *connection;
	struct drbd_state_change *state_change;
};

static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_connection *connection = acscw->connection;
	enum drbd_conns oc = acscw->oc;
	union drbd_state ns_max = acscw->ns_max;
	struct drbd_peer_device *peer_device;
	int vnr;

	broadcast_state_change(acscw->state_change);
	forget_state_change(acscw->state_change);
	kfree(acscw);

	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
		drbd_thread_start(&connection->receiver);

	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
		struct net_conf *old_conf;

		mutex_lock(&notification_mutex);
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		notify_connection_state(NULL, 0, connection, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		mutex_lock(&connection->resource->conf_update);
		old_conf = connection->net_conf;
		connection->my_addr_len = 0;
		connection->peer_addr_len = 0;
		RCU_INIT_POINTER(connection->net_conf, NULL);
		conn_free_crypto(connection);
		mutex_unlock(&connection->resource->conf_update);

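		/* Wait until all RCU readers that may still hold a reference
		 * to the old net_conf have finished before freeing it. */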
		synchronize_rcu();
		kfree(old_conf);
	}

	if (ns_max.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (ns_max.pdsk <= D_OUTDATED) {
			rcu_read_lock();
			idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
				struct drbd_device *device = peer_device->device;
				if (test_bit(NEW_CUR_UUID, &device->flags)) {
					drbd_uuid_new_current(device);
					clear_bit(NEW_CUR_UUID, &device->flags);
				}
			}
			rcu_read_unlock();
			spin_lock_irq(&connection->resource->req_lock);
			_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE);
			spin_unlock_irq(&connection->resource->req_lock);
		}
	}

	/* Sync the meta data before dropping our reference:
	 * the put below may drop the last reference and free the connection. */
	conn_md_sync(connection);
	kref_put(&connection->kref, drbd_destroy_connection);

	return 0;
}

static void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
{
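	/* Start with all CS_DC_* bits set and clear the bit for every state
	 * field that is not identical across all volumes; what remains set
	 * marks the fields for which a common old state exists. */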
	enum chg_state_flags flags = ~0;
	struct drbd_peer_device *peer_device;
	int vnr, first_vol = 1;
	union drbd_dev_state os, cs = {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = connection->cstate,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		os = device->state;

		if (first_vol) {
			cs = os;
			first_vol = 0;
			continue;
		}

		if (cs.role != os.role)
			flags &= ~CS_DC_ROLE;

		if (cs.peer != os.peer)
			flags &= ~CS_DC_PEER;

		if (cs.conn != os.conn)
			flags &= ~CS_DC_CONN;

		if (cs.disk != os.disk)
			flags &= ~CS_DC_DISK;

		if (cs.pdsk != os.pdsk)
			flags &= ~CS_DC_PDSK;
	}
	rcu_read_unlock();

	*pf |= CS_DC_MASK;
	*pf &= flags;
	pcs->i = cs.i;
}

static enum drbd_state_rv
conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		os = drbd_read_state(device);
		ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);

		if (rv >= SS_SUCCESS && !(flags & CS_HARD)) {
			rv = is_valid_state(device, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(device, os) == rv)
					rv = is_valid_soft_transition(os, ns, connection);
			} else
				rv = is_valid_soft_transition(os, ns, connection);
		}

		if (rv < SS_SUCCESS) {
			if (flags & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static void
conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
	       union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
	union drbd_state ns, os, ns_max = { };
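	/* Seed ns_min with the maximum representable value of each field
	 * (the all-bits-set *_MASK values), so that the min_t()/min_role()
	 * accumulation over all volumes below works out. */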
	union drbd_state ns_min = {
		{ .role = R_MASK,
		  .peer = R_MASK,
		  .conn = val.conn,
		  .disk = D_MASK,
		  .pdsk = D_MASK
		} };
	struct drbd_peer_device *peer_device;
	enum drbd_state_rv rv;
	int vnr, number_of_volumes = 0;

	if (mask.conn == C_MASK) {
		/* remember last connect time so request_timer_fn() won't
		 * kill newly established sessions while we are still trying to thaw
		 * previously frozen IO */
		if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
			connection->last_reconnect_jif = jiffies;

		connection->cstate = val.conn;
	}

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		number_of_volumes++;
		os = drbd_read_state(device);
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(device, os, ns, NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		rv = _drbd_set_state(device, ns, flags, NULL);
		BUG_ON(rv < SS_SUCCESS);
		ns.i = device->state.i;
		ns_max.role = max_role(ns.role, ns_max.role);
		ns_max.peer = max_role(ns.peer, ns_max.peer);
		ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
		ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
		ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);

		ns_min.role = min_role(ns.role, ns_min.role);
		ns_min.peer = min_role(ns.peer, ns_min.peer);
		ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
		ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
		ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
	}
	rcu_read_unlock();

	if (number_of_volumes == 0) {
		ns_min = ns_max = (union drbd_state) { {
				.role = R_SECONDARY,
				.peer = R_UNKNOWN,
				.conn = val.conn,
				.disk = D_DISKLESS,
				.pdsk = D_UNKNOWN
			} };
	}

	ns_min.susp = ns_max.susp = connection->resource->susp;
	ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod;
	ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen;

	*pns_min = ns_min;
	*pns_max = ns_max;
}

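/* Wait condition for the cluster-wide state change in _conn_request_state():
 * returns SS_UNKNOWN_ERROR (0) to keep wait_event_lock_irq() waiting, and a
 * non-zero result code once the peer has answered or the transition has
 * become invalid. */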
static enum drbd_state_rv
_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
		rv = SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
		rv = SS_CW_FAILED_BY_PEER;

	err = conn_is_valid_transition(connection, mask, val, 0);
	if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
		return rv;

	return err;
}

enum drbd_state_rv
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = connection->cstate;
	union drbd_state ns_max, ns_min, os;
	bool have_mutex = false;
	struct drbd_state_change *state_change;

	if (mask.conn) {
		rv = is_valid_conn_transition(oc, val.conn);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	rv = conn_is_valid_transition(connection, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {

		/* This will be a cluster-wide state change.
		 * Need to give up the spinlock, grab the mutex,
		 * then send the state change request, ... */
		spin_unlock_irq(&connection->resource->req_lock);
		mutex_lock(&connection->cstate_mutex);
		have_mutex = true;

		set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
		if (conn_send_state_req(connection, mask, val)) {
			/* sending failed. */
			clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
			rv = SS_CW_FAILED_BY_PEER;
			/* need to re-acquire the spin lock, though */
			goto abort_unlocked;
		}

		if (val.conn == C_DISCONNECTING)
			set_bit(DISCONNECT_SENT, &connection->flags);

		/* ... and re-acquire the spinlock.
		 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
		 * conn_set_state() within the same spinlock. */
		spin_lock_irq(&connection->resource->req_lock);
		wait_event_lock_irq(connection->ping_wait,
				(rv = _conn_rq_cond(connection, mask, val)),
				connection->resource->req_lock);
		clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	state_change = remember_old_state(connection->resource, GFP_ATOMIC);
	conn_old_common_state(connection, &os, &flags);
	flags |= CS_DC_SUSP;
	conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
	conn_pr_state_change(connection, os, ns_max, flags);
	remember_new_state(state_change);

	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = os.conn;
		acscw->ns_min = ns_min;
		acscw->ns_max = ns_max;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		kref_get(&connection->kref);
		acscw->connection = connection;
		acscw->state_change = state_change;
		drbd_queue_work(&connection->sender_work, &acscw->w);
	} else {
		drbd_err(connection, "Could not kmalloc an acscw\n");
	}

 abort:
	if (have_mutex) {
		/* mutex_unlock() "... must not be used in interrupt context.",
		 * so give up the spinlock, then re-acquire it */
		spin_unlock_irq(&connection->resource->req_lock);
 abort_unlocked:
		mutex_unlock(&connection->cstate_mutex);
		spin_lock_irq(&connection->resource->req_lock);
	}
	if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
		drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
		drbd_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
		drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
	}
	return rv;
}

enum drbd_state_rv
conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&connection->resource->req_lock);
	rv = _conn_request_state(connection, mask, val, flags);
	spin_unlock_irq(&connection->resource->req_lock);

	return rv;
}