1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/import.c
37  *
38  * Author: Mike Shaver <shaver@clusterfs.com>
39  */
40 
41 #define DEBUG_SUBSYSTEM S_RPC
42 
43 #include "../include/obd_support.h"
44 #include "../include/lustre_ha.h"
45 #include "../include/lustre_net.h"
46 #include "../include/lustre_import.h"
47 #include "../include/lustre_export.h"
48 #include "../include/obd.h"
49 #include "../include/obd_cksum.h"
50 #include "../include/obd_class.h"
51 
52 #include "ptlrpc_internal.h"
53 
54 struct ptlrpc_connect_async_args {
55 	 __u64 pcaa_peer_committed;
56 	int pcaa_initial_connect;
57 };
58 
59 /**
60  * Update the current state of import \a imp to the provided \a state value.
61  * Helper function. Must be called under imp_lock.
62  */
63 static void __import_set_state(struct obd_import *imp,
64 			       enum lustre_imp_state state)
65 {
66 	switch (state) {
67 	case LUSTRE_IMP_CLOSED:
68 	case LUSTRE_IMP_NEW:
69 	case LUSTRE_IMP_DISCON:
70 	case LUSTRE_IMP_CONNECTING:
71 		break;
72 	case LUSTRE_IMP_REPLAY_WAIT:
73 		imp->imp_replay_state = LUSTRE_IMP_REPLAY_LOCKS;
74 		break;
75 	default:
76 		imp->imp_replay_state = LUSTRE_IMP_REPLAY;
77 	}
78 
79 	imp->imp_state = state;
80 	imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
81 	imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
82 		ktime_get_real_seconds();
83 	imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
84 		IMP_STATE_HIST_LEN;
85 }
86 
87 /* A CLOSED import should remain so. */
88 #define IMPORT_SET_STATE_NOLOCK(imp, state)				       \
89 do {									       \
90 	if (imp->imp_state != LUSTRE_IMP_CLOSED) {			       \
91 		CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n",   \
92 		       imp, obd2cli_tgt(imp->imp_obd),			       \
93 		       ptlrpc_import_state_name(imp->imp_state),	       \
94 		       ptlrpc_import_state_name(state));		       \
95 		__import_set_state(imp, state);				       \
96 	}								       \
97 } while (0)
98 
99 #define IMPORT_SET_STATE(imp, state)					\
100 do {									\
101 	spin_lock(&imp->imp_lock);					\
102 	IMPORT_SET_STATE_NOLOCK(imp, state);				\
103 	spin_unlock(&imp->imp_lock);					\
104 } while (0)
105 
106 static int ptlrpc_connect_interpret(const struct lu_env *env,
107 				    struct ptlrpc_request *request,
108 				    void *data, int rc);
109 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
110 
111 /* Only this function is allowed to change the import state when it is
112  * CLOSED. I would rather refcount the import and free it after
113  * disconnection like we do with exports. To do that, the client_obd
114  * will need to save the peer info somewhere other than in the import,
115  * though. */
116 int ptlrpc_init_import(struct obd_import *imp)
117 {
118 	spin_lock(&imp->imp_lock);
119 
120 	imp->imp_generation++;
121 	imp->imp_state = LUSTRE_IMP_NEW;
122 
123 	spin_unlock(&imp->imp_lock);
124 
125 	return 0;
126 }
127 EXPORT_SYMBOL(ptlrpc_init_import);
128 
129 #define UUID_STR "_UUID"
130 static void deuuidify(char *uuid, const char *prefix, char **uuid_start,
131 		      int *uuid_len)
132 {
133 	*uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
134 		? uuid : uuid + strlen(prefix);
135 
136 	*uuid_len = strlen(*uuid_start);
137 
138 	if (*uuid_len < strlen(UUID_STR))
139 		return;
140 
141 	if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
142 		    UUID_STR, strlen(UUID_STR)))
143 		*uuid_len -= strlen(UUID_STR);
144 }
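/*
 * For example (illustrative values): with a NULL prefix,
 * deuuidify("lustre-OST0001_UUID", NULL, &start, &len) leaves start pointing
 * at the full string and sets len to strlen("lustre-OST0001"), i.e. the
 * trailing "_UUID" is excluded from the length used when printing the target
 * name.
 */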
145 
146 /**
147  * Returns true if import was FULL, false if import was already not
148  * connected.
149  * @imp - import to be disconnected
150  * @conn_cnt - connection count (epoch) of the request that timed out
151  *	     and caused the disconnection.  In some cases, multiple
152  *	     inflight requests can fail to a single target (e.g. OST
153  *	     bulk requests) and if one has already caused a reconnection
154  *	     (increasing the import->conn_cnt) the older failure should
155  *	     not also cause a reconnection.  If zero it forces a reconnect.
156  */
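/*
 * For example: two in-flight bulk RPCs to one OST both time out while
 * imp_conn_cnt == 5.  The first failure moves the import to DISCON and the
 * later reconnect increases imp_conn_cnt, so the second failure (still
 * reporting conn_cnt == 5) no longer matches and does not force another
 * reconnection.
 */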
157 int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
158 {
159 	int rc = 0;
160 
161 	spin_lock(&imp->imp_lock);
162 
163 	if (imp->imp_state == LUSTRE_IMP_FULL &&
164 	    (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
165 		char *target_start;
166 		int   target_len;
167 
168 		deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
169 			  &target_start, &target_len);
170 
171 		if (imp->imp_replayable) {
172 			LCONSOLE_WARN("%s: Connection to %.*s (at %s) was lost; in progress operations using this service will wait for recovery to complete\n",
173 				      imp->imp_obd->obd_name, target_len, target_start,
174 				      libcfs_nid2str(imp->imp_connection->c_peer.nid));
175 		} else {
176 			LCONSOLE_ERROR_MSG(0x166, "%s: Connection to %.*s (at %s) was lost; in progress operations using this service will fail\n",
177 					   imp->imp_obd->obd_name,
178 					   target_len, target_start,
179 					   libcfs_nid2str(imp->imp_connection->c_peer.nid));
180 		}
181 		IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
182 		spin_unlock(&imp->imp_lock);
183 
184 		if (obd_dump_on_timeout)
185 			libcfs_debug_dumplog();
186 
187 		obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
188 		rc = 1;
189 	} else {
190 		spin_unlock(&imp->imp_lock);
191 		CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
192 		       imp->imp_client->cli_name, imp,
193 		       (imp->imp_state == LUSTRE_IMP_FULL &&
194 			imp->imp_conn_cnt > conn_cnt) ?
195 		       "reconnected" : "not connected", imp->imp_conn_cnt,
196 		       conn_cnt, ptlrpc_import_state_name(imp->imp_state));
197 	}
198 
199 	return rc;
200 }
201 
202 /*
203  * This acts as a barrier; all existing requests are rejected, and
204  * no new requests will be accepted until the import is valid again.
205  */
206 void ptlrpc_deactivate_import(struct obd_import *imp)
207 {
208 	CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
209 
210 	spin_lock(&imp->imp_lock);
211 	imp->imp_invalid = 1;
212 	imp->imp_generation++;
213 	spin_unlock(&imp->imp_lock);
214 
215 	ptlrpc_abort_inflight(imp);
216 	obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
217 }
218 EXPORT_SYMBOL(ptlrpc_deactivate_import);
219 
220 static unsigned int
221 ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now)
222 {
223 	long dl;
224 
225 	if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
226 	      (req->rq_phase == RQ_PHASE_BULK) ||
227 	      (req->rq_phase == RQ_PHASE_NEW)))
228 		return 0;
229 
230 	if (req->rq_timedout)
231 		return 0;
232 
233 	if (req->rq_phase == RQ_PHASE_NEW)
234 		dl = req->rq_sent;
235 	else
236 		dl = req->rq_deadline;
237 
238 	if (dl <= now)
239 		return 0;
240 
241 	return dl - now;
242 }
243 
244 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
245 {
246 	time64_t now = ktime_get_real_seconds();
247 	struct list_head *tmp, *n;
248 	struct ptlrpc_request *req;
249 	unsigned int timeout = 0;
250 
251 	spin_lock(&imp->imp_lock);
252 	list_for_each_safe(tmp, n, &imp->imp_sending_list) {
253 		req = list_entry(tmp, struct ptlrpc_request, rq_list);
254 		timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
255 	}
256 	spin_unlock(&imp->imp_lock);
257 	return timeout;
258 }
259 
260 /**
261  * This function will invalidate the import, if necessary, then block
262  * for all the RPC completions, and finally notify the obd to
263  * invalidate its state (ie cancel locks, clear pending requests,
264  * etc).
265  */
266 void ptlrpc_invalidate_import(struct obd_import *imp)
267 {
268 	struct list_head *tmp, *n;
269 	struct ptlrpc_request *req;
270 	struct l_wait_info lwi;
271 	unsigned int timeout;
272 	int rc;
273 
274 	atomic_inc(&imp->imp_inval_count);
275 
276 	if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
277 		ptlrpc_deactivate_import(imp);
278 
279 	CFS_FAIL_TIMEOUT(OBD_FAIL_MGS_CONNECT_NET, 3 * cfs_fail_val / 2);
280 	LASSERT(imp->imp_invalid);
281 
282 	/* Wait forever until inflight == 0. We really can't do it another
283 	 * way because in some cases we need to wait for very long reply
284 	 * unlink. We can't do anything before that because there is really
285 	 * no guarantee that some rdma transfer is not in progress right now. */
286 	do {
287 		/* Calculate max timeout for waiting on rpcs to error
288 		 * out. Use obd_timeout if calculated value is smaller
289 		 * than it. */
290 		if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
291 			timeout = ptlrpc_inflight_timeout(imp);
292 			timeout += timeout / 3;
293 
294 			if (timeout == 0)
295 				timeout = obd_timeout;
296 		} else {
297 			/* decrease the interval to increase the chance of hitting the race */
298 			timeout = 1;
299 		}
300 
301 		CDEBUG(D_RPCTRACE,
302 		       "Sleeping %d sec for inflight to error out\n",
303 		       timeout);
304 
305 		/* Wait for all requests to error out and call completion
306 		 * callbacks. Cap it at obd_timeout -- these should all
307 		 * have been locally cancelled by ptlrpc_abort_inflight. */
308 		lwi = LWI_TIMEOUT_INTERVAL(
309 			cfs_timeout_cap(cfs_time_seconds(timeout)),
310 			(timeout > 1) ? cfs_time_seconds(1) : cfs_time_seconds(1) / 2,
311 			NULL, NULL);
312 		rc = l_wait_event(imp->imp_recovery_waitq,
313 				  (atomic_read(&imp->imp_inflight) == 0),
314 				  &lwi);
315 		if (rc) {
316 			const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
317 
318 			CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
319 			       cli_tgt, rc,
320 			       atomic_read(&imp->imp_inflight));
321 
322 			spin_lock(&imp->imp_lock);
323 			if (atomic_read(&imp->imp_inflight) == 0) {
324 				int count = atomic_read(&imp->imp_unregistering);
325 
326 				/* We know that "unregistering" rpcs can only
327 				 * survive in the sending or delayed lists (they
328 				 * may be waiting for a long reply unlink in
329 				 * sluggish nets). Let's check this. If there
330 				 * is no inflight and unregistering != 0, this
331 				 * is a bug. */
332 				LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
333 					 count);
334 
335 				/* Let's save one loop as soon as inflight has
336 				 * dropped to zero. No new inflights possible at
337 				 * this point. */
338 				rc = 0;
339 			} else {
340 				list_for_each_safe(tmp, n,
341 						       &imp->imp_sending_list) {
342 					req = list_entry(tmp,
343 							     struct ptlrpc_request,
344 							     rq_list);
345 					DEBUG_REQ(D_ERROR, req,
346 						  "still on sending list");
347 				}
348 				list_for_each_safe(tmp, n,
349 						       &imp->imp_delayed_list) {
350 					req = list_entry(tmp,
351 							     struct ptlrpc_request,
352 							     rq_list);
353 					DEBUG_REQ(D_ERROR, req,
354 						  "still on delayed list");
355 				}
356 
357 				CERROR("%s: RPCs in \"%s\" phase found (%d). Network is sluggish? Waiting for them to error out.\n",
358 				       cli_tgt,
359 				       ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
360 				       atomic_read(&imp->
361 						   imp_unregistering));
362 			}
363 			spin_unlock(&imp->imp_lock);
364 		}
365 	} while (rc != 0);
366 
367 	/*
368 	 * Let's additionally check that no new rpcs added to import in
369 	 * "invalidate" state.
370 	 */
371 	LASSERT(atomic_read(&imp->imp_inflight) == 0);
372 	obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
373 	sptlrpc_import_flush_all_ctx(imp);
374 
375 	atomic_dec(&imp->imp_inval_count);
376 	wake_up_all(&imp->imp_recovery_waitq);
377 }
378 EXPORT_SYMBOL(ptlrpc_invalidate_import);
379 
380 /* unset imp_invalid */
381 void ptlrpc_activate_import(struct obd_import *imp)
382 {
383 	struct obd_device *obd = imp->imp_obd;
384 
385 	spin_lock(&imp->imp_lock);
386 	if (imp->imp_deactive != 0) {
387 		spin_unlock(&imp->imp_lock);
388 		return;
389 	}
390 
391 	imp->imp_invalid = 0;
392 	spin_unlock(&imp->imp_lock);
393 	obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
394 }
395 EXPORT_SYMBOL(ptlrpc_activate_import);
396 
397 static void ptlrpc_pinger_force(struct obd_import *imp)
398 {
399 	CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd),
400 	       ptlrpc_import_state_name(imp->imp_state));
401 
402 	spin_lock(&imp->imp_lock);
403 	imp->imp_force_verify = 1;
404 	spin_unlock(&imp->imp_lock);
405 
406 	if (imp->imp_state != LUSTRE_IMP_CONNECTING)
407 		ptlrpc_pinger_wake_up();
408 }
409 
410 void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
411 {
412 	LASSERT(!imp->imp_dlm_fake);
413 
414 	if (ptlrpc_set_import_discon(imp, conn_cnt)) {
415 		if (!imp->imp_replayable) {
416 			CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
417 			       obd2cli_tgt(imp->imp_obd),
418 			       imp->imp_connection->c_remote_uuid.uuid,
419 			       imp->imp_obd->obd_name);
420 			ptlrpc_deactivate_import(imp);
421 		}
422 
423 		ptlrpc_pinger_force(imp);
424 	}
425 }
426 EXPORT_SYMBOL(ptlrpc_fail_import);
427 
428 int ptlrpc_reconnect_import(struct obd_import *imp)
429 {
430 #ifdef ENABLE_PINGER
431 	struct l_wait_info lwi;
432 	int secs = cfs_time_seconds(obd_timeout);
433 	int rc;
434 
435 	ptlrpc_pinger_force(imp);
436 
437 	CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
438 	       obd2cli_tgt(imp->imp_obd), secs);
439 
440 	lwi = LWI_TIMEOUT(secs, NULL, NULL);
441 	rc = l_wait_event(imp->imp_recovery_waitq,
442 			  !ptlrpc_import_in_recovery(imp), &lwi);
443 	CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd),
444 	       ptlrpc_import_state_name(imp->imp_state));
445 	return rc;
446 #else
447 	ptlrpc_set_import_discon(imp, 0);
448 	/* Force a new connect attempt */
449 	ptlrpc_invalidate_import(imp);
450 	/* Do a fresh connect next time by zeroing the handle */
451 	ptlrpc_disconnect_import(imp, 1);
452 	/* Wait for all invalidate calls to finish */
453 	if (atomic_read(&imp->imp_inval_count) > 0) {
454 		int rc;
455 		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
456 
457 		rc = l_wait_event(imp->imp_recovery_waitq,
458 				  (atomic_read(&imp->imp_inval_count) == 0),
459 				  &lwi);
460 		if (rc)
461 			CERROR("Interrupted, inval=%d\n",
462 			       atomic_read(&imp->imp_inval_count));
463 	}
464 
465 	/* Allow reconnect attempts */
466 	imp->imp_obd->obd_no_recov = 0;
467 	/* Remove 'invalid' flag */
468 	ptlrpc_activate_import(imp);
469 	/* Attempt a new connect */
470 	ptlrpc_recover_import(imp, NULL, 0);
471 	return 0;
472 #endif
473 }
474 EXPORT_SYMBOL(ptlrpc_reconnect_import);
475 
476 /**
477  * Connection on import \a imp is changed to another one (if more than one is
478  * present). We typically choose the connection that we have not tried to
479  * connect to for the longest time.
480  */
481 static int import_select_connection(struct obd_import *imp)
482 {
483 	struct obd_import_conn *imp_conn = NULL, *conn;
484 	struct obd_export *dlmexp;
485 	char *target_start;
486 	int target_len, tried_all = 1;
487 
488 	spin_lock(&imp->imp_lock);
489 
490 	if (list_empty(&imp->imp_conn_list)) {
491 		CERROR("%s: no connections available\n",
492 		       imp->imp_obd->obd_name);
493 		spin_unlock(&imp->imp_lock);
494 		return -EINVAL;
495 	}
496 
497 	list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
498 		CDEBUG(D_HA, "%s: connect to NID %s last attempt %llu\n",
499 		       imp->imp_obd->obd_name,
500 		       libcfs_nid2str(conn->oic_conn->c_peer.nid),
501 		       conn->oic_last_attempt);
502 
503 		/* If we have not tried this connection since
504 		   the last successful attempt, go with this one */
505 		if ((conn->oic_last_attempt == 0) ||
506 		    cfs_time_beforeq_64(conn->oic_last_attempt,
507 				       imp->imp_last_success_conn)) {
508 			imp_conn = conn;
509 			tried_all = 0;
510 			break;
511 		}
512 
513 		/* If all of the connections have already been tried
514 		   since the last successful connection; just choose the
515 		   least recently used */
516 		if (!imp_conn)
517 			imp_conn = conn;
518 		else if (cfs_time_before_64(conn->oic_last_attempt,
519 					    imp_conn->oic_last_attempt))
520 			imp_conn = conn;
521 	}
522 
523 	/* if not found, simply choose the current one */
524 	if (!imp_conn || imp->imp_force_reconnect) {
525 		LASSERT(imp->imp_conn_current);
526 		imp_conn = imp->imp_conn_current;
527 		tried_all = 0;
528 	}
529 	LASSERT(imp_conn->oic_conn);
530 
531 	/* If we've tried everything, and we're back to the beginning of the
532 	   list, increase our timeout and try again. It will be reset when
533 	   we do finally connect. (FIXME: really we should wait for all network
534 	   state associated with the last connection attempt to drain before
535 	   trying to reconnect on it.) */
536 	if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
537 		struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
538 
539 		if (at_get(at) < CONNECTION_SWITCH_MAX) {
540 			at_measured(at, at_get(at) + CONNECTION_SWITCH_INC);
541 			if (at_get(at) > CONNECTION_SWITCH_MAX)
542 				at_reset(at, CONNECTION_SWITCH_MAX);
543 		}
544 		LASSERT(imp_conn->oic_last_attempt);
545 		CDEBUG(D_HA, "%s: tried all connections, increasing latency to %ds\n",
546 		       imp->imp_obd->obd_name, at_get(at));
547 	}
548 
549 	imp_conn->oic_last_attempt = cfs_time_current_64();
550 
551 	/* switch connection, don't mind if it's same as the current one */
552 	ptlrpc_connection_put(imp->imp_connection);
553 	imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
554 
555 	dlmexp = class_conn2export(&imp->imp_dlm_handle);
556 	LASSERT(dlmexp != NULL);
557 	ptlrpc_connection_put(dlmexp->exp_connection);
558 	dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
559 	class_export_put(dlmexp);
560 
561 	if (imp->imp_conn_current != imp_conn) {
562 		if (imp->imp_conn_current) {
563 			deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
564 				  &target_start, &target_len);
565 
566 			CDEBUG(D_HA, "%s: Connection changing to %.*s (at %s)\n",
567 			       imp->imp_obd->obd_name,
568 			       target_len, target_start,
569 			       libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
570 		}
571 
572 		imp->imp_conn_current = imp_conn;
573 	}
574 
575 	CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
576 	       imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
577 	       libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
578 
579 	spin_unlock(&imp->imp_lock);
580 
581 	return 0;
582 }
583 
584 /*
585  * must be called under imp_lock
586  */
587 static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
588 {
589 	struct ptlrpc_request *req;
590 	struct list_head *tmp;
591 
592 	/* The requests in committed_list always have smaller transnos than
593 	 * the requests in replay_list */
594 	if (!list_empty(&imp->imp_committed_list)) {
595 		tmp = imp->imp_committed_list.next;
596 		req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
597 		*transno = req->rq_transno;
598 		if (req->rq_transno == 0) {
599 			DEBUG_REQ(D_ERROR, req,
600 				  "zero transno in committed_list");
601 			LBUG();
602 		}
603 		return 1;
604 	}
605 	if (!list_empty(&imp->imp_replay_list)) {
606 		tmp = imp->imp_replay_list.next;
607 		req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
608 		*transno = req->rq_transno;
609 		if (req->rq_transno == 0) {
610 			DEBUG_REQ(D_ERROR, req, "zero transno in replay_list");
611 			LBUG();
612 		}
613 		return 1;
614 	}
615 	return 0;
616 }
617 
618 /**
619  * Attempt to (re)connect import \a imp. This includes all preparations,
620  * initializing the CONNECT RPC request and passing it to ptlrpcd for
621  * actual sending.
622  * Returns 0 on success or an error code.
623  */
624 int ptlrpc_connect_import(struct obd_import *imp)
625 {
626 	struct obd_device *obd = imp->imp_obd;
627 	int initial_connect = 0;
628 	int set_transno = 0;
629 	__u64 committed_before_reconnect = 0;
630 	struct ptlrpc_request *request;
631 	char *bufs[] = { NULL,
632 			 obd2cli_tgt(imp->imp_obd),
633 			 obd->obd_uuid.uuid,
634 			 (char *)&imp->imp_dlm_handle,
635 			 (char *)&imp->imp_connect_data };
636 	struct ptlrpc_connect_async_args *aa;
637 	int rc;
638 
639 	spin_lock(&imp->imp_lock);
640 	if (imp->imp_state == LUSTRE_IMP_CLOSED) {
641 		spin_unlock(&imp->imp_lock);
642 		CERROR("can't connect to a closed import\n");
643 		return -EINVAL;
644 	} else if (imp->imp_state == LUSTRE_IMP_FULL) {
645 		spin_unlock(&imp->imp_lock);
646 		CERROR("already connected\n");
647 		return 0;
648 	} else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
649 		spin_unlock(&imp->imp_lock);
650 		CERROR("already connecting\n");
651 		return -EALREADY;
652 	}
653 
654 	IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
655 
656 	imp->imp_conn_cnt++;
657 	imp->imp_resend_replay = 0;
658 
659 	if (!lustre_handle_is_used(&imp->imp_remote_handle))
660 		initial_connect = 1;
661 	else
662 		committed_before_reconnect = imp->imp_peer_committed_transno;
663 
664 	set_transno = ptlrpc_first_transno(imp,
665 					   &imp->imp_connect_data.ocd_transno);
666 	spin_unlock(&imp->imp_lock);
667 
668 	rc = import_select_connection(imp);
669 	if (rc)
670 		goto out;
671 
672 	rc = sptlrpc_import_sec_adapt(imp, NULL, NULL);
673 	if (rc)
674 		goto out;
675 
676 	/* Reset connect flags to the originally requested flags, in case
677 	 * the server is updated on-the-fly we will get the new features. */
678 	imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
679 	/* Reset ocd_version each time so the server knows the exact versions */
680 	imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
681 	imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
682 	imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
683 
684 	rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
685 			   &obd->obd_uuid, &imp->imp_connect_data, NULL);
686 	if (rc)
687 		goto out;
688 
689 	request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
690 	if (request == NULL) {
691 		rc = -ENOMEM;
692 		goto out;
693 	}
694 
695 	rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION,
696 				      imp->imp_connect_op, bufs, NULL);
697 	if (rc) {
698 		ptlrpc_request_free(request);
699 		goto out;
700 	}
701 
702 	/* Report the rpc service time to the server so that it knows how long
703 	 * to wait for clients to join recovery */
704 	lustre_msg_set_service_time(request->rq_reqmsg,
705 				    at_timeout2est(request->rq_timeout));
706 
707 	/* The amount of time we give the server to process the connect req.
708 	 * import_select_connection will increase the net latency on
709 	 * repeated reconnect attempts to cover slow networks.
710 	 * We override/ignore the server rpc completion estimate here,
711 	 * which may be large if this is a reconnect attempt */
712 	request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
713 	lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
714 
715 	lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
716 
717 	request->rq_no_resend = request->rq_no_delay = 1;
718 	request->rq_send_state = LUSTRE_IMP_CONNECTING;
719 	/* Allow a slightly larger reply for future growth compatibility */
720 	req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
721 			     sizeof(struct obd_connect_data)+16*sizeof(__u64));
722 	ptlrpc_request_set_replen(request);
723 	request->rq_interpret_reply = ptlrpc_connect_interpret;
724 
725 	CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
726 	aa = ptlrpc_req_async_args(request);
727 	memset(aa, 0, sizeof(*aa));
728 
729 	aa->pcaa_peer_committed = committed_before_reconnect;
730 	aa->pcaa_initial_connect = initial_connect;
731 
732 	if (aa->pcaa_initial_connect) {
733 		spin_lock(&imp->imp_lock);
734 		imp->imp_replayable = 1;
735 		spin_unlock(&imp->imp_lock);
736 		lustre_msg_add_op_flags(request->rq_reqmsg,
737 					MSG_CONNECT_INITIAL);
738 	}
739 
740 	if (set_transno)
741 		lustre_msg_add_op_flags(request->rq_reqmsg,
742 					MSG_CONNECT_TRANSNO);
743 
744 	DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
745 		  request->rq_timeout);
746 	ptlrpcd_add_req(request);
747 	rc = 0;
748 out:
749 	if (rc != 0)
750 		IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
751 
752 	return rc;
753 }
754 EXPORT_SYMBOL(ptlrpc_connect_import);
755 
756 static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
757 {
758 	int force_verify;
759 
760 	spin_lock(&imp->imp_lock);
761 	force_verify = imp->imp_force_verify != 0;
762 	spin_unlock(&imp->imp_lock);
763 
764 	if (force_verify)
765 		ptlrpc_pinger_wake_up();
766 }
767 
768 static int ptlrpc_busy_reconnect(int rc)
769 {
770 	return (rc == -EBUSY) || (rc == -EAGAIN);
771 }
772 
773 /**
774  * interpret_reply callback for connect RPCs.
775  * Looks into returned status of connect operation and decides
776  * what to do with the import - i.e. enter recovery, promote it to
777  * full state for normal operation, or disconnect it due to an error.
778  */
779 static int ptlrpc_connect_interpret(const struct lu_env *env,
780 				    struct ptlrpc_request *request,
781 				    void *data, int rc)
782 {
783 	struct ptlrpc_connect_async_args *aa = data;
784 	struct obd_import *imp = request->rq_import;
785 	struct client_obd *cli = &imp->imp_obd->u.cli;
786 	struct lustre_handle old_hdl;
787 	__u64 old_connect_flags;
788 	int msg_flags;
789 	struct obd_connect_data *ocd;
790 	struct obd_export *exp;
791 	int ret;
792 
793 	spin_lock(&imp->imp_lock);
794 	if (imp->imp_state == LUSTRE_IMP_CLOSED) {
795 		imp->imp_connect_tried = 1;
796 		spin_unlock(&imp->imp_lock);
797 		return 0;
798 	}
799 
800 	if (rc) {
801 		/* if this is a reconnect to a busy export, there is no need to
802 		 * select a new target for connecting */
803 		imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
804 		spin_unlock(&imp->imp_lock);
805 		ptlrpc_maybe_ping_import_soon(imp);
806 		goto out;
807 	}
808 	spin_unlock(&imp->imp_lock);
809 
810 	LASSERT(imp->imp_conn_current);
811 
812 	msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
813 
814 	ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA,
815 				   RCL_SERVER);
816 	/* server replied obd_connect_data is always bigger */
817 	ocd = req_capsule_server_sized_get(&request->rq_pill,
818 					   &RMF_CONNECT_DATA, ret);
819 
820 	if (ocd == NULL) {
821 		CERROR("%s: no connect data from server\n",
822 		       imp->imp_obd->obd_name);
823 		rc = -EPROTO;
824 		goto out;
825 	}
826 
827 	spin_lock(&imp->imp_lock);
828 
829 	/* All imports are pingable */
830 	imp->imp_pingable = 1;
831 	imp->imp_force_reconnect = 0;
832 	imp->imp_force_verify = 0;
833 
834 	imp->imp_connect_data = *ocd;
835 
836 	CDEBUG(D_HA, "%s: connect to target with instance %u\n",
837 	       imp->imp_obd->obd_name, ocd->ocd_instance);
838 	exp = class_conn2export(&imp->imp_dlm_handle);
839 
840 	spin_unlock(&imp->imp_lock);
841 
842 	/* check that server granted subset of flags we asked for. */
843 	if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
844 	    ocd->ocd_connect_flags) {
845 		CERROR("%s: Server did not grant the requested subset of flags: asked=%#llx granted=%#llx\n",
846 		       imp->imp_obd->obd_name, imp->imp_connect_flags_orig,
847 		       ocd->ocd_connect_flags);
848 		rc = -EPROTO;
849 		goto out;
850 	}
851 
852 	if (!exp) {
853 		/* This could happen if export is cleaned during the
854 		   connect attempt */
855 		CERROR("%s: missing export after connect\n",
856 		       imp->imp_obd->obd_name);
857 		rc = -ENODEV;
858 		goto out;
859 	}
860 	old_connect_flags = exp_connect_flags(exp);
861 	exp->exp_connect_data = *ocd;
862 	imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
863 	class_export_put(exp);
864 
865 	obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
866 
867 	if (aa->pcaa_initial_connect) {
868 		spin_lock(&imp->imp_lock);
869 		if (msg_flags & MSG_CONNECT_REPLAYABLE) {
870 			imp->imp_replayable = 1;
871 			spin_unlock(&imp->imp_lock);
872 			CDEBUG(D_HA, "connected to replayable target: %s\n",
873 			       obd2cli_tgt(imp->imp_obd));
874 		} else {
875 			imp->imp_replayable = 0;
876 			spin_unlock(&imp->imp_lock);
877 		}
878 
879 		/* if applies, adjust the imp->imp_msg_magic here
880 		 * according to reply flags */
881 
882 		imp->imp_remote_handle =
883 				*lustre_msg_get_handle(request->rq_repmsg);
884 
885 		/* Initial connects are allowed for clients with non-random
886 		 * uuids when servers are in recovery.  Simply signal the
887 		 * server that replay is complete and wait in REPLAY_WAIT. */
888 		if (msg_flags & MSG_CONNECT_RECOVERING) {
889 			CDEBUG(D_HA, "connect to %s during recovery\n",
890 			       obd2cli_tgt(imp->imp_obd));
891 			IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
892 		} else {
893 			IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
894 			ptlrpc_activate_import(imp);
895 		}
896 
897 		rc = 0;
898 		goto finish;
899 	}
900 
901 	/* Determine what recovery state to move the import to. */
902 	if (msg_flags & MSG_CONNECT_RECONNECT) {
903 		memset(&old_hdl, 0, sizeof(old_hdl));
904 		if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
905 			    sizeof(old_hdl))) {
906 			LCONSOLE_WARN("Reconnect to %s (at %s) failed due to bad handle %#llx\n",
907 				      obd2cli_tgt(imp->imp_obd),
908 				      imp->imp_connection->c_remote_uuid.uuid,
909 				      imp->imp_dlm_handle.cookie);
910 			rc = -ENOTCONN;
911 			goto out;
912 		}
913 
914 		if (memcmp(&imp->imp_remote_handle,
915 			   lustre_msg_get_handle(request->rq_repmsg),
916 			   sizeof(imp->imp_remote_handle))) {
917 			int level = msg_flags & MSG_CONNECT_RECOVERING ?
918 				D_HA : D_WARNING;
919 
920 			/* Bug 16611/14775: if the server handle has changed,
921 			 * that means some sort of disconnection happened.
922 			 * If the server is not in recovery, that also means it
923 			 * already erased all of our state because of a previous
924 			 * eviction. If it is in recovery - we are safe to
925 			 * participate since we can reestablish all of our state
926 			 * with the server again */
927 			if ((msg_flags & MSG_CONNECT_RECOVERING)) {
928 				CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n",
929 				       obd2cli_tgt(imp->imp_obd),
930 				       imp->imp_connection->c_remote_uuid.uuid,
931 				       imp->imp_remote_handle.cookie,
932 				       lustre_msg_get_handle(
933 				       request->rq_repmsg)->cookie);
934 			} else {
935 				LCONSOLE_WARN("Evicted from %s (at %s) after server handle changed from %#llx to %#llx\n",
936 					      obd2cli_tgt(imp->imp_obd),
937 					      imp->imp_connection-> \
938 					      c_remote_uuid.uuid,
939 					      imp->imp_remote_handle.cookie,
940 					      lustre_msg_get_handle(
941 						      request->rq_repmsg)->cookie);
942 			}
943 
944 			imp->imp_remote_handle =
945 				     *lustre_msg_get_handle(request->rq_repmsg);
946 
947 			if (!(msg_flags & MSG_CONNECT_RECOVERING)) {
948 				IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
949 				rc = 0;
950 				goto finish;
951 			}
952 
953 		} else {
954 			CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
955 			       obd2cli_tgt(imp->imp_obd),
956 			       imp->imp_connection->c_remote_uuid.uuid);
957 		}
958 
959 		if (imp->imp_invalid) {
960 			CDEBUG(D_HA, "%s: reconnected but import is invalid; marking evicted\n",
961 			       imp->imp_obd->obd_name);
962 			IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
963 		} else if (msg_flags & MSG_CONNECT_RECOVERING) {
964 			CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
965 			       imp->imp_obd->obd_name,
966 			       obd2cli_tgt(imp->imp_obd));
967 
968 			spin_lock(&imp->imp_lock);
969 			imp->imp_resend_replay = 1;
970 			spin_unlock(&imp->imp_lock);
971 
972 			IMPORT_SET_STATE(imp, imp->imp_replay_state);
973 		} else {
974 			IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
975 		}
976 	} else if ((msg_flags & MSG_CONNECT_RECOVERING) && !imp->imp_invalid) {
977 		LASSERT(imp->imp_replayable);
978 		imp->imp_remote_handle =
979 				*lustre_msg_get_handle(request->rq_repmsg);
980 		imp->imp_last_replay_transno = 0;
981 		IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
982 	} else {
983 		DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
984 			  imp->imp_obd->obd_name, msg_flags);
985 		imp->imp_remote_handle =
986 				*lustre_msg_get_handle(request->rq_repmsg);
987 		IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
988 	}
989 
990 	/* Sanity checks for a reconnected import. */
991 	if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
992 		CERROR("imp_replayable flag does not match server after reconnect. We should LBUG right here.\n");
993 	}
994 
995 	if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
996 	    lustre_msg_get_last_committed(request->rq_repmsg) <
997 	    aa->pcaa_peer_committed) {
998 		CERROR("%s went back in time (transno %lld was previously committed, server now claims %lld)!  See https://bugzilla.lustre.org/show_bug.cgi?id=9646\n",
999 		       obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
1000 		       lustre_msg_get_last_committed(request->rq_repmsg));
1001 	}
1002 
1003 finish:
1004 	rc = ptlrpc_import_recovery_state_machine(imp);
1005 	if (rc != 0) {
1006 		if (rc == -ENOTCONN) {
1007 			CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
1008 			       obd2cli_tgt(imp->imp_obd),
1009 			       imp->imp_connection->c_remote_uuid.uuid);
1010 			ptlrpc_connect_import(imp);
1011 			imp->imp_connect_tried = 1;
1012 			return 0;
1013 		}
1014 	} else {
1015 
1016 		spin_lock(&imp->imp_lock);
1017 		list_del(&imp->imp_conn_current->oic_item);
1018 		list_add(&imp->imp_conn_current->oic_item,
1019 			     &imp->imp_conn_list);
1020 		imp->imp_last_success_conn =
1021 			imp->imp_conn_current->oic_last_attempt;
1022 
1023 		spin_unlock(&imp->imp_lock);
1024 
1025 		if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) &&
1026 		    !(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) {
1027 			LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %llx, replied %llx\n",
1028 				      imp->imp_obd->obd_name,
1029 				      imp->imp_connection->c_remote_uuid.uuid,
1030 				      imp->imp_connect_flags_orig,
1031 				      ocd->ocd_connect_flags);
1032 			rc = -EPROTO;
1033 			goto out;
1034 		}
1035 
1036 		if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1037 		    (ocd->ocd_version > LUSTRE_VERSION_CODE +
1038 					LUSTRE_VERSION_OFFSET_WARN ||
1039 		     ocd->ocd_version < LUSTRE_VERSION_CODE -
1040 					LUSTRE_VERSION_OFFSET_WARN)) {
1041 			/* Sigh, some compilers do not like #ifdef in the middle
1042 			   of macro arguments */
1043 			const char *older = "older. Consider upgrading server or downgrading client"
1044 				;
1045 			const char *newer = "newer than client version. Consider upgrading client"
1046 					    ;
1047 
1048 			LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
1049 				      obd2cli_tgt(imp->imp_obd),
1050 				      OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1051 				      OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1052 				      OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1053 				      OBD_OCD_VERSION_FIX(ocd->ocd_version),
1054 				      ocd->ocd_version > LUSTRE_VERSION_CODE ?
1055 				      newer : older, LUSTRE_VERSION_STRING);
1056 		}
1057 
1058 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
1059 		/* Check if server has LU-1252 fix applied to not always swab
1060 		 * the IR MNE entries. Do this only once per connection.  This
1061 		 * fixup is version-limited, because we don't want to carry the
1062 		 * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
1063 		 * need interop with unpatched 2.2 servers.  For newer servers,
1064 		 * the client will do MNE swabbing only as needed.  LU-1644 */
1065 		if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1066 			     !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
1067 			     OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
1068 			     OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
1069 			     OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
1070 			     strcmp(imp->imp_obd->obd_type->typ_name,
1071 				    LUSTRE_MGC_NAME) == 0))
1072 			imp->imp_need_mne_swab = 1;
1073 		else /* clear if server was upgraded since last connect */
1074 			imp->imp_need_mne_swab = 0;
1075 #else
1076 #warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
1077 #endif
1078 
1079 		if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
1080 			/* We sent to the server ocd_cksum_types with bits set
1081 			 * for algorithms we understand. The server masked off
1082 			 * the checksum types it doesn't support */
1083 			if ((ocd->ocd_cksum_types &
1084 			     cksum_types_supported_client()) == 0) {
1085 				LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
1086 					      obd2cli_tgt(imp->imp_obd),
1087 					      ocd->ocd_cksum_types,
1088 					      cksum_types_supported_client());
1089 				cli->cl_checksum = 0;
1090 				cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
1091 			} else {
1092 				cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
1093 			}
1094 		} else {
1095 			/* The server does not support OBD_CONNECT_CKSUM.
1096 			 * Enforce ADLER for backward compatibility*/
1097 			cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
1098 		}
1099 		cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
1100 
1101 		if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
1102 			cli->cl_max_pages_per_rpc =
1103 				min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
1104 				    cli->cl_max_pages_per_rpc);
1105 		else if (imp->imp_connect_op == MDS_CONNECT ||
1106 			 imp->imp_connect_op == MGS_CONNECT)
1107 			cli->cl_max_pages_per_rpc = 1;
1108 
1109 		/* Reset ns_connect_flags only for initial connect. It might be
1110 		 * changed while the FS is in use, and if we reset it on
1111 		 * reconnect we lose user settings applied before, such as
1112 		 * disabling lru_resize, etc. */
1113 		if (old_connect_flags != exp_connect_flags(exp) ||
1114 		    aa->pcaa_initial_connect) {
1115 			CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
1116 			       imp->imp_obd->obd_name, ocd->ocd_connect_flags);
1117 			imp->imp_obd->obd_namespace->ns_connect_flags =
1118 				ocd->ocd_connect_flags;
1119 			imp->imp_obd->obd_namespace->ns_orig_connect_flags =
1120 				ocd->ocd_connect_flags;
1121 		}
1122 
1123 		if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
1124 		    (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1125 			/* We need a per-message support flag, because
1126 			   a. we don't know if the incoming connect reply
1127 			      supports AT or not (in reply_in_callback)
1128 			      until we unpack it.
1129 			   b. failovered server means export and flags are gone
1130 			      (in ptlrpc_send_reply).
1131 			   Can only be set when we know AT is supported at
1132 			   both ends */
1133 			imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
1134 		else
1135 			imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
1136 
1137 		if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
1138 		    (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1139 			imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
1140 		else
1141 			imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
1142 
1143 		LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
1144 			(cli->cl_max_pages_per_rpc > 0));
1145 	}
1146 
1147 out:
1148 	imp->imp_connect_tried = 1;
1149 
1150 	if (rc != 0) {
1151 		IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
1152 		if (rc == -EACCES) {
1153 			/*
1154 			 * Give up trying to reconnect
1155 			 * EACCES means client has no permission for connection
1156 			 */
1157 			imp->imp_obd->obd_no_recov = 1;
1158 			ptlrpc_deactivate_import(imp);
1159 		}
1160 
1161 		if (rc == -EPROTO) {
1162 			struct obd_connect_data *ocd;
1163 
1164 			/* reply message might not be ready */
1165 			if (request->rq_repmsg == NULL)
1166 				return -EPROTO;
1167 
1168 			ocd = req_capsule_server_get(&request->rq_pill,
1169 						     &RMF_CONNECT_DATA);
1170 			if (ocd &&
1171 			    (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1172 			    (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
1173 				/*
1174 				 * Actually servers are only supposed to refuse
1175 				 * connection from liblustre clients, so we
1176 				 * should never see this from VFS context
1177 				 */
1178 				LCONSOLE_ERROR_MSG(0x16a, "Server %s version (%d.%d.%d.%d) refused connection from this client with an incompatible version (%s).  Client must be recompiled\n",
1179 						   obd2cli_tgt(imp->imp_obd),
1180 						   OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1181 						   OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1182 						   OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1183 						   OBD_OCD_VERSION_FIX(ocd->ocd_version),
1184 						   LUSTRE_VERSION_STRING);
1185 				ptlrpc_deactivate_import(imp);
1186 				IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
1187 			}
1188 			return -EPROTO;
1189 		}
1190 
1191 		ptlrpc_maybe_ping_import_soon(imp);
1192 
1193 		CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
1194 		       obd2cli_tgt(imp->imp_obd),
1195 		       (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
1196 	}
1197 
1198 	wake_up_all(&imp->imp_recovery_waitq);
1199 	return rc;
1200 }
1201 
1202 /**
1203  * interpret callback for "completed replay" RPCs.
1204  * \see signal_completed_replay
1205  */
1206 static int completed_replay_interpret(const struct lu_env *env,
1207 				      struct ptlrpc_request *req,
1208 				      void *data, int rc)
1209 {
1210 	atomic_dec(&req->rq_import->imp_replay_inflight);
1211 	if (req->rq_status == 0 &&
1212 	    !req->rq_import->imp_vbr_failed) {
1213 		ptlrpc_import_recovery_state_machine(req->rq_import);
1214 	} else {
1215 		if (req->rq_import->imp_vbr_failed) {
1216 			CDEBUG(D_WARNING,
1217 			       "%s: version recovery fails, reconnecting\n",
1218 			       req->rq_import->imp_obd->obd_name);
1219 		} else {
1220 			CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, reconnecting\n",
1221 			       req->rq_import->imp_obd->obd_name,
1222 			       req->rq_status);
1223 		}
1224 		ptlrpc_connect_import(req->rq_import);
1225 	}
1226 
1227 	return 0;
1228 }
1229 
1230 /**
1231  * Let the server know that we have no requests to replay anymore.
1232  * Achieved by just sending a PING request.
1233  */
1234 static int signal_completed_replay(struct obd_import *imp)
1235 {
1236 	struct ptlrpc_request *req;
1237 
1238 	if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
1239 		return 0;
1240 
1241 	LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
1242 	atomic_inc(&imp->imp_replay_inflight);
1243 
1244 	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
1245 					OBD_PING);
1246 	if (req == NULL) {
1247 		atomic_dec(&imp->imp_replay_inflight);
1248 		return -ENOMEM;
1249 	}
1250 
1251 	ptlrpc_request_set_replen(req);
1252 	req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
1253 	lustre_msg_add_flags(req->rq_reqmsg,
1254 			     MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
1255 	if (AT_OFF)
1256 		req->rq_timeout *= 3;
1257 	req->rq_interpret_reply = completed_replay_interpret;
1258 
1259 	ptlrpcd_add_req(req);
1260 	return 0;
1261 }
1262 
1263 /**
1264  * In kernel code all import invalidation happens in its own
1265  * separate thread, so that whatever application happened to encounter
1266  * the problem can still be killed or otherwise continue.
1267  */
1268 static int ptlrpc_invalidate_import_thread(void *data)
1269 {
1270 	struct obd_import *imp = data;
1271 
1272 	unshare_fs_struct();
1273 
1274 	CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
1275 	       imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
1276 	       imp->imp_connection->c_remote_uuid.uuid);
1277 
1278 	ptlrpc_invalidate_import(imp);
1279 
1280 	if (obd_dump_on_eviction) {
1281 		CERROR("dump the log upon eviction\n");
1282 		libcfs_debug_dumplog();
1283 	}
1284 
1285 	IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1286 	ptlrpc_import_recovery_state_machine(imp);
1287 
1288 	class_import_put(imp);
1289 	return 0;
1290 }
1291 
1292 /**
1293  * This is the state machine for client-side recovery on import.
1294  *
1295  * Typically we have two possible paths. If we connect to a server that is
1296  * not in recovery, we just enter the IMP_EVICTED state, invalidate our
1297  * import state and reconnect from scratch.
1298  * If we connect to a server that is in recovery, we enter the IMP_REPLAY
1299  * import state, go through our list of requests to replay, and send them
1300  * to the server one by one.
1301  * After sending all requests from the list we change the import state to
1302  * IMP_REPLAY_LOCKS and re-request from the server all the locks we believe
1303  * we hold, as well as those we do not yet have, and wait for the server to
1304  * grant them.  Then we send a special "replay completed" request and change
1305  * the import state to IMP_REPLAY_WAIT.
1306  * Upon receiving the reply to that "replay completed" RPC we enter the
1307  * IMP_RECOVER state and resend all requests from the sending list.
1308  * After that we promote the import to FULL state, send all delayed
1309  * requests, and the import is fully operational.
1310  *
1311  */
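/*
 * A rough sketch of the transitions described above (error paths and retry
 * loops omitted):
 *
 *   DISCON -> CONNECTING -+-> FULL                        (server not recovering)
 *                         +-> REPLAY -> REPLAY_LOCKS -> REPLAY_WAIT -> RECOVER -> FULL
 *                         +-> EVICTED -> (invalidate) -> RECOVER -> FULL
 */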
1312 int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
1313 {
1314 	int rc = 0;
1315 	int inflight;
1316 	char *target_start;
1317 	int target_len;
1318 
1319 	if (imp->imp_state == LUSTRE_IMP_EVICTED) {
1320 		deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1321 			  &target_start, &target_len);
1322 		/* Don't care about MGC eviction */
1323 		if (strcmp(imp->imp_obd->obd_type->typ_name,
1324 			   LUSTRE_MGC_NAME) != 0) {
1325 			LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted by %.*s; in progress operations using this service will fail.\n",
1326 					   imp->imp_obd->obd_name, target_len,
1327 					   target_start);
1328 		}
1329 		CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
1330 		       obd2cli_tgt(imp->imp_obd),
1331 		       imp->imp_connection->c_remote_uuid.uuid);
1332 		/* reset vbr_failed flag upon eviction */
1333 		spin_lock(&imp->imp_lock);
1334 		imp->imp_vbr_failed = 0;
1335 		spin_unlock(&imp->imp_lock);
1336 
1337 		{
1338 		struct task_struct *task;
1339 		/* bug 17802:  XXX client_disconnect_export vs connect request
1340 		 * race. If the client is evicted at this time, we would start
1341 		 * the invalidate thread without a reference to the import, and
1342 		 * the import could be freed at the same time. */
1343 		class_import_get(imp);
1344 		task = kthread_run(ptlrpc_invalidate_import_thread, imp,
1345 				     "ll_imp_inval");
1346 		if (IS_ERR(task)) {
1347 			class_import_put(imp);
1348 			rc = PTR_ERR(task);
1349 			CERROR("error starting invalidate thread: %d\n", rc);
1350 		} else {
1351 			rc = 0;
1352 		}
1353 		return rc;
1354 		}
1355 	}
1356 
1357 	if (imp->imp_state == LUSTRE_IMP_REPLAY) {
1358 		CDEBUG(D_HA, "replay requested by %s\n",
1359 		       obd2cli_tgt(imp->imp_obd));
1360 		rc = ptlrpc_replay_next(imp, &inflight);
1361 		if (inflight == 0 &&
1362 		    atomic_read(&imp->imp_replay_inflight) == 0) {
1363 			IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
1364 			rc = ldlm_replay_locks(imp);
1365 			if (rc)
1366 				goto out;
1367 		}
1368 		rc = 0;
1369 	}
1370 
1371 	if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
1372 		if (atomic_read(&imp->imp_replay_inflight) == 0) {
1373 			IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
1374 			rc = signal_completed_replay(imp);
1375 			if (rc)
1376 				goto out;
1377 		}
1378 
1379 	}
1380 
1381 	if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
1382 		if (atomic_read(&imp->imp_replay_inflight) == 0) {
1383 			IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1384 		}
1385 	}
1386 
1387 	if (imp->imp_state == LUSTRE_IMP_RECOVER) {
1388 		CDEBUG(D_HA, "reconnected to %s@%s\n",
1389 		       obd2cli_tgt(imp->imp_obd),
1390 		       imp->imp_connection->c_remote_uuid.uuid);
1391 
1392 		rc = ptlrpc_resend(imp);
1393 		if (rc)
1394 			goto out;
1395 		IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
1396 		ptlrpc_activate_import(imp);
1397 
1398 		deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1399 			  &target_start, &target_len);
1400 		LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n",
1401 			      imp->imp_obd->obd_name,
1402 			      target_len, target_start,
1403 			      libcfs_nid2str(imp->imp_connection->c_peer.nid));
1404 	}
1405 
1406 	if (imp->imp_state == LUSTRE_IMP_FULL) {
1407 		wake_up_all(&imp->imp_recovery_waitq);
1408 		ptlrpc_wake_delayed(imp);
1409 	}
1410 
1411 out:
1412 	return rc;
1413 }
1414 
1415 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
1416 {
1417 	struct ptlrpc_request *req;
1418 	int rq_opc, rc = 0;
1419 
1420 	if (imp->imp_obd->obd_force)
1421 		goto set_state;
1422 
1423 	switch (imp->imp_connect_op) {
1424 	case OST_CONNECT:
1425 		rq_opc = OST_DISCONNECT;
1426 		break;
1427 	case MDS_CONNECT:
1428 		rq_opc = MDS_DISCONNECT;
1429 		break;
1430 	case MGS_CONNECT:
1431 		rq_opc = MGS_DISCONNECT;
1432 		break;
1433 	default:
1434 		rc = -EINVAL;
1435 		CERROR("%s: don't know how to disconnect from %s (connect_op %d): rc = %d\n",
1436 		       imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
1437 		       imp->imp_connect_op, rc);
1438 		return rc;
1439 	}
1440 
1441 	if (ptlrpc_import_in_recovery(imp)) {
1442 		struct l_wait_info lwi;
1443 		long timeout;
1444 
1445 		if (AT_OFF) {
1446 			if (imp->imp_server_timeout)
1447 				timeout = cfs_time_seconds(obd_timeout / 2);
1448 			else
1449 				timeout = cfs_time_seconds(obd_timeout);
1450 		} else {
1451 			int idx = import_at_get_index(imp,
1452 				imp->imp_client->cli_request_portal);
1453 			timeout = cfs_time_seconds(
1454 				at_get(&imp->imp_at.iat_service_estimate[idx]));
1455 		}
1456 
1457 		lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
1458 				       back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
1459 		rc = l_wait_event(imp->imp_recovery_waitq,
1460 				  !ptlrpc_import_in_recovery(imp), &lwi);
1461 
1462 	}
1463 
1464 	spin_lock(&imp->imp_lock);
1465 	if (imp->imp_state != LUSTRE_IMP_FULL)
1466 		goto out;
1467 	spin_unlock(&imp->imp_lock);
1468 
1469 	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
1470 					LUSTRE_OBD_VERSION, rq_opc);
1471 	if (req) {
1472 		/* We are disconnecting; do not retry the DISCONNECT rpc if
1473 		 * it fails.  We can get through the above with a down server
1474 		 * if the client doesn't know the server is gone yet. */
1475 		req->rq_no_resend = 1;
1476 
1477 		/* We want client umounts to happen quickly, no matter the
1478 		   server state... */
1479 		req->rq_timeout = min_t(int, req->rq_timeout,
1480 					INITIAL_CONNECT_TIMEOUT);
1481 
1482 		IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
1483 		req->rq_send_state = LUSTRE_IMP_CONNECTING;
1484 		ptlrpc_request_set_replen(req);
1485 		rc = ptlrpc_queue_wait(req);
1486 		ptlrpc_req_finished(req);
1487 	}
1488 
1489 set_state:
1490 	spin_lock(&imp->imp_lock);
1491 out:
1492 	if (noclose)
1493 		IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
1494 	else
1495 		IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1496 	memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
1497 	spin_unlock(&imp->imp_lock);
1498 
1499 	if (rc == -ETIMEDOUT || rc == -ENOTCONN || rc == -ESHUTDOWN)
1500 		rc = 0;
1501 
1502 	return rc;
1503 }
1504 EXPORT_SYMBOL(ptlrpc_disconnect_import);
1505 
1506 /* Adaptive Timeout utils */
1507 extern unsigned int at_min, at_max, at_history;
1508 
1509 /* Bin into timeslices using AT_BINS bins.
1510    This gives us a max of the last binlimit*AT_BINS secs without the storage,
1511    but still smoothing out a return to normalcy from a slow response.
1512    (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
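/*
 * A worked example of the binning arithmetic below, assuming at_history = 600
 * and AT_BINS = 4 (so binlimit = 150s): a measurement arriving 400s after
 * at_binstart shifts the history by 400 / 150 = 2 bins, so the two oldest
 * bins are dropped, the two newest old bins move to slots 2 and 3, the new
 * value goes into bin 0, and at_current becomes the max of the new value and
 * the surviving bins.  at_binstart then advances by 2 * 150s.
 */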
1513 int at_measured(struct adaptive_timeout *at, unsigned int val)
1514 {
1515 	unsigned int old = at->at_current;
1516 	time64_t now = ktime_get_real_seconds();
1517 	long binlimit = max_t(long, at_history / AT_BINS, 1);
1518 
1519 	LASSERT(at);
1520 	CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
1521 	       val, at, (long)(now - at->at_binstart), at->at_current,
1522 	       at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
1523 
1524 	if (val == 0)
1525 		/* 0's don't count, because we never want our timeout to
1526 		   drop to 0, and because 0 could mean an error */
1527 		return 0;
1528 
1529 	spin_lock(&at->at_lock);
1530 
1531 	if (unlikely(at->at_binstart == 0)) {
1532 		/* Special case to remove default from history */
1533 		at->at_current = val;
1534 		at->at_worst_ever = val;
1535 		at->at_worst_time = now;
1536 		at->at_hist[0] = val;
1537 		at->at_binstart = now;
1538 	} else if (now - at->at_binstart < binlimit) {
1539 		/* in bin 0 */
1540 		at->at_hist[0] = max(val, at->at_hist[0]);
1541 		at->at_current = max(val, at->at_current);
1542 	} else {
1543 		int i, shift;
1544 		unsigned int maxv = val;
1545 		/* move bins over */
1546 		shift = (u32)(now - at->at_binstart) / binlimit;
1547 		LASSERT(shift > 0);
1548 		for (i = AT_BINS - 1; i >= 0; i--) {
1549 			if (i >= shift) {
1550 				at->at_hist[i] = at->at_hist[i - shift];
1551 				maxv = max(maxv, at->at_hist[i]);
1552 			} else {
1553 				at->at_hist[i] = 0;
1554 			}
1555 		}
1556 		at->at_hist[0] = val;
1557 		at->at_current = maxv;
1558 		at->at_binstart += shift * binlimit;
1559 	}
1560 
1561 	if (at->at_current > at->at_worst_ever) {
1562 		at->at_worst_ever = at->at_current;
1563 		at->at_worst_time = now;
1564 	}
1565 
1566 	if (at->at_flags & AT_FLG_NOHIST)
1567 		/* Only keep last reported val; keeping the rest of the history
1568 		   for proc only */
1569 		at->at_current = val;
1570 
1571 	if (at_max > 0)
1572 		at->at_current =  min(at->at_current, at_max);
1573 	at->at_current =  max(at->at_current, at_min);
1574 
1575 	if (at->at_current != old)
1576 		CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d (val=%u) hist %u %u %u %u\n",
1577 		       at,
1578 		       old, at->at_current, at->at_current - old, val,
1579 		       at->at_hist[0], at->at_hist[1], at->at_hist[2],
1580 		       at->at_hist[3]);
1581 
1582 	/* if we changed, report the old value */
1583 	old = (at->at_current != old) ? old : 0;
1584 
1585 	spin_unlock(&at->at_lock);
1586 	return old;
1587 }
1588 
1589 /* Find the imp_at index for a given portal; assign if space available */
1590 int import_at_get_index(struct obd_import *imp, int portal)
1591 {
1592 	struct imp_at *at = &imp->imp_at;
1593 	int i;
1594 
1595 	for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
1596 		if (at->iat_portal[i] == portal)
1597 			return i;
1598 		if (at->iat_portal[i] == 0)
1599 			/* unused */
1600 			break;
1601 	}
1602 
1603 	/* Not found in list, add it under a lock */
1604 	spin_lock(&imp->imp_lock);
1605 
1606 	/* Check unused under lock */
1607 	for (; i < IMP_AT_MAX_PORTALS; i++) {
1608 		if (at->iat_portal[i] == portal)
1609 			goto out;
1610 		if (at->iat_portal[i] == 0)
1611 			/* unused */
1612 			break;
1613 	}
1614 
1615 	/* Not enough portals? */
1616 	LASSERT(i < IMP_AT_MAX_PORTALS);
1617 
1618 	at->iat_portal[i] = portal;
1619 out:
1620 	spin_unlock(&imp->imp_lock);
1621 	return i;
1622 }
1623