1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
36 
37 #include "../../include/linux/lnet/lib-lnet.h"
38 #include "../../include/linux/lnet/lib-dlc.h"
39 
40 #define D_LNI D_CONSOLE
41 
42 lnet_t the_lnet;			   /* THE state of the network */
43 EXPORT_SYMBOL(the_lnet);
44 
45 static char *ip2nets = "";
46 module_param(ip2nets, charp, 0444);
47 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
48 
49 static char *networks = "";
50 module_param(networks, charp, 0444);
51 MODULE_PARM_DESC(networks, "local networks");
52 
53 static char *routes = "";
54 module_param(routes, charp, 0444);
55 MODULE_PARM_DESC(routes, "routes to non-local networks");
56 
57 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
58 module_param(rnet_htable_size, int, 0444);
59 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
60 
61 static int lnet_ping(lnet_process_id_t id, int timeout_ms,
62 		     lnet_process_id_t __user *ids, int n_ids);
63 
64 static char *
65 lnet_get_routes(void)
66 {
67 	return routes;
68 }
69 
70 static char *
71 lnet_get_networks(void)
72 {
73 	char *nets;
74 	int rc;
75 
76 	if (*networks && *ip2nets) {
77 		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
78 		return NULL;
79 	}
80 
81 	if (*ip2nets) {
82 		rc = lnet_parse_ip2nets(&nets, ip2nets);
83 		return !rc ? nets : NULL;
84 	}
85 
86 	if (*networks)
87 		return networks;
88 
89 	return "tcp";
90 }
91 
92 static void
93 lnet_init_locks(void)
94 {
95 	spin_lock_init(&the_lnet.ln_eq_wait_lock);
96 	init_waitqueue_head(&the_lnet.ln_eq_waitq);
97 	init_waitqueue_head(&the_lnet.ln_rc_waitq);
98 	mutex_init(&the_lnet.ln_lnd_mutex);
99 	mutex_init(&the_lnet.ln_api_mutex);
100 }
101 
102 static int
103 lnet_create_remote_nets_table(void)
104 {
105 	int i;
106 	struct list_head *hash;
107 
108 	LASSERT(!the_lnet.ln_remote_nets_hash);
109 	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
110 	LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
111 	if (!hash) {
112 		CERROR("Failed to create remote nets hash table\n");
113 		return -ENOMEM;
114 	}
115 
116 	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
117 		INIT_LIST_HEAD(&hash[i]);
118 	the_lnet.ln_remote_nets_hash = hash;
119 	return 0;
120 }
121 
122 static void
123 lnet_destroy_remote_nets_table(void)
124 {
125 	int i;
126 
127 	if (!the_lnet.ln_remote_nets_hash)
128 		return;
129 
130 	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
131 		LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
132 
133 	LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
134 		    LNET_REMOTE_NETS_HASH_SIZE *
135 		    sizeof(the_lnet.ln_remote_nets_hash[0]));
136 	the_lnet.ln_remote_nets_hash = NULL;
137 }
138 
139 static void
140 lnet_destroy_locks(void)
141 {
142 	if (the_lnet.ln_res_lock) {
143 		cfs_percpt_lock_free(the_lnet.ln_res_lock);
144 		the_lnet.ln_res_lock = NULL;
145 	}
146 
147 	if (the_lnet.ln_net_lock) {
148 		cfs_percpt_lock_free(the_lnet.ln_net_lock);
149 		the_lnet.ln_net_lock = NULL;
150 	}
151 }
152 
153 static int
154 lnet_create_locks(void)
155 {
156 	lnet_init_locks();
157 
158 	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
159 	if (!the_lnet.ln_res_lock)
160 		goto failed;
161 
162 	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
163 	if (!the_lnet.ln_net_lock)
164 		goto failed;
165 
166 	return 0;
167 
168  failed:
169 	lnet_destroy_locks();
170 	return -ENOMEM;
171 }
172 
173 static void lnet_assert_wire_constants(void)
174 {
175 	/*
176 	 * Wire protocol assertions generated by 'wirecheck'
177 	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
178 	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
179 	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
180 	 */
181 
182 	/* Constants... */
183 	CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
184 	CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
185 	CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
186 	CLASSERT(LNET_MSG_ACK == 0);
187 	CLASSERT(LNET_MSG_PUT == 1);
188 	CLASSERT(LNET_MSG_GET == 2);
189 	CLASSERT(LNET_MSG_REPLY == 3);
190 	CLASSERT(LNET_MSG_HELLO == 4);
191 
192 	/* Checks for struct ptl_handle_wire_t */
193 	CLASSERT((int)sizeof(lnet_handle_wire_t) == 16);
194 	CLASSERT((int)offsetof(lnet_handle_wire_t, wh_interface_cookie) == 0);
195 	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) == 8);
196 	CLASSERT((int)offsetof(lnet_handle_wire_t, wh_object_cookie) == 8);
197 	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_object_cookie) == 8);
198 
199 	/* Checks for struct lnet_magicversion_t */
200 	CLASSERT((int)sizeof(lnet_magicversion_t) == 8);
201 	CLASSERT((int)offsetof(lnet_magicversion_t, magic) == 0);
202 	CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->magic) == 4);
203 	CLASSERT((int)offsetof(lnet_magicversion_t, version_major) == 4);
204 	CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_major) == 2);
205 	CLASSERT((int)offsetof(lnet_magicversion_t, version_minor) == 6);
206 	CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_minor) == 2);
207 
208 	/* Checks for struct lnet_hdr_t */
209 	CLASSERT((int)sizeof(lnet_hdr_t) == 72);
210 	CLASSERT((int)offsetof(lnet_hdr_t, dest_nid) == 0);
211 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_nid) == 8);
212 	CLASSERT((int)offsetof(lnet_hdr_t, src_nid) == 8);
213 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_nid) == 8);
214 	CLASSERT((int)offsetof(lnet_hdr_t, dest_pid) == 16);
215 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_pid) == 4);
216 	CLASSERT((int)offsetof(lnet_hdr_t, src_pid) == 20);
217 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_pid) == 4);
218 	CLASSERT((int)offsetof(lnet_hdr_t, type) == 24);
219 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->type) == 4);
220 	CLASSERT((int)offsetof(lnet_hdr_t, payload_length) == 28);
221 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->payload_length) == 4);
222 	CLASSERT((int)offsetof(lnet_hdr_t, msg) == 32);
223 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg) == 40);
224 
225 	/* Ack */
226 	CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.dst_wmd) == 32);
227 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.dst_wmd) == 16);
228 	CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.match_bits) == 48);
229 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.match_bits) == 8);
230 	CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.mlength) == 56);
231 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.mlength) == 4);
232 
233 	/* Put */
234 	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ack_wmd) == 32);
235 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ack_wmd) == 16);
236 	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.match_bits) == 48);
237 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.match_bits) == 8);
238 	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.hdr_data) == 56);
239 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.hdr_data) == 8);
240 	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ptl_index) == 64);
241 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ptl_index) == 4);
242 	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.offset) == 68);
243 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.offset) == 4);
244 
245 	/* Get */
246 	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.return_wmd) == 32);
247 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.return_wmd) == 16);
248 	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.match_bits) == 48);
249 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.match_bits) == 8);
250 	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.ptl_index) == 56);
251 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.ptl_index) == 4);
252 	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.src_offset) == 60);
253 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.src_offset) == 4);
254 	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.sink_length) == 64);
255 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.sink_length) == 4);
256 
257 	/* Reply */
258 	CLASSERT((int)offsetof(lnet_hdr_t, msg.reply.dst_wmd) == 32);
259 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.reply.dst_wmd) == 16);
260 
261 	/* Hello */
262 	CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.incarnation) == 32);
263 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.incarnation) == 8);
264 	CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.type) == 40);
265 	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.type) == 4);
266 }
267 
268 static lnd_t *
269 lnet_find_lnd_by_type(__u32 type)
270 {
271 	lnd_t *lnd;
272 	struct list_head *tmp;
273 
274 	/* holding lnd mutex */
275 	list_for_each(tmp, &the_lnet.ln_lnds) {
276 		lnd = list_entry(tmp, lnd_t, lnd_list);
277 
278 		if (lnd->lnd_type == type)
279 			return lnd;
280 	}
281 
282 	return NULL;
283 }
284 
285 void
286 lnet_register_lnd(lnd_t *lnd)
287 {
288 	mutex_lock(&the_lnet.ln_lnd_mutex);
289 
290 	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
291 	LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type));
292 
293 	list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
294 	lnd->lnd_refcount = 0;
295 
296 	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
297 
298 	mutex_unlock(&the_lnet.ln_lnd_mutex);
299 }
300 EXPORT_SYMBOL(lnet_register_lnd);
301 
302 void
303 lnet_unregister_lnd(lnd_t *lnd)
304 {
305 	mutex_lock(&the_lnet.ln_lnd_mutex);
306 
307 	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
308 	LASSERT(!lnd->lnd_refcount);
309 
310 	list_del(&lnd->lnd_list);
311 	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
312 
313 	mutex_unlock(&the_lnet.ln_lnd_mutex);
314 }
315 EXPORT_SYMBOL(lnet_unregister_lnd);
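/*
 * Illustrative sketch (assumption, not taken from this file): an LND
 * module would normally pair these two calls in its module init/exit
 * hooks, e.g.
 *
 *	static int __init my_lnd_init(void)
 *	{
 *		lnet_register_lnd(&my_lnd);	(my_lnd is a hypothetical lnd_t)
 *		return 0;
 *	}
 *
 *	static void __exit my_lnd_exit(void)
 *	{
 *		lnet_unregister_lnd(&my_lnd);
 *	}
 */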
316 
317 void
318 lnet_counters_get(lnet_counters_t *counters)
319 {
320 	lnet_counters_t *ctr;
321 	int i;
322 
323 	memset(counters, 0, sizeof(*counters));
324 
325 	lnet_net_lock(LNET_LOCK_EX);
326 
327 	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
328 		counters->msgs_max     += ctr->msgs_max;
329 		counters->msgs_alloc   += ctr->msgs_alloc;
330 		counters->errors       += ctr->errors;
331 		counters->send_count   += ctr->send_count;
332 		counters->recv_count   += ctr->recv_count;
333 		counters->route_count  += ctr->route_count;
334 		counters->drop_count   += ctr->drop_count;
335 		counters->send_length  += ctr->send_length;
336 		counters->recv_length  += ctr->recv_length;
337 		counters->route_length += ctr->route_length;
338 		counters->drop_length  += ctr->drop_length;
339 	}
340 	lnet_net_unlock(LNET_LOCK_EX);
341 }
342 EXPORT_SYMBOL(lnet_counters_get);
343 
344 void
345 lnet_counters_reset(void)
346 {
347 	lnet_counters_t *counters;
348 	int i;
349 
350 	lnet_net_lock(LNET_LOCK_EX);
351 
352 	cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
353 		memset(counters, 0, sizeof(lnet_counters_t));
354 
355 	lnet_net_unlock(LNET_LOCK_EX);
356 }
357 
358 static char *
359 lnet_res_type2str(int type)
360 {
361 	switch (type) {
362 	default:
363 		LBUG();
364 	case LNET_COOKIE_TYPE_MD:
365 		return "MD";
366 	case LNET_COOKIE_TYPE_ME:
367 		return "ME";
368 	case LNET_COOKIE_TYPE_EQ:
369 		return "EQ";
370 	}
371 }
372 
373 static void
374 lnet_res_container_cleanup(struct lnet_res_container *rec)
375 {
376 	int count = 0;
377 
378 	if (!rec->rec_type) /* not set yet, it's uninitialized */
379 		return;
380 
381 	while (!list_empty(&rec->rec_active)) {
382 		struct list_head *e = rec->rec_active.next;
383 
384 		list_del_init(e);
385 		if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
386 			lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
387 
388 		} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
389 			lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
390 
391 		} else { /* NB: Active MEs should be attached on portals */
392 			LBUG();
393 		}
394 		count++;
395 	}
396 
397 	if (count > 0) {
398 		/*
399 		 * Found live MDs/MEs/EQs; the user really should unlink/free
400 		 * all of them before finalizing LNet, but if someone didn't,
401 		 * we have to recycle the garbage for them
402 		 */
403 		CERROR("%d active elements on exit of %s container\n",
404 		       count, lnet_res_type2str(rec->rec_type));
405 	}
406 
407 	if (rec->rec_lh_hash) {
408 		LIBCFS_FREE(rec->rec_lh_hash,
409 			    LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
410 		rec->rec_lh_hash = NULL;
411 	}
412 
413 	rec->rec_type = 0; /* mark it as finalized */
414 }
415 
416 static int
417 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
418 {
419 	int rc = 0;
420 	int i;
421 
422 	LASSERT(!rec->rec_type);
423 
424 	rec->rec_type = type;
425 	INIT_LIST_HEAD(&rec->rec_active);
426 	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
427 
428 	/* Arbitrary choice of hash table size */
429 	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
430 			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
431 	if (!rec->rec_lh_hash) {
432 		rc = -ENOMEM;
433 		goto out;
434 	}
435 
436 	for (i = 0; i < LNET_LH_HASH_SIZE; i++)
437 		INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
438 
439 	return 0;
440 
441 out:
442 	CERROR("Failed to setup %s resource container\n",
443 	       lnet_res_type2str(type));
444 	lnet_res_container_cleanup(rec);
445 	return rc;
446 }
447 
448 static void
449 lnet_res_containers_destroy(struct lnet_res_container **recs)
450 {
451 	struct lnet_res_container *rec;
452 	int i;
453 
454 	cfs_percpt_for_each(rec, i, recs)
455 		lnet_res_container_cleanup(rec);
456 
457 	cfs_percpt_free(recs);
458 }
459 
460 static struct lnet_res_container **
461 lnet_res_containers_create(int type)
462 {
463 	struct lnet_res_container **recs;
464 	struct lnet_res_container *rec;
465 	int rc;
466 	int i;
467 
468 	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
469 	if (!recs) {
470 		CERROR("Failed to allocate %s resource containers\n",
471 		       lnet_res_type2str(type));
472 		return NULL;
473 	}
474 
475 	cfs_percpt_for_each(rec, i, recs) {
476 		rc = lnet_res_container_setup(rec, i, type);
477 		if (rc) {
478 			lnet_res_containers_destroy(recs);
479 			return NULL;
480 		}
481 	}
482 
483 	return recs;
484 }
485 
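/*
 * Resource cookies are laid out as [ sequence | CPT | type ]: the low
 * LNET_COOKIE_TYPE_BITS hold the resource type, the next LNET_CPT_BITS
 * hold the CPT the resource was created on, and the remaining high bits
 * are a per-container sequence number.  This is the layout encoded by
 * lnet_res_lh_initialize() below, which seeds rec_lh_cookie with
 * (cpt << LNET_COOKIE_TYPE_BITS) | type and bumps it by
 * 1 << (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS) per handle.
 */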
486 lnet_libhandle_t *
487 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
488 {
489 	/* ALWAYS called with lnet_res_lock held */
490 	struct list_head *head;
491 	lnet_libhandle_t *lh;
492 	unsigned int hash;
493 
494 	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
495 		return NULL;
496 
497 	hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
498 	head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
499 
500 	list_for_each_entry(lh, head, lh_hash_chain) {
501 		if (lh->lh_cookie == cookie)
502 			return lh;
503 	}
504 
505 	return NULL;
506 }
507 
508 void
509 lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
510 {
511 	/* ALWAYS called with lnet_res_lock held */
512 	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
513 	unsigned int hash;
514 
515 	lh->lh_cookie = rec->rec_lh_cookie;
516 	rec->rec_lh_cookie += 1 << ibits;
517 
518 	hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
519 
520 	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
521 }
522 
523 static int lnet_unprepare(void);
524 
525 static int
526 lnet_prepare(lnet_pid_t requested_pid)
527 {
528 	/* Prepare to bring up the network */
529 	struct lnet_res_container **recs;
530 	int rc = 0;
531 
532 	if (requested_pid == LNET_PID_ANY) {
533 		/* Don't instantiate LNET just for me */
534 		return -ENETDOWN;
535 	}
536 
537 	LASSERT(!the_lnet.ln_refcount);
538 
539 	the_lnet.ln_routing = 0;
540 
541 	LASSERT(!(requested_pid & LNET_PID_USERFLAG));
542 	the_lnet.ln_pid = requested_pid;
543 
544 	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
545 	INIT_LIST_HEAD(&the_lnet.ln_nis);
546 	INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
547 	INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
548 	INIT_LIST_HEAD(&the_lnet.ln_routers);
549 	INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
550 	INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
551 
552 	rc = lnet_create_remote_nets_table();
553 	if (rc)
554 		goto failed;
555 	/*
556 	 * NB the interface cookie in wire handles guards against delayed
557 	 * replies and ACKs appearing valid after reboot.
558 	 */
559 	the_lnet.ln_interface_cookie = ktime_get_ns();
560 
561 	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
562 						sizeof(lnet_counters_t));
563 	if (!the_lnet.ln_counters) {
564 		CERROR("Failed to allocate counters for LNet\n");
565 		rc = -ENOMEM;
566 		goto failed;
567 	}
568 
569 	rc = lnet_peer_tables_create();
570 	if (rc)
571 		goto failed;
572 
573 	rc = lnet_msg_containers_create();
574 	if (rc)
575 		goto failed;
576 
577 	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
578 				      LNET_COOKIE_TYPE_EQ);
579 	if (rc)
580 		goto failed;
581 
582 	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
583 	if (!recs) {
584 		rc = -ENOMEM;
585 		goto failed;
586 	}
587 
588 	the_lnet.ln_me_containers = recs;
589 
590 	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
591 	if (!recs) {
592 		rc = -ENOMEM;
593 		goto failed;
594 	}
595 
596 	the_lnet.ln_md_containers = recs;
597 
598 	rc = lnet_portals_create();
599 	if (rc) {
600 		CERROR("Failed to create portals for LNet: %d\n", rc);
601 		goto failed;
602 	}
603 
604 	return 0;
605 
606  failed:
607 	lnet_unprepare();
608 	return rc;
609 }
610 
611 static int
612 lnet_unprepare(void)
613 {
614 	/*
615 	 * NB no LNET_LOCK since this is the last reference.  All LND instances
616 	 * have shut down already, so it is safe to unlink and free all
617 	 * descriptors, even those that appear committed to a network op (eg MD
618 	 * with non-zero pending count)
619 	 */
620 	lnet_fail_nid(LNET_NID_ANY, 0);
621 
622 	LASSERT(!the_lnet.ln_refcount);
623 	LASSERT(list_empty(&the_lnet.ln_test_peers));
624 	LASSERT(list_empty(&the_lnet.ln_nis));
625 	LASSERT(list_empty(&the_lnet.ln_nis_cpt));
626 	LASSERT(list_empty(&the_lnet.ln_nis_zombie));
627 
628 	lnet_portals_destroy();
629 
630 	if (the_lnet.ln_md_containers) {
631 		lnet_res_containers_destroy(the_lnet.ln_md_containers);
632 		the_lnet.ln_md_containers = NULL;
633 	}
634 
635 	if (the_lnet.ln_me_containers) {
636 		lnet_res_containers_destroy(the_lnet.ln_me_containers);
637 		the_lnet.ln_me_containers = NULL;
638 	}
639 
640 	lnet_res_container_cleanup(&the_lnet.ln_eq_container);
641 
642 	lnet_msg_containers_destroy();
643 	lnet_peer_tables_destroy();
644 	lnet_rtrpools_free(0);
645 
646 	if (the_lnet.ln_counters) {
647 		cfs_percpt_free(the_lnet.ln_counters);
648 		the_lnet.ln_counters = NULL;
649 	}
650 	lnet_destroy_remote_nets_table();
651 
652 	return 0;
653 }
654 
655 lnet_ni_t  *
656 lnet_net2ni_locked(__u32 net, int cpt)
657 {
658 	struct list_head *tmp;
659 	lnet_ni_t *ni;
660 
661 	LASSERT(cpt != LNET_LOCK_EX);
662 
663 	list_for_each(tmp, &the_lnet.ln_nis) {
664 		ni = list_entry(tmp, lnet_ni_t, ni_list);
665 
666 		if (LNET_NIDNET(ni->ni_nid) == net) {
667 			lnet_ni_addref_locked(ni, cpt);
668 			return ni;
669 		}
670 	}
671 
672 	return NULL;
673 }
674 
675 lnet_ni_t *
676 lnet_net2ni(__u32 net)
677 {
678 	lnet_ni_t *ni;
679 
680 	lnet_net_lock(0);
681 	ni = lnet_net2ni_locked(net, 0);
682 	lnet_net_unlock(0);
683 
684 	return ni;
685 }
686 EXPORT_SYMBOL(lnet_net2ni);
687 
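/*
 * Map a NID to a CPT index in [0, number).  hash_long() yields a value in
 * [0, 2^LNET_CPT_BITS); since the configured CPT count need not be a
 * power of two, values that fall outside the range are folded back in
 * with the modulo below.
 */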
688 static unsigned int
689 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
690 {
691 	__u64 key = nid;
692 	unsigned int val;
693 
694 	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
695 
696 	if (number == 1)
697 		return 0;
698 
699 	val = hash_long(key, LNET_CPT_BITS);
700 	/* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
701 	if (val < number)
702 		return val;
703 
704 	return (unsigned int)(key + val + (val >> 1)) % number;
705 }
706 
707 int
708 lnet_cpt_of_nid_locked(lnet_nid_t nid)
709 {
710 	struct lnet_ni *ni;
711 
712 	/* must be called while holding lnet_net_lock */
713 	if (LNET_CPT_NUMBER == 1)
714 		return 0; /* the only one */
715 
716 	/* taking lnet_net_lock(any) would be OK */
717 	if (!list_empty(&the_lnet.ln_nis_cpt)) {
718 		list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
719 			if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
720 				continue;
721 
722 			LASSERT(ni->ni_cpts);
723 			return ni->ni_cpts[lnet_nid_cpt_hash
724 					   (nid, ni->ni_ncpts)];
725 		}
726 	}
727 
728 	return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
729 }
730 
731 int
732 lnet_cpt_of_nid(lnet_nid_t nid)
733 {
734 	int cpt;
735 	int cpt2;
736 
737 	if (LNET_CPT_NUMBER == 1)
738 		return 0; /* the only one */
739 
740 	if (list_empty(&the_lnet.ln_nis_cpt))
741 		return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
742 
743 	cpt = lnet_net_lock_current();
744 	cpt2 = lnet_cpt_of_nid_locked(nid);
745 	lnet_net_unlock(cpt);
746 
747 	return cpt2;
748 }
749 EXPORT_SYMBOL(lnet_cpt_of_nid);
750 
751 int
752 lnet_islocalnet(__u32 net)
753 {
754 	struct lnet_ni *ni;
755 	int cpt;
756 
757 	cpt = lnet_net_lock_current();
758 
759 	ni = lnet_net2ni_locked(net, cpt);
760 	if (ni)
761 		lnet_ni_decref_locked(ni, cpt);
762 
763 	lnet_net_unlock(cpt);
764 
765 	return !!ni;
766 }
767 
768 lnet_ni_t  *
769 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
770 {
771 	struct lnet_ni *ni;
772 	struct list_head *tmp;
773 
774 	LASSERT(cpt != LNET_LOCK_EX);
775 
776 	list_for_each(tmp, &the_lnet.ln_nis) {
777 		ni = list_entry(tmp, lnet_ni_t, ni_list);
778 
779 		if (ni->ni_nid == nid) {
780 			lnet_ni_addref_locked(ni, cpt);
781 			return ni;
782 		}
783 	}
784 
785 	return NULL;
786 }
787 
788 int
789 lnet_islocalnid(lnet_nid_t nid)
790 {
791 	struct lnet_ni *ni;
792 	int cpt;
793 
794 	cpt = lnet_net_lock_current();
795 	ni = lnet_nid2ni_locked(nid, cpt);
796 	if (ni)
797 		lnet_ni_decref_locked(ni, cpt);
798 	lnet_net_unlock(cpt);
799 
800 	return !!ni;
801 }
802 
803 int
804 lnet_count_acceptor_nis(void)
805 {
806 	/* Return the # of NIs that need the acceptor. */
807 	int count = 0;
808 	struct list_head *tmp;
809 	struct lnet_ni *ni;
810 	int cpt;
811 
812 	cpt = lnet_net_lock_current();
813 	list_for_each(tmp, &the_lnet.ln_nis) {
814 		ni = list_entry(tmp, lnet_ni_t, ni_list);
815 
816 		if (ni->ni_lnd->lnd_accept)
817 			count++;
818 	}
819 
820 	lnet_net_unlock(cpt);
821 
822 	return count;
823 }
824 
825 static lnet_ping_info_t *
826 lnet_ping_info_create(int num_ni)
827 {
828 	lnet_ping_info_t *ping_info;
829 	unsigned int infosz;
830 
831 	infosz = offsetof(lnet_ping_info_t, pi_ni[num_ni]);
832 	LIBCFS_ALLOC(ping_info, infosz);
833 	if (!ping_info) {
834 		CERROR("Can't allocate ping info[%d]\n", num_ni);
835 		return NULL;
836 	}
837 
838 	ping_info->pi_nnis = num_ni;
839 	ping_info->pi_pid = the_lnet.ln_pid;
840 	ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
841 	ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
842 
843 	return ping_info;
844 }
845 
846 static inline int
847 lnet_get_ni_count(void)
848 {
849 	struct lnet_ni *ni;
850 	int count = 0;
851 
852 	lnet_net_lock(0);
853 
854 	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list)
855 		count++;
856 
857 	lnet_net_unlock(0);
858 
859 	return count;
860 }
861 
862 static inline void
863 lnet_ping_info_free(lnet_ping_info_t *pinfo)
864 {
865 	LIBCFS_FREE(pinfo,
866 		    offsetof(lnet_ping_info_t,
867 			     pi_ni[pinfo->pi_nnis]));
868 }
869 
870 static void
871 lnet_ping_info_destroy(void)
872 {
873 	struct lnet_ni *ni;
874 
875 	lnet_net_lock(LNET_LOCK_EX);
876 
877 	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
878 		lnet_ni_lock(ni);
879 		ni->ni_status = NULL;
880 		lnet_ni_unlock(ni);
881 	}
882 
883 	lnet_ping_info_free(the_lnet.ln_ping_info);
884 	the_lnet.ln_ping_info = NULL;
885 
886 	lnet_net_unlock(LNET_LOCK_EX);
887 }
888 
889 static void
890 lnet_ping_event_handler(lnet_event_t *event)
891 {
892 	lnet_ping_info_t *pinfo = event->md.user_ptr;
893 
894 	if (event->unlinked)
895 		pinfo->pi_features = LNET_PING_FEAT_INVAL;
896 }
897 
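/*
 * Build the ping target: allocate the ping info block describing the
 * local NIs, optionally allocate its event queue, then attach an ME and
 * a GET-able, remote-managed MD for it on LNET_RESERVED_PORTAL under
 * LNET_PROTO_PING_MATCHBITS so that peers can ping this node.
 */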
898 static int
899 lnet_ping_info_setup(lnet_ping_info_t **ppinfo, lnet_handle_md_t *md_handle,
900 		     int ni_count, bool set_eq)
901 {
902 	lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
903 	lnet_handle_me_t me_handle;
904 	lnet_md_t md = { NULL };
905 	int rc, rc2;
906 
907 	if (set_eq) {
908 		rc = LNetEQAlloc(0, lnet_ping_event_handler,
909 				 &the_lnet.ln_ping_target_eq);
910 		if (rc) {
911 			CERROR("Can't allocate ping EQ: %d\n", rc);
912 			return rc;
913 		}
914 	}
915 
916 	*ppinfo = lnet_ping_info_create(ni_count);
917 	if (!*ppinfo) {
918 		rc = -ENOMEM;
919 		goto failed_0;
920 	}
921 
922 	rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
923 			  LNET_PROTO_PING_MATCHBITS, 0,
924 			  LNET_UNLINK, LNET_INS_AFTER,
925 			  &me_handle);
926 	if (rc) {
927 		CERROR("Can't create ping ME: %d\n", rc);
928 		goto failed_1;
929 	}
930 
931 	/* initialize md content */
932 	md.start = *ppinfo;
933 	md.length = offsetof(lnet_ping_info_t,
934 			     pi_ni[(*ppinfo)->pi_nnis]);
935 	md.threshold = LNET_MD_THRESH_INF;
936 	md.max_size = 0;
937 	md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
938 		     LNET_MD_MANAGE_REMOTE;
939 	md.user_ptr  = NULL;
940 	md.eq_handle = the_lnet.ln_ping_target_eq;
941 	md.user_ptr = *ppinfo;
942 
943 	rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
944 	if (rc) {
945 		CERROR("Can't attach ping MD: %d\n", rc);
946 		goto failed_2;
947 	}
948 
949 	return 0;
950 
951 failed_2:
952 	rc2 = LNetMEUnlink(me_handle);
953 	LASSERT(!rc2);
954 failed_1:
955 	lnet_ping_info_free(*ppinfo);
956 	*ppinfo = NULL;
957 failed_0:
958 	if (set_eq)
959 		LNetEQFree(the_lnet.ln_ping_target_eq);
960 	return rc;
961 }
962 
963 static void
964 lnet_ping_md_unlink(lnet_ping_info_t *pinfo, lnet_handle_md_t *md_handle)
965 {
966 	sigset_t blocked = cfs_block_allsigs();
967 
968 	LNetMDUnlink(*md_handle);
969 	LNetInvalidateHandle(md_handle);
970 
971 	/* NB md could be busy; this just starts the unlink */
972 	while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
973 		CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
974 		set_current_state(TASK_UNINTERRUPTIBLE);
975 		schedule_timeout(cfs_time_seconds(1));
976 	}
977 
978 	cfs_restore_sigs(blocked);
979 }
980 
981 static void
982 lnet_ping_info_install_locked(lnet_ping_info_t *ping_info)
983 {
984 	lnet_ni_status_t *ns;
985 	lnet_ni_t *ni;
986 	int i = 0;
987 
988 	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
989 		LASSERT(i < ping_info->pi_nnis);
990 
991 		ns = &ping_info->pi_ni[i];
992 
993 		ns->ns_nid = ni->ni_nid;
994 
995 		lnet_ni_lock(ni);
996 		ns->ns_status = (ni->ni_status) ?
997 				 ni->ni_status->ns_status : LNET_NI_STATUS_UP;
998 		ni->ni_status = ns;
999 		lnet_ni_unlock(ni);
1000 
1001 		i++;
1002 	}
1003 }
1004 
1005 static void
1006 lnet_ping_target_update(lnet_ping_info_t *pinfo, lnet_handle_md_t md_handle)
1007 {
1008 	lnet_ping_info_t *old_pinfo = NULL;
1009 	lnet_handle_md_t old_md;
1010 
1011 	/* switch the NIs to point to the new ping info created */
1012 	lnet_net_lock(LNET_LOCK_EX);
1013 
1014 	if (!the_lnet.ln_routing)
1015 		pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1016 	lnet_ping_info_install_locked(pinfo);
1017 
1018 	if (the_lnet.ln_ping_info) {
1019 		old_pinfo = the_lnet.ln_ping_info;
1020 		old_md = the_lnet.ln_ping_target_md;
1021 	}
1022 	the_lnet.ln_ping_target_md = md_handle;
1023 	the_lnet.ln_ping_info = pinfo;
1024 
1025 	lnet_net_unlock(LNET_LOCK_EX);
1026 
1027 	if (old_pinfo) {
1028 		/* unlink the old ping info */
1029 		lnet_ping_md_unlink(old_pinfo, &old_md);
1030 		lnet_ping_info_free(old_pinfo);
1031 	}
1032 }
1033 
1034 static void
1035 lnet_ping_target_fini(void)
1036 {
1037 	int rc;
1038 
1039 	lnet_ping_md_unlink(the_lnet.ln_ping_info,
1040 			    &the_lnet.ln_ping_target_md);
1041 
1042 	rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1043 	LASSERT(!rc);
1044 
1045 	lnet_ping_info_destroy();
1046 }
1047 
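/*
 * Split ni_maxtxcredits across the NI's CPTs, but never give a single tx
 * queue fewer than 8 * per-peer tx credits, and never more than the
 * NI-wide maximum.
 */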
1048 static int
1049 lnet_ni_tq_credits(lnet_ni_t *ni)
1050 {
1051 	int credits;
1052 
1053 	LASSERT(ni->ni_ncpts >= 1);
1054 
1055 	if (ni->ni_ncpts == 1)
1056 		return ni->ni_maxtxcredits;
1057 
1058 	credits = ni->ni_maxtxcredits / ni->ni_ncpts;
1059 	credits = max(credits, 8 * ni->ni_peertxcredits);
1060 	credits = min(credits, ni->ni_maxtxcredits);
1061 
1062 	return credits;
1063 }
1064 
1065 static void
1066 lnet_ni_unlink_locked(lnet_ni_t *ni)
1067 {
1068 	if (!list_empty(&ni->ni_cptlist)) {
1069 		list_del_init(&ni->ni_cptlist);
1070 		lnet_ni_decref_locked(ni, 0);
1071 	}
1072 
1073 	/* move it to the zombie list so nobody can find it anymore */
1074 	LASSERT(!list_empty(&ni->ni_list));
1075 	list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
1076 	lnet_ni_decref_locked(ni, 0);	/* drop ln_nis' ref */
1077 }
1078 
1079 static void
1080 lnet_clear_zombies_nis_locked(void)
1081 {
1082 	int i;
1083 	int islo;
1084 	lnet_ni_t *ni;
1085 	lnet_ni_t *temp;
1086 
1087 	/*
1088 	 * Now wait for the NIs I just nuked to show up on ln_nis_zombie
1089 	 * and shut them down in guaranteed thread context
1090 	 */
1091 	i = 2;
1092 	list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis_zombie, ni_list) {
1093 		int *ref;
1094 		int j;
1095 
1096 		list_del_init(&ni->ni_list);
1097 		cfs_percpt_for_each(ref, j, ni->ni_refs) {
1098 			if (!*ref)
1099 				continue;
1100 			/* still busy, add it back to zombie list */
1101 			list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
1102 			break;
1103 		}
1104 
1105 		if (!list_empty(&ni->ni_list)) {
1106 			lnet_net_unlock(LNET_LOCK_EX);
1107 			++i;
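			/* rate-limit: only warn when i is a power of two */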
1108 			if ((i & (-i)) == i) {
1109 				CDEBUG(D_WARNING, "Waiting for zombie LNI %s\n",
1110 				       libcfs_nid2str(ni->ni_nid));
1111 			}
1112 			set_current_state(TASK_UNINTERRUPTIBLE);
1113 			schedule_timeout(cfs_time_seconds(1));
1114 			lnet_net_lock(LNET_LOCK_EX);
1115 			continue;
1116 		}
1117 
1118 		ni->ni_lnd->lnd_refcount--;
1119 		lnet_net_unlock(LNET_LOCK_EX);
1120 
1121 		islo = ni->ni_lnd->lnd_type == LOLND;
1122 
1123 		LASSERT(!in_interrupt());
1124 		ni->ni_lnd->lnd_shutdown(ni);
1125 
1126 		/*
1127 		 * can't deref lnd anymore now; it might have unregistered
1128 		 * itself...
1129 		 */
1130 		if (!islo)
1131 			CDEBUG(D_LNI, "Removed LNI %s\n",
1132 			       libcfs_nid2str(ni->ni_nid));
1133 
1134 		lnet_ni_free(ni);
1135 		i = 2;
1136 
1137 		lnet_net_lock(LNET_LOCK_EX);
1138 	}
1139 }
1140 
1141 static void
1142 lnet_shutdown_lndnis(void)
1143 {
1144 	lnet_ni_t *ni;
1145 	lnet_ni_t *temp;
1146 	int i;
1147 
1148 	/* NB called holding the global mutex */
1149 
1150 	/* All quiet on the API front */
1151 	LASSERT(!the_lnet.ln_shutdown);
1152 	LASSERT(!the_lnet.ln_refcount);
1153 	LASSERT(list_empty(&the_lnet.ln_nis_zombie));
1154 
1155 	lnet_net_lock(LNET_LOCK_EX);
1156 	the_lnet.ln_shutdown = 1;	/* flag shutdown */
1157 
1158 	/* Unlink NIs from the global table */
1159 	list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis, ni_list) {
1160 		lnet_ni_unlink_locked(ni);
1161 	}
1162 
1163 	/* Drop the cached loopback NI. */
1164 	if (the_lnet.ln_loni) {
1165 		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1166 		the_lnet.ln_loni = NULL;
1167 	}
1168 
1169 	lnet_net_unlock(LNET_LOCK_EX);
1170 
1171 	/*
1172 	 * Clear lazy portals and drop delayed messages which hold refs
1173 	 * on their lnet_msg_t::msg_rxpeer
1174 	 */
1175 	for (i = 0; i < the_lnet.ln_nportals; i++)
1176 		LNetClearLazyPortal(i);
1177 
1178 	/*
1179 	 * Clear the peer table and wait for all peers to go (they hold refs on
1180 	 * their NIs)
1181 	 */
1182 	lnet_peer_tables_cleanup(NULL);
1183 
1184 	lnet_net_lock(LNET_LOCK_EX);
1185 
1186 	lnet_clear_zombies_nis_locked();
1187 	the_lnet.ln_shutdown = 0;
1188 	lnet_net_unlock(LNET_LOCK_EX);
1189 }
1190 
1191 /* shut down the NI and release its refcount */
1192 static void
1193 lnet_shutdown_lndni(struct lnet_ni *ni)
1194 {
1195 	int i;
1196 
1197 	lnet_net_lock(LNET_LOCK_EX);
1198 	lnet_ni_unlink_locked(ni);
1199 	lnet_net_unlock(LNET_LOCK_EX);
1200 
1201 	/* clear messages for this NI on the lazy portal */
1202 	for (i = 0; i < the_lnet.ln_nportals; i++)
1203 		lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1204 
1205 	/* Do peer table cleanup for this ni */
1206 	lnet_peer_tables_cleanup(ni);
1207 
1208 	lnet_net_lock(LNET_LOCK_EX);
1209 	lnet_clear_zombies_nis_locked();
1210 	lnet_net_unlock(LNET_LOCK_EX);
1211 }
1212 
1213 static int
1214 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
1215 {
1216 	struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
1217 	int rc = -EINVAL;
1218 	int lnd_type;
1219 	lnd_t *lnd;
1220 	struct lnet_tx_queue *tq;
1221 	int i;
1222 
1223 	lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
1224 
1225 	LASSERT(libcfs_isknown_lnd(lnd_type));
1226 
1227 	if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
1228 	    lnd_type == IIBLND || lnd_type == VIBLND) {
1229 		CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
1230 		goto failed0;
1231 	}
1232 
1233 	/* Make sure this new NI is unique. */
1234 	lnet_net_lock(LNET_LOCK_EX);
1235 	rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis);
1236 	lnet_net_unlock(LNET_LOCK_EX);
1237 	if (!rc) {
1238 		if (lnd_type == LOLND) {
1239 			lnet_ni_free(ni);
1240 			return 0;
1241 		}
1242 
1243 		CERROR("Net %s is not unique\n",
1244 		       libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
1245 		rc = -EEXIST;
1246 		goto failed0;
1247 	}
1248 
1249 	mutex_lock(&the_lnet.ln_lnd_mutex);
1250 	lnd = lnet_find_lnd_by_type(lnd_type);
1251 
1252 	if (!lnd) {
1253 		mutex_unlock(&the_lnet.ln_lnd_mutex);
1254 		rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1255 		mutex_lock(&the_lnet.ln_lnd_mutex);
1256 
1257 		lnd = lnet_find_lnd_by_type(lnd_type);
1258 		if (!lnd) {
1259 			mutex_unlock(&the_lnet.ln_lnd_mutex);
1260 			CERROR("Can't load LND %s, module %s, rc=%d\n",
1261 			       libcfs_lnd2str(lnd_type),
1262 			       libcfs_lnd2modname(lnd_type), rc);
1263 			rc = -EINVAL;
1264 			goto failed0;
1265 		}
1266 	}
1267 
1268 	lnet_net_lock(LNET_LOCK_EX);
1269 	lnd->lnd_refcount++;
1270 	lnet_net_unlock(LNET_LOCK_EX);
1271 
1272 	ni->ni_lnd = lnd;
1273 
1274 	if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
1275 		lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
1276 
1277 	if (lnd_tunables) {
1278 		LIBCFS_ALLOC(ni->ni_lnd_tunables,
1279 			     sizeof(*ni->ni_lnd_tunables));
1280 		if (!ni->ni_lnd_tunables) {
1281 			mutex_unlock(&the_lnet.ln_lnd_mutex);
1282 			rc = -ENOMEM;
1283 			goto failed0;
1284 		}
1285 		memcpy(ni->ni_lnd_tunables, lnd_tunables,
1286 		       sizeof(*ni->ni_lnd_tunables));
1287 	}
1288 
1289 	/*
1290 	 * If given some LND tunable parameters, parse those now to
1291 	 * override the values in the NI structure.
1292 	 */
1293 	if (conf) {
1294 		if (conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0)
1295 			ni->ni_peerrtrcredits =
1296 				conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
1297 		if (conf->cfg_config_u.cfg_net.net_peer_timeout >= 0)
1298 			ni->ni_peertimeout =
1299 				conf->cfg_config_u.cfg_net.net_peer_timeout;
1300 		if (conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
1301 			ni->ni_peertxcredits =
1302 				conf->cfg_config_u.cfg_net.net_peer_tx_credits;
1303 		if (conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0)
1304 			ni->ni_maxtxcredits =
1305 				conf->cfg_config_u.cfg_net.net_max_tx_credits;
1306 	}
1307 
1308 	rc = lnd->lnd_startup(ni);
1309 
1310 	mutex_unlock(&the_lnet.ln_lnd_mutex);
1311 
1312 	if (rc) {
1313 		LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1314 				   rc, libcfs_lnd2str(lnd->lnd_type));
1315 		lnet_net_lock(LNET_LOCK_EX);
1316 		lnd->lnd_refcount--;
1317 		lnet_net_unlock(LNET_LOCK_EX);
1318 		goto failed0;
1319 	}
1320 
1321 	LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
1322 
1323 	lnet_net_lock(LNET_LOCK_EX);
1324 	/* refcount for ln_nis */
1325 	lnet_ni_addref_locked(ni, 0);
1326 	list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
1327 	if (ni->ni_cpts) {
1328 		lnet_ni_addref_locked(ni, 0);
1329 		list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt);
1330 	}
1331 
1332 	lnet_net_unlock(LNET_LOCK_EX);
1333 
1334 	if (lnd->lnd_type == LOLND) {
1335 		lnet_ni_addref(ni);
1336 		LASSERT(!the_lnet.ln_loni);
1337 		the_lnet.ln_loni = ni;
1338 		return 0;
1339 	}
1340 
1341 	if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) {
1342 		LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1343 				   libcfs_lnd2str(lnd->lnd_type),
1344 				   !ni->ni_peertxcredits ?
1345 				   "" : "per-peer ");
1346 		/*
1347 		 * shut down the NI: if we get here it must have already
1348 		 * been started
1349 		 */
1350 		lnet_shutdown_lndni(ni);
1351 		return -EINVAL;
1352 	}
1353 
1354 	cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1355 		tq->tq_credits_min =
1356 		tq->tq_credits_max =
1357 		tq->tq_credits = lnet_ni_tq_credits(ni);
1358 	}
1359 
1360 	CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1361 	       libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
1362 	       lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1363 	       ni->ni_peerrtrcredits, ni->ni_peertimeout);
1364 
1365 	return 0;
1366 failed0:
1367 	lnet_ni_free(ni);
1368 	return rc;
1369 }
1370 
1371 static int
1372 lnet_startup_lndnis(struct list_head *nilist)
1373 {
1374 	struct lnet_ni *ni;
1375 	int rc;
1376 	int ni_count = 0;
1377 
1378 	while (!list_empty(nilist)) {
1379 		ni = list_entry(nilist->next, lnet_ni_t, ni_list);
1380 		list_del(&ni->ni_list);
1381 		rc = lnet_startup_lndni(ni, NULL);
1382 
1383 		if (rc < 0)
1384 			goto failed;
1385 
1386 		ni_count++;
1387 	}
1388 
1389 	return ni_count;
1390 failed:
1391 	lnet_shutdown_lndnis();
1392 
1393 	return rc;
1394 }
1395 
1396 /**
1397  * Initialize LNet library.
1398  *
1399  * Automatically called at module loading time. Caller has to call
1400  * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
1401  * latter returned 0. It must be called exactly once.
1402  *
1403  * \retval 0 on success
1404  * \retval -ve on failures.
1405  */
1406 int lnet_lib_init(void)
1407 {
1408 	int rc;
1409 
1410 	lnet_assert_wire_constants();
1411 
1412 	memset(&the_lnet, 0, sizeof(the_lnet));
1413 
1414 	/* refer to global cfs_cpt_table for now */
1415 	the_lnet.ln_cpt_table	= cfs_cpt_table;
1416 	the_lnet.ln_cpt_number	= cfs_cpt_number(cfs_cpt_table);
1417 
1418 	LASSERT(the_lnet.ln_cpt_number > 0);
1419 	if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
1420 		/* we are at risk of consuming all lh_cookie values */
1421 		CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
1422 		       the_lnet.ln_cpt_number, LNET_CPT_MAX);
1423 		return -E2BIG;
1424 	}
1425 
1426 	while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
1427 		the_lnet.ln_cpt_bits++;
1428 
1429 	rc = lnet_create_locks();
1430 	if (rc) {
1431 		CERROR("Can't create LNet global locks: %d\n", rc);
1432 		return rc;
1433 	}
1434 
1435 	the_lnet.ln_refcount = 0;
1436 	LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
1437 	INIT_LIST_HEAD(&the_lnet.ln_lnds);
1438 	INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
1439 	INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
1440 
1441 	/*
1442 	 * The hash table size is the number of bits it takes to express the set
1443 	 * ln_num_routes, minus 1 (better to underestimate than overestimate
1444 	 * so we don't waste memory).
1445 	 */
1446 	if (rnet_htable_size <= 0)
1447 		rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
1448 	else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
1449 		rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
1450 	the_lnet.ln_remote_nets_hbits = max_t(int, 1,
1451 					   order_base_2(rnet_htable_size) - 1);
1452 
1453 	/*
1454 	 * All LNDs apart from the LOLND are in separate modules.  They
1455 	 * register themselves when their module loads, and unregister
1456 	 * themselves when their module is unloaded.
1457 	 */
1458 	lnet_register_lnd(&the_lolnd);
1459 	return 0;
1460 }
1461 
1462 /**
1463  * Finalize LNet library.
1464  *
1465  * \pre lnet_lib_init() called with success.
1466  * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
1467  */
1468 void lnet_lib_exit(void)
1469 {
1470 	LASSERT(!the_lnet.ln_refcount);
1471 
1472 	while (!list_empty(&the_lnet.ln_lnds))
1473 		lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
1474 					       lnd_t, lnd_list));
1475 	lnet_destroy_locks();
1476 }
1477 
1478 /**
1479  * Set LNet PID and start LNet interfaces, routing, and forwarding.
1480  *
1481  * Users must call this function at least once before any other functions.
1482  * For each successful call there must be a corresponding call to
1483  * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
1484  * ignored.
1485  *
1486  * The PID used by LNet may be different from the one requested.
1487  * See LNetGetId().
1488  *
1489  * \param requested_pid PID requested by the caller.
1490  *
1491  * \return >= 0 on success, and < 0 error code on failures.
1492  */
1493 int
1494 LNetNIInit(lnet_pid_t requested_pid)
1495 {
1496 	int im_a_router = 0;
1497 	int rc;
1498 	int ni_count;
1499 	lnet_ping_info_t *pinfo;
1500 	lnet_handle_md_t md_handle;
1501 	struct list_head net_head;
1502 
1503 	INIT_LIST_HEAD(&net_head);
1504 
1505 	mutex_lock(&the_lnet.ln_api_mutex);
1506 
1507 	CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
1508 
1509 	if (the_lnet.ln_refcount > 0) {
1510 		rc = the_lnet.ln_refcount++;
1511 		mutex_unlock(&the_lnet.ln_api_mutex);
1512 		return rc;
1513 	}
1514 
1515 	rc = lnet_prepare(requested_pid);
1516 	if (rc) {
1517 		mutex_unlock(&the_lnet.ln_api_mutex);
1518 		return rc;
1519 	}
1520 
1521 	/* Add in the loopback network */
1522 	if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) {
1523 		rc = -ENOMEM;
1524 		goto err_empty_list;
1525 	}
1526 
1527 	/*
1528 	 * If LNet is being initialized via DLC it is possible
1529 	 * that the user requests not to load module parameters (ones which
1530 	 * are supported by DLC) on initialization.  Therefore, make sure not
1531 	 * to load networks, routes and forwarding from module parameters
1532 	 * in this case. On cleanup after a failure, only clean up
1533 	 * routes if they have been loaded
1534 	 */
1535 	if (!the_lnet.ln_nis_from_mod_params) {
1536 		rc = lnet_parse_networks(&net_head, lnet_get_networks());
1537 		if (rc < 0)
1538 			goto err_empty_list;
1539 	}
1540 
1541 	ni_count = lnet_startup_lndnis(&net_head);
1542 	if (ni_count < 0) {
1543 		rc = ni_count;
1544 		goto err_empty_list;
1545 	}
1546 
1547 	if (!the_lnet.ln_nis_from_mod_params) {
1548 		rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
1549 		if (rc)
1550 			goto err_shutdown_lndnis;
1551 
1552 		rc = lnet_check_routes();
1553 		if (rc)
1554 			goto err_destroy_routes;
1555 
1556 		rc = lnet_rtrpools_alloc(im_a_router);
1557 		if (rc)
1558 			goto err_destroy_routes;
1559 	}
1560 
1561 	rc = lnet_acceptor_start();
1562 	if (rc)
1563 		goto err_destroy_routes;
1564 
1565 	the_lnet.ln_refcount = 1;
1566 	/* Now I may use my own API functions... */
1567 
1568 	rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
1569 	if (rc)
1570 		goto err_acceptor_stop;
1571 
1572 	lnet_ping_target_update(pinfo, md_handle);
1573 
1574 	rc = lnet_router_checker_start();
1575 	if (rc)
1576 		goto err_stop_ping;
1577 
1578 	lnet_fault_init();
1579 	lnet_router_debugfs_init();
1580 
1581 	mutex_unlock(&the_lnet.ln_api_mutex);
1582 
1583 	return 0;
1584 
1585 err_stop_ping:
1586 	lnet_ping_target_fini();
1587 err_acceptor_stop:
1588 	the_lnet.ln_refcount = 0;
1589 	lnet_acceptor_stop();
1590 err_destroy_routes:
1591 	if (!the_lnet.ln_nis_from_mod_params)
1592 		lnet_destroy_routes();
1593 err_shutdown_lndnis:
1594 	lnet_shutdown_lndnis();
1595 err_empty_list:
1596 	lnet_unprepare();
1597 	LASSERT(rc < 0);
1598 	mutex_unlock(&the_lnet.ln_api_mutex);
1599 	while (!list_empty(&net_head)) {
1600 		struct lnet_ni *ni;
1601 
1602 		ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1603 		list_del_init(&ni->ni_list);
1604 		lnet_ni_free(ni);
1605 	}
1606 	return rc;
1607 }
1608 EXPORT_SYMBOL(LNetNIInit);
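/*
 * Illustrative usage (assumption, not taken from this file): a kernel
 * LNet user brackets its lifetime with a matched init/fini pair, e.g.
 *
 *	rc = LNetNIInit(requested_pid);
 *	if (rc < 0)
 *		return rc;
 *	... LNetEQAlloc() / LNetMEAttach() / LNetMDAttach() / LNetPut() ...
 *	LNetNIFini();
 *
 * Note that LNET_PID_ANY is rejected by lnet_prepare() above, so a real
 * PID must be requested on the first call.
 */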
1609 
1610 /**
1611  * Stop LNet interfaces, routing, and forwarding.
1612  *
1613  * Users must call this function once for each successful call to LNetNIInit().
1614  * Once the LNetNIFini() operation has been started, the results of pending
1615  * API operations are undefined.
1616  *
1617  * \return always 0 for current implementation.
1618  */
1619 int
1620 LNetNIFini(void)
1621 {
1622 	mutex_lock(&the_lnet.ln_api_mutex);
1623 
1624 	LASSERT(the_lnet.ln_refcount > 0);
1625 
1626 	if (the_lnet.ln_refcount != 1) {
1627 		the_lnet.ln_refcount--;
1628 	} else {
1629 		LASSERT(!the_lnet.ln_niinit_self);
1630 
1631 		lnet_fault_fini();
1632 		lnet_router_debugfs_fini();
1633 		lnet_router_checker_stop();
1634 		lnet_ping_target_fini();
1635 
1636 		/* Teardown fns that use my own API functions BEFORE here */
1637 		the_lnet.ln_refcount = 0;
1638 
1639 		lnet_acceptor_stop();
1640 		lnet_destroy_routes();
1641 		lnet_shutdown_lndnis();
1642 		lnet_unprepare();
1643 	}
1644 
1645 	mutex_unlock(&the_lnet.ln_api_mutex);
1646 	return 0;
1647 }
1648 EXPORT_SYMBOL(LNetNIFini);
1649 
1650 /**
1651  * Grabs the NI data from the ni structure and fills in the out
1652  * parameters
1653  *
1654  * \param[in]  ni      network interface structure
1655  * \param[out] config  NI configuration
1656  */
1657 static void
1658 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
1659 {
1660 	struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
1661 	struct lnet_ioctl_net_config *net_config;
1662 	size_t min_size, tunable_size = 0;
1663 	int i;
1664 
1665 	if (!ni || !config)
1666 		return;
1667 
1668 	net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk;
1669 	if (!net_config)
1670 		return;
1671 
1672 	BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
1673 		     ARRAY_SIZE(net_config->ni_interfaces));
1674 
1675 	for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
1676 		if (!ni->ni_interfaces[i])
1677 			break;
1678 
1679 		strncpy(net_config->ni_interfaces[i],
1680 			ni->ni_interfaces[i],
1681 			sizeof(net_config->ni_interfaces[i]));
1682 	}
1683 
1684 	config->cfg_nid = ni->ni_nid;
1685 	config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout;
1686 	config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits;
1687 	config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits;
1688 	config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits;
1689 
1690 	net_config->ni_status = ni->ni_status->ns_status;
1691 
1692 	if (ni->ni_cpts) {
1693 		int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
1694 
1695 		for (i = 0; i < num_cpts; i++)
1696 			net_config->ni_cpts[i] = ni->ni_cpts[i];
1697 
1698 		config->cfg_ncpts = num_cpts;
1699 	}
1700 
1701 	/*
1702 	 * See if user land tools sent in a newer and larger version
1703 	 * of struct lnet_tunables than what the kernel uses.
1704 	 */
1705 	min_size = sizeof(*config) + sizeof(*net_config);
1706 
1707 	if (config->cfg_hdr.ioc_len > min_size)
1708 		tunable_size = config->cfg_hdr.ioc_len - min_size;
1709 
1710 	/* Don't copy too much data to user space */
1711 	min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables));
1712 	lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
1713 
1714 	if (ni->ni_lnd_tunables && lnd_cfg && min_size) {
1715 		memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size);
1716 		config->cfg_config_u.cfg_net.net_interface_count = 1;
1717 
1718 		/* Tell userland that the kernel side has less data */
1719 		if (tunable_size > sizeof(*ni->ni_lnd_tunables)) {
1720 			min_size = tunable_size - sizeof(*ni->ni_lnd_tunables);
1721 			config->cfg_hdr.ioc_len -= min_size;
1722 		}
1723 	}
1724 }
1725 
1726 static int
1727 lnet_get_net_config(struct lnet_ioctl_config_data *config)
1728 {
1729 	struct lnet_ni *ni;
1730 	struct list_head *tmp;
1731 	int idx = config->cfg_count;
1732 	int cpt, i = 0;
1733 	int rc = -ENOENT;
1734 
1735 	cpt = lnet_net_lock_current();
1736 
1737 	list_for_each(tmp, &the_lnet.ln_nis) {
1738 		if (i++ != idx)
1739 			continue;
1740 
1741 		ni = list_entry(tmp, lnet_ni_t, ni_list);
1742 		lnet_ni_lock(ni);
1743 		lnet_fill_ni_info(ni, config);
1744 		lnet_ni_unlock(ni);
1745 		rc = 0;
1746 		break;
1747 	}
1748 
1749 	lnet_net_unlock(cpt);
1750 	return rc;
1751 }
1752 
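/*
 * Dynamically add a single NI at runtime (the DLC configuration path):
 * parse the interface string into an NI, refuse nets that would clash
 * with the existing routing configuration, start the NI, and publish a
 * new ping info block that includes it.
 */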
1753 int
1754 lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
1755 {
1756 	char *nets = conf->cfg_config_u.cfg_net.net_intf;
1757 	lnet_ping_info_t *pinfo;
1758 	lnet_handle_md_t md_handle;
1759 	struct lnet_ni *ni;
1760 	struct list_head net_head;
1761 	lnet_remotenet_t *rnet;
1762 	int rc;
1763 
1764 	INIT_LIST_HEAD(&net_head);
1765 
1766 	/* Create a ni structure for the network string */
1767 	rc = lnet_parse_networks(&net_head, nets);
1768 	if (rc <= 0)
1769 		return !rc ? -EINVAL : rc;
1770 
1771 	mutex_lock(&the_lnet.ln_api_mutex);
1772 
1773 	if (rc > 1) {
1774 		rc = -EINVAL; /* only add one interface per call */
1775 		goto failed0;
1776 	}
1777 
1778 	ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1779 
1780 	lnet_net_lock(LNET_LOCK_EX);
1781 	rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid));
1782 	lnet_net_unlock(LNET_LOCK_EX);
1783 	/*
1784 	 * make sure that the net added doesn't invalidate the current
1785 	 * configuration LNet is keeping
1786 	 */
1787 	if (rnet) {
1788 		CERROR("Adding net %s will invalidate routing configuration\n",
1789 		       nets);
1790 		rc = -EUSERS;
1791 		goto failed0;
1792 	}
1793 
1794 	rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(),
1795 				  false);
1796 	if (rc)
1797 		goto failed0;
1798 
1799 	list_del_init(&ni->ni_list);
1800 
1801 	rc = lnet_startup_lndni(ni, conf);
1802 	if (rc)
1803 		goto failed1;
1804 
1805 	if (ni->ni_lnd->lnd_accept) {
1806 		rc = lnet_acceptor_start();
1807 		if (rc < 0) {
1808 			/* shutdown the ni that we just started */
1809 			CERROR("Failed to start up acceptor thread\n");
1810 			lnet_shutdown_lndni(ni);
1811 			goto failed1;
1812 		}
1813 	}
1814 
1815 	lnet_ping_target_update(pinfo, md_handle);
1816 	mutex_unlock(&the_lnet.ln_api_mutex);
1817 
1818 	return 0;
1819 
1820 failed1:
1821 	lnet_ping_md_unlink(pinfo, &md_handle);
1822 	lnet_ping_info_free(pinfo);
1823 failed0:
1824 	mutex_unlock(&the_lnet.ln_api_mutex);
1825 	while (!list_empty(&net_head)) {
1826 		ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1827 		list_del_init(&ni->ni_list);
1828 		lnet_ni_free(ni);
1829 	}
1830 	return rc;
1831 }
1832 
1833 int
1834 lnet_dyn_del_ni(__u32 net)
1835 {
1836 	lnet_ni_t *ni;
1837 	lnet_ping_info_t *pinfo;
1838 	lnet_handle_md_t md_handle;
1839 	int rc;
1840 
1841 	/* don't allow userspace to shutdown the LOLND */
1842 	if (LNET_NETTYP(net) == LOLND)
1843 		return -EINVAL;
1844 
1845 	mutex_lock(&the_lnet.ln_api_mutex);
1846 	/* create and link a new ping info, before removing the old one */
1847 	rc = lnet_ping_info_setup(&pinfo, &md_handle,
1848 				  lnet_get_ni_count() - 1, false);
1849 	if (rc)
1850 		goto out;
1851 
1852 	ni = lnet_net2ni(net);
1853 	if (!ni) {
1854 		rc = -EINVAL;
1855 		goto failed;
1856 	}
1857 
1858 	/* decrement the reference counter taken by lnet_net2ni() */
1859 	lnet_ni_decref_locked(ni, 0);
1860 
1861 	lnet_shutdown_lndni(ni);
1862 
1863 	if (!lnet_count_acceptor_nis())
1864 		lnet_acceptor_stop();
1865 
1866 	lnet_ping_target_update(pinfo, md_handle);
1867 	goto out;
1868 failed:
1869 	lnet_ping_md_unlink(pinfo, &md_handle);
1870 	lnet_ping_info_free(pinfo);
1871 out:
1872 	mutex_unlock(&the_lnet.ln_api_mutex);
1873 
1874 	return rc;
1875 }
1876 
1877 /**
1878  * LNet ioctl handler.
1879  *
1880  */
1881 int
1882 LNetCtl(unsigned int cmd, void *arg)
1883 {
1884 	struct libcfs_ioctl_data *data = arg;
1885 	struct lnet_ioctl_config_data *config;
1886 	lnet_process_id_t id = {0};
1887 	lnet_ni_t *ni;
1888 	int rc;
1889 	unsigned long secs_passed;
1890 
1891 	BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX <
1892 		     sizeof(struct lnet_ioctl_net_config) +
1893 		     sizeof(struct lnet_ioctl_config_data));
1894 
1895 	switch (cmd) {
1896 	case IOC_LIBCFS_GET_NI:
1897 		rc = LNetGetId(data->ioc_count, &id);
1898 		data->ioc_nid = id.nid;
1899 		return rc;
1900 
1901 	case IOC_LIBCFS_FAIL_NID:
1902 		return lnet_fail_nid(data->ioc_nid, data->ioc_count);
1903 
1904 	case IOC_LIBCFS_ADD_ROUTE:
1905 		config = arg;
1906 
1907 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1908 			return -EINVAL;
1909 
1910 		mutex_lock(&the_lnet.ln_api_mutex);
1911 		rc = lnet_add_route(config->cfg_net,
1912 				    config->cfg_config_u.cfg_route.rtr_hop,
1913 				    config->cfg_nid,
1914 				    config->cfg_config_u.cfg_route.rtr_priority);
1915 		if (!rc) {
1916 			rc = lnet_check_routes();
1917 			if (rc)
1918 				lnet_del_route(config->cfg_net,
1919 					       config->cfg_nid);
1920 		}
1921 		mutex_unlock(&the_lnet.ln_api_mutex);
1922 		return rc;
1923 
1924 	case IOC_LIBCFS_DEL_ROUTE:
1925 		config = arg;
1926 
1927 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1928 			return -EINVAL;
1929 
1930 		mutex_lock(&the_lnet.ln_api_mutex);
1931 		rc = lnet_del_route(config->cfg_net, config->cfg_nid);
1932 		mutex_unlock(&the_lnet.ln_api_mutex);
1933 		return rc;
1934 
1935 	case IOC_LIBCFS_GET_ROUTE:
1936 		config = arg;
1937 
1938 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1939 			return -EINVAL;
1940 
1941 		return lnet_get_route(config->cfg_count,
1942 				      &config->cfg_net,
1943 				      &config->cfg_config_u.cfg_route.rtr_hop,
1944 				      &config->cfg_nid,
1945 				      &config->cfg_config_u.cfg_route.rtr_flags,
1946 				      &config->cfg_config_u.cfg_route.rtr_priority);
1947 
1948 	case IOC_LIBCFS_GET_NET: {
1949 		size_t total = sizeof(*config) +
1950 			       sizeof(struct lnet_ioctl_net_config);
1951 		config = arg;
1952 
1953 		if (config->cfg_hdr.ioc_len < total)
1954 			return -EINVAL;
1955 
1956 		return lnet_get_net_config(config);
1957 	}
1958 
1959 	case IOC_LIBCFS_GET_LNET_STATS: {
1960 		struct lnet_ioctl_lnet_stats *lnet_stats = arg;
1961 
1962 		if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
1963 			return -EINVAL;
1964 
1965 		lnet_counters_get(&lnet_stats->st_cntrs);
1966 		return 0;
1967 	}
1968 
1969 	case IOC_LIBCFS_CONFIG_RTR:
1970 		config = arg;
1971 
1972 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1973 			return -EINVAL;
1974 
1975 		mutex_lock(&the_lnet.ln_api_mutex);
1976 		if (config->cfg_config_u.cfg_buffers.buf_enable) {
1977 			rc = lnet_rtrpools_enable();
1978 			mutex_unlock(&the_lnet.ln_api_mutex);
1979 			return rc;
1980 		}
1981 		lnet_rtrpools_disable();
1982 		mutex_unlock(&the_lnet.ln_api_mutex);
1983 		return 0;
1984 
1985 	case IOC_LIBCFS_ADD_BUF:
1986 		config = arg;
1987 
1988 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1989 			return -EINVAL;
1990 
1991 		mutex_lock(&the_lnet.ln_api_mutex);
1992 		rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny,
1993 					  config->cfg_config_u.cfg_buffers.buf_small,
1994 					  config->cfg_config_u.cfg_buffers.buf_large);
1995 		mutex_unlock(&the_lnet.ln_api_mutex);
1996 		return rc;
1997 
1998 	case IOC_LIBCFS_GET_BUF: {
1999 		struct lnet_ioctl_pool_cfg *pool_cfg;
2000 		size_t total = sizeof(*config) + sizeof(*pool_cfg);
2001 
2002 		config = arg;
2003 
2004 		if (config->cfg_hdr.ioc_len < total)
2005 			return -EINVAL;
2006 
2007 		pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
2008 		return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
2009 	}
2010 
2011 	case IOC_LIBCFS_GET_PEER_INFO: {
2012 		struct lnet_ioctl_peer *peer_info = arg;
2013 
2014 		if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
2015 			return -EINVAL;
2016 
2017 		return lnet_get_peer_info(peer_info->pr_count,
2018 			&peer_info->pr_nid,
2019 			peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
2020 			&peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
2021 			&peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
2022 			&peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
2023 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
2024 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
2025 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
2026 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
2027 	}
2028 
2029 	case IOC_LIBCFS_NOTIFY_ROUTER:
2030 		secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]);
2031 		secs_passed *= msecs_to_jiffies(MSEC_PER_SEC);
2032 
2033 		return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
2034 				   jiffies - secs_passed);
2035 
2036 	case IOC_LIBCFS_LNET_DIST:
2037 		rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
2038 		if (rc < 0 && rc != -EHOSTUNREACH)
2039 			return rc;
2040 
2041 		data->ioc_u32[0] = rc;
2042 		return 0;
2043 
2044 	case IOC_LIBCFS_TESTPROTOCOMPAT:
2045 		lnet_net_lock(LNET_LOCK_EX);
2046 		the_lnet.ln_testprotocompat = data->ioc_flags;
2047 		lnet_net_unlock(LNET_LOCK_EX);
2048 		return 0;
2049 
2050 	case IOC_LIBCFS_LNET_FAULT:
2051 		return lnet_fault_ctl(data->ioc_flags, data);
2052 
2053 	case IOC_LIBCFS_PING:
2054 		id.nid = data->ioc_nid;
2055 		id.pid = data->ioc_u32[0];
2056 		rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
2057 			       data->ioc_pbuf1,
2058 			       data->ioc_plen1 / sizeof(lnet_process_id_t));
2059 		if (rc < 0)
2060 			return rc;
2061 		data->ioc_count = rc;
2062 		return 0;
2063 
2064 	default:
2065 		ni = lnet_net2ni(data->ioc_net);
2066 		if (!ni)
2067 			return -EINVAL;
2068 
2069 		if (!ni->ni_lnd->lnd_ctl)
2070 			rc = -EINVAL;
2071 		else
2072 			rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
2073 
2074 		lnet_ni_decref(ni);
2075 		return rc;
2076 	}
2077 	/* not reached */
2078 }
2079 EXPORT_SYMBOL(LNetCtl);
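
/*
 * Illustrative sketch only (not part of the original source): one way a
 * kernel-side caller could use LNetCtl() to look up the NID of a local
 * interface by index via IOC_LIBCFS_GET_NI, mirroring the handling above.
 * The helper name and its __maybe_unused annotation are hypothetical
 * additions for this example; LNet must already be initialized.
 */
static int __maybe_unused lnet_example_nid_by_index(unsigned int index,
						    lnet_nid_t *nid)
{
	struct libcfs_ioctl_data data;
	int rc;

	memset(&data, 0, sizeof(data));
	data.ioc_count = index;		/* interface index to query */

	rc = LNetCtl(IOC_LIBCFS_GET_NI, &data);
	if (!rc)
		*nid = data.ioc_nid;	/* filled in by the handler above */

	return rc;			/* 0, or -ENOENT from LNetGetId() */
}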

/**
 * Log debugging information about the peer identified by \a id.
 */
void LNetDebugPeer(lnet_process_id_t id)
{
	lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);

/**
 * Retrieve the lnet_process_id_t ID of the LNet interface at \a index. Note
 * that all interfaces share the same PID, as requested by LNetNIInit().
 *
 * \param index Index of the interface to look up.
 * \param id On successful return, this location will hold the
 * lnet_process_id_t ID of the interface.
 *
 * \retval 0 If an interface exists at \a index.
 * \retval -ENOENT If no interface has been found.
 */
int
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
	struct lnet_ni *ni;
	struct list_head *tmp;
	int cpt;
	int rc = -ENOENT;

	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each(tmp, &the_lnet.ln_nis) {
		if (index--)
			continue;

		ni = list_entry(tmp, lnet_ni_t, ni_list);

		id->nid = ni->ni_nid;
		id->pid = the_lnet.ln_pid;
		rc = 0;
		break;
	}

	lnet_net_unlock(cpt);
	return rc;
}
EXPORT_SYMBOL(LNetGetId);
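
/*
 * Illustrative sketch only (not part of the original source): enumerating
 * every local interface with LNetGetId() until it returns -ENOENT, as the
 * doc comment above describes. The helper name is hypothetical; LNet must
 * already be initialized when this runs.
 */
static void __maybe_unused lnet_example_list_local_ids(void)
{
	lnet_process_id_t id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
}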

/**
 * Print a string representation of handle \a h into buffer \a str of
 * \a len bytes.
 */
void
LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
{
	snprintf(str, len, "%#llx", h.cookie);
}
EXPORT_SYMBOL(LNetSnprintHandle);
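
/*
 * Illustrative sketch only (not part of the original source): formatting a
 * handle with LNetSnprintHandle() for a debug message. The helper name and
 * buffer size are hypothetical choices for this example.
 */
static void __maybe_unused lnet_example_print_handle(lnet_handle_any_t h)
{
	char buf[32];

	LNetSnprintHandle(buf, sizeof(buf), h);
	CDEBUG(D_NET, "handle %s\n", buf);
}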

static int lnet_ping(lnet_process_id_t id, int timeout_ms,
		     lnet_process_id_t __user *ids, int n_ids)
{
	lnet_handle_eq_t eqh;
	lnet_handle_md_t mdh;
	lnet_event_t event;
	lnet_md_t md = { NULL };
	int which;
	int unlinked = 0;
	int replied = 0;
	const int a_long_time = 60000; /* ms */
	int infosz;
	lnet_ping_info_t *info;
	lnet_process_id_t tmpid;
	int i;
	int nob;
	int rc;
	int rc2;
	sigset_t blocked;

	infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);

	if (n_ids <= 0 ||
	    id.nid == LNET_NID_ANY ||
	    timeout_ms > 500000 ||	/* arbitrary limit! */
	    n_ids > 20)			/* arbitrary limit! */
		return -EINVAL;

	if (id.pid == LNET_PID_ANY)
		id.pid = LNET_PID_LUSTRE;

	LIBCFS_ALLOC(info, infosz);
	if (!info)
		return -ENOMEM;

	/* NB 2 events max (including any unlink event) */
	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
	if (rc) {
		CERROR("Can't allocate EQ: %d\n", rc);
		goto out_0;
	}

	/* initialize md content */
	md.start     = info;
	md.length    = infosz;
	md.threshold = 2; /* GET/REPLY */
	md.max_size  = 0;
	md.options   = LNET_MD_TRUNCATE;
	md.user_ptr  = NULL;
	md.eq_handle = eqh;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
	if (rc) {
		CERROR("Can't bind MD: %d\n", rc);
		goto out_1;
	}

	rc = LNetGet(LNET_NID_ANY, mdh, id,
		     LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0);

	if (rc) {
		/* Don't CERROR; this could be deliberate! */

		rc2 = LNetMDUnlink(mdh);
		LASSERT(!rc2);

		/* NB must wait for the UNLINK event below... */
		unlinked = 1;
		timeout_ms = a_long_time;
	}

	do {
		/* MUST block for unlink to complete */
		if (unlinked)
			blocked = cfs_block_allsigs();

		rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);

		if (unlinked)
			cfs_restore_sigs(blocked);

		CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
		       (rc2 <= 0) ? -1 : event.type,
		       (rc2 <= 0) ? -1 : event.status,
		       (rc2 > 0 && event.unlinked) ? " unlinked" : "");

		LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */

		if (rc2 <= 0 || event.status) {
			/* timeout or error */
			if (!replied && !rc)
				rc = (rc2 < 0) ? rc2 :
				     !rc2 ? -ETIMEDOUT :
				     event.status;

			if (!unlinked) {
				/* Ensure completion in finite time... */
				LNetMDUnlink(mdh);
				/* No assertion (racing with network) */
				unlinked = 1;
				timeout_ms = a_long_time;
			} else if (!rc2) {
				/* timed out waiting for unlink */
				CWARN("ping %s: late network completion\n",
				      libcfs_id2str(id));
			}
		} else if (event.type == LNET_EVENT_REPLY) {
			replied = 1;
			rc = event.mlength;
		}

	} while (rc2 <= 0 || !event.unlinked);

	if (!replied) {
		if (rc >= 0)
			CWARN("%s: Unexpected rc >= 0 but no reply!\n",
			      libcfs_id2str(id));
		rc = -EIO;
		goto out_1;
	}

	nob = rc;
	LASSERT(nob >= 0 && nob <= infosz);

	rc = -EPROTO;			/* if I can't parse... */

	if (nob < 8) {
		/* can't check magic/version */
		CERROR("%s: ping info too short %d\n",
		       libcfs_id2str(id), nob);
		goto out_1;
	}

	if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
		lnet_swap_pinginfo(info);
	} else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
		CERROR("%s: Unexpected magic %08x\n",
		       libcfs_id2str(id), info->pi_magic);
		goto out_1;
	}

	if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) {
		CERROR("%s: ping w/o NI status: 0x%x\n",
		       libcfs_id2str(id), info->pi_features);
		goto out_1;
	}

	if (nob < offsetof(lnet_ping_info_t, pi_ni[0])) {
		CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
		       nob, (int)offsetof(lnet_ping_info_t, pi_ni[0]));
		goto out_1;
	}

	if (info->pi_nnis < n_ids)
		n_ids = info->pi_nnis;

	if (nob < offsetof(lnet_ping_info_t, pi_ni[n_ids])) {
		CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
		       nob, (int)offsetof(lnet_ping_info_t, pi_ni[n_ids]));
		goto out_1;
	}

	rc = -EFAULT;			/* If I SEGV... */

	memset(&tmpid, 0, sizeof(tmpid));
	for (i = 0; i < n_ids; i++) {
		tmpid.pid = info->pi_pid;
		tmpid.nid = info->pi_ni[i].ns_nid;
		if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
			goto out_1;
	}
	rc = info->pi_nnis;

 out_1:
	rc2 = LNetEQFree(eqh);
	if (rc2)
		CERROR("Can't free EQ: %d\n", rc2);
	LASSERT(!rc2);

 out_0:
	LIBCFS_FREE(info, infosz);
	return rc;
}