1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32 
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
36 
37 #include <linux/lnet/lib-lnet.h>
38 #include <uapi/linux/lnet/lnet-dlc.h>
39 
40 #define D_LNI D_CONSOLE
41 
42 struct lnet the_lnet;		/* THE state of the network */
43 EXPORT_SYMBOL(the_lnet);
44 
45 static char *ip2nets = "";
46 module_param(ip2nets, charp, 0444);
47 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
48 
49 static char *networks = "";
50 module_param(networks, charp, 0444);
51 MODULE_PARM_DESC(networks, "local networks");
52 
53 static char *routes = "";
54 module_param(routes, charp, 0444);
55 MODULE_PARM_DESC(routes, "routes to non-local networks");
56 
57 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
58 module_param(rnet_htable_size, int, 0444);
59 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
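
/*
 * Illustrative use of the module parameters above (a sketch, not part of
 * this file; see the Lustre manual for the full syntax), typically placed
 * in /etc/modprobe.d/lnet.conf:
 *
 *	options lnet networks="tcp0(eth0)"
 *	options lnet routes="o2ib 192.168.0.10@tcp0"
 *
 * 'networks' and 'ip2nets' are mutually exclusive, which
 * lnet_get_networks() below enforces.
 */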
60 
61 static int lnet_ping(struct lnet_process_id id, int timeout_ms,
62 		     struct lnet_process_id __user *ids, int n_ids);
63 
64 static char *
65 lnet_get_routes(void)
66 {
67 	return routes;
68 }
69 
70 static char *
71 lnet_get_networks(void)
72 {
73 	char *nets;
74 	int rc;
75 
76 	if (*networks && *ip2nets) {
77 		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
78 		return NULL;
79 	}
80 
81 	if (*ip2nets) {
82 		rc = lnet_parse_ip2nets(&nets, ip2nets);
83 		return !rc ? nets : NULL;
84 	}
85 
86 	if (*networks)
87 		return networks;
88 
89 	return "tcp";
90 }
91 
92 static void
93 lnet_init_locks(void)
94 {
95 	spin_lock_init(&the_lnet.ln_eq_wait_lock);
96 	init_waitqueue_head(&the_lnet.ln_eq_waitq);
97 	init_waitqueue_head(&the_lnet.ln_rc_waitq);
98 	mutex_init(&the_lnet.ln_lnd_mutex);
99 	mutex_init(&the_lnet.ln_api_mutex);
100 }
101 
102 static int
103 lnet_create_remote_nets_table(void)
104 {
105 	int i;
106 	struct list_head *hash;
107 
108 	LASSERT(!the_lnet.ln_remote_nets_hash);
109 	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
110 	LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
111 	if (!hash) {
112 		CERROR("Failed to create remote nets hash table\n");
113 		return -ENOMEM;
114 	}
115 
116 	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
117 		INIT_LIST_HEAD(&hash[i]);
118 	the_lnet.ln_remote_nets_hash = hash;
119 	return 0;
120 }
121 
122 static void
123 lnet_destroy_remote_nets_table(void)
124 {
125 	int i;
126 
127 	if (!the_lnet.ln_remote_nets_hash)
128 		return;
129 
130 	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
131 		LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
132 
133 	LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
134 		    LNET_REMOTE_NETS_HASH_SIZE *
135 		    sizeof(the_lnet.ln_remote_nets_hash[0]));
136 	the_lnet.ln_remote_nets_hash = NULL;
137 }
138 
139 static void
140 lnet_destroy_locks(void)
141 {
142 	if (the_lnet.ln_res_lock) {
143 		cfs_percpt_lock_free(the_lnet.ln_res_lock);
144 		the_lnet.ln_res_lock = NULL;
145 	}
146 
147 	if (the_lnet.ln_net_lock) {
148 		cfs_percpt_lock_free(the_lnet.ln_net_lock);
149 		the_lnet.ln_net_lock = NULL;
150 	}
151 }
152 
153 static int
154 lnet_create_locks(void)
155 {
156 	lnet_init_locks();
157 
158 	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
159 	if (!the_lnet.ln_res_lock)
160 		goto failed;
161 
162 	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
163 	if (!the_lnet.ln_net_lock)
164 		goto failed;
165 
166 	return 0;
167 
168  failed:
169 	lnet_destroy_locks();
170 	return -ENOMEM;
171 }
172 
173 static void lnet_assert_wire_constants(void)
174 {
175 	/*
176 	 * Wire protocol assertions generated by 'wirecheck'
177 	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
178 	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
179 	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
180 	 */
181 
182 	/* Constants... */
183 	BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
184 	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
185 	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
186 	BUILD_BUG_ON(LNET_MSG_ACK != 0);
187 	BUILD_BUG_ON(LNET_MSG_PUT != 1);
188 	BUILD_BUG_ON(LNET_MSG_GET != 2);
189 	BUILD_BUG_ON(LNET_MSG_REPLY != 3);
190 	BUILD_BUG_ON(LNET_MSG_HELLO != 4);
191 
192 	/* Checks for struct ptl_handle_wire_t */
193 	BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
194 	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) != 0);
195 	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
196 	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire, wh_object_cookie) != 8);
197 	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
198 
199 	/* Checks for struct struct lnet_magicversion */
200 	BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
201 	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
202 	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
203 	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
204 	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
205 	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_minor) != 6);
206 	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
207 
208 	/* Checks for struct struct lnet_hdr */
209 	BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
210 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
211 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
212 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
213 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
214 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
215 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
216 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
217 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
218 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
219 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
220 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
221 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
222 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
223 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);
224 
225 	/* Ack */
226 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
227 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
228 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
229 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
230 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
231 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);
232 
233 	/* Put */
234 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
235 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
236 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
237 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
238 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
239 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
240 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
241 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
242 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
243 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);
244 
245 	/* Get */
246 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
247 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
248 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
249 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
250 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
251 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
252 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
253 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
254 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
255 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);
256 
257 	/* Reply */
258 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
259 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);
260 
261 	/* Hello */
262 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
263 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
264 	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
265 	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
266 }
267 
268 static struct lnet_lnd *
269 lnet_find_lnd_by_type(__u32 type)
270 {
271 	struct lnet_lnd *lnd;
272 	struct list_head *tmp;
273 
274 	/* holding lnd mutex */
275 	list_for_each(tmp, &the_lnet.ln_lnds) {
276 		lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
277 
278 		if (lnd->lnd_type == type)
279 			return lnd;
280 	}
281 
282 	return NULL;
283 }
284 
285 void
286 lnet_register_lnd(struct lnet_lnd *lnd)
287 {
288 	mutex_lock(&the_lnet.ln_lnd_mutex);
289 
290 	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
291 	LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type));
292 
293 	list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
294 	lnd->lnd_refcount = 0;
295 
296 	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
297 
298 	mutex_unlock(&the_lnet.ln_lnd_mutex);
299 }
300 EXPORT_SYMBOL(lnet_register_lnd);
301 
302 void
303 lnet_unregister_lnd(struct lnet_lnd *lnd)
304 {
305 	mutex_lock(&the_lnet.ln_lnd_mutex);
306 
307 	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
308 	LASSERT(!lnd->lnd_refcount);
309 
310 	list_del(&lnd->lnd_list);
311 	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
312 
313 	mutex_unlock(&the_lnet.ln_lnd_mutex);
314 }
315 EXPORT_SYMBOL(lnet_unregister_lnd);
316 
317 void
318 lnet_counters_get(struct lnet_counters *counters)
319 {
320 	struct lnet_counters *ctr;
321 	int i;
322 
323 	memset(counters, 0, sizeof(*counters));
324 
325 	lnet_net_lock(LNET_LOCK_EX);
326 
327 	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
328 		counters->msgs_max     += ctr->msgs_max;
329 		counters->msgs_alloc   += ctr->msgs_alloc;
330 		counters->errors       += ctr->errors;
331 		counters->send_count   += ctr->send_count;
332 		counters->recv_count   += ctr->recv_count;
333 		counters->route_count  += ctr->route_count;
334 		counters->drop_count   += ctr->drop_count;
335 		counters->send_length  += ctr->send_length;
336 		counters->recv_length  += ctr->recv_length;
337 		counters->route_length += ctr->route_length;
338 		counters->drop_length  += ctr->drop_length;
339 	}
340 	lnet_net_unlock(LNET_LOCK_EX);
341 }
342 EXPORT_SYMBOL(lnet_counters_get);
343 
344 void
345 lnet_counters_reset(void)
346 {
347 	struct lnet_counters *counters;
348 	int i;
349 
350 	lnet_net_lock(LNET_LOCK_EX);
351 
352 	cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
353 		memset(counters, 0, sizeof(struct lnet_counters));
354 
355 	lnet_net_unlock(LNET_LOCK_EX);
356 }
357 
358 static char *
359 lnet_res_type2str(int type)
360 {
361 	switch (type) {
362 	default:
363 		LBUG();
364 	case LNET_COOKIE_TYPE_MD:
365 		return "MD";
366 	case LNET_COOKIE_TYPE_ME:
367 		return "ME";
368 	case LNET_COOKIE_TYPE_EQ:
369 		return "EQ";
370 	}
371 }
372 
373 static void
374 lnet_res_container_cleanup(struct lnet_res_container *rec)
375 {
376 	int count = 0;
377 
378 	if (!rec->rec_type) /* not set yet, it's uninitialized */
379 		return;
380 
381 	while (!list_empty(&rec->rec_active)) {
382 		struct list_head *e = rec->rec_active.next;
383 
384 		list_del_init(e);
385 		if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
386 			lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
387 
388 		} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
389 			lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
390 
391 		} else { /* NB: Active MEs should be attached on portals */
392 			LBUG();
393 		}
394 		count++;
395 	}
396 
397 	if (count > 0) {
398 		/*
399 		 * Found live MDs/MEs/EQs; the user really should unlink/free
400 		 * all of them before finalizing LNet, but if someone didn't,
401 		 * we have to recycle the garbage for them
402 		 */
403 		CERROR("%d active elements on exit of %s container\n",
404 		       count, lnet_res_type2str(rec->rec_type));
405 	}
406 
407 	if (rec->rec_lh_hash) {
408 		LIBCFS_FREE(rec->rec_lh_hash,
409 			    LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
410 		rec->rec_lh_hash = NULL;
411 	}
412 
413 	rec->rec_type = 0; /* mark it as finalized */
414 }
415 
416 static int
417 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
418 {
419 	int rc = 0;
420 	int i;
421 
422 	LASSERT(!rec->rec_type);
423 
424 	rec->rec_type = type;
425 	INIT_LIST_HEAD(&rec->rec_active);
426 	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
427 
428 	/* Arbitrary choice of hash table size */
429 	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
430 			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
431 	if (!rec->rec_lh_hash) {
432 		rc = -ENOMEM;
433 		goto out;
434 	}
435 
436 	for (i = 0; i < LNET_LH_HASH_SIZE; i++)
437 		INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
438 
439 	return 0;
440 
441 out:
442 	CERROR("Failed to setup %s resource container\n",
443 	       lnet_res_type2str(type));
444 	lnet_res_container_cleanup(rec);
445 	return rc;
446 }
447 
448 static void
449 lnet_res_containers_destroy(struct lnet_res_container **recs)
450 {
451 	struct lnet_res_container *rec;
452 	int i;
453 
454 	cfs_percpt_for_each(rec, i, recs)
455 		lnet_res_container_cleanup(rec);
456 
457 	cfs_percpt_free(recs);
458 }
459 
460 static struct lnet_res_container **
461 lnet_res_containers_create(int type)
462 {
463 	struct lnet_res_container **recs;
464 	struct lnet_res_container *rec;
465 	int rc;
466 	int i;
467 
468 	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
469 	if (!recs) {
470 		CERROR("Failed to allocate %s resource containers\n",
471 		       lnet_res_type2str(type));
472 		return NULL;
473 	}
474 
475 	cfs_percpt_for_each(rec, i, recs) {
476 		rc = lnet_res_container_setup(rec, i, type);
477 		if (rc) {
478 			lnet_res_containers_destroy(recs);
479 			return NULL;
480 		}
481 	}
482 
483 	return recs;
484 }
485 
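/*
 * Cookie layout, as used by the lookup and initialize helpers below: a
 * resource cookie packs, from low bits to high, the cookie type (MD/ME/EQ),
 * the CPT the resource was created on, and a per-container counter that
 * lnet_res_lh_initialize() advances by 1 << (LNET_COOKIE_TYPE_BITS +
 * LNET_CPT_BITS) per handle.  Lookup checks the type bits first, then
 * hashes the counter bits into rec_lh_hash.
 */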
486 struct lnet_libhandle *
487 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
488 {
489 	/* ALWAYS called with lnet_res_lock held */
490 	struct list_head *head;
491 	struct lnet_libhandle *lh;
492 	unsigned int hash;
493 
494 	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
495 		return NULL;
496 
497 	hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
498 	head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
499 
500 	list_for_each_entry(lh, head, lh_hash_chain) {
501 		if (lh->lh_cookie == cookie)
502 			return lh;
503 	}
504 
505 	return NULL;
506 }
507 
508 void
509 lnet_res_lh_initialize(struct lnet_res_container *rec,
510 		       struct lnet_libhandle *lh)
511 {
512 	/* ALWAYS called with lnet_res_lock held */
513 	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
514 	unsigned int hash;
515 
516 	lh->lh_cookie = rec->rec_lh_cookie;
517 	rec->rec_lh_cookie += 1 << ibits;
518 
519 	hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
520 
521 	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
522 }
523 
524 static int lnet_unprepare(void);
525 
526 static int
527 lnet_prepare(lnet_pid_t requested_pid)
528 {
529 	/* Prepare to bring up the network */
530 	struct lnet_res_container **recs;
531 	int rc = 0;
532 
533 	if (requested_pid == LNET_PID_ANY) {
534 		/* Don't instantiate LNET just for me */
535 		return -ENETDOWN;
536 	}
537 
538 	LASSERT(!the_lnet.ln_refcount);
539 
540 	the_lnet.ln_routing = 0;
541 
542 	LASSERT(!(requested_pid & LNET_PID_USERFLAG));
543 	the_lnet.ln_pid = requested_pid;
544 
545 	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
546 	INIT_LIST_HEAD(&the_lnet.ln_nis);
547 	INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
548 	INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
549 	INIT_LIST_HEAD(&the_lnet.ln_routers);
550 	INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
551 	INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
552 
553 	rc = lnet_create_remote_nets_table();
554 	if (rc)
555 		goto failed;
556 	/*
557 	 * NB the interface cookie in wire handles guards against delayed
558 	 * replies and ACKs appearing valid after reboot.
559 	 */
560 	the_lnet.ln_interface_cookie = ktime_get_ns();
561 
562 	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
563 						sizeof(struct lnet_counters));
564 	if (!the_lnet.ln_counters) {
565 		CERROR("Failed to allocate counters for LNet\n");
566 		rc = -ENOMEM;
567 		goto failed;
568 	}
569 
570 	rc = lnet_peer_tables_create();
571 	if (rc)
572 		goto failed;
573 
574 	rc = lnet_msg_containers_create();
575 	if (rc)
576 		goto failed;
577 
578 	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
579 				      LNET_COOKIE_TYPE_EQ);
580 	if (rc)
581 		goto failed;
582 
583 	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
584 	if (!recs) {
585 		rc = -ENOMEM;
586 		goto failed;
587 	}
588 
589 	the_lnet.ln_me_containers = recs;
590 
591 	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
592 	if (!recs) {
593 		rc = -ENOMEM;
594 		goto failed;
595 	}
596 
597 	the_lnet.ln_md_containers = recs;
598 
599 	rc = lnet_portals_create();
600 	if (rc) {
601 		CERROR("Failed to create portals for LNet: %d\n", rc);
602 		goto failed;
603 	}
604 
605 	return 0;
606 
607  failed:
608 	lnet_unprepare();
609 	return rc;
610 }
611 
612 static int
613 lnet_unprepare(void)
614 {
615 	/*
616 	 * NB no LNET_LOCK since this is the last reference.  All LND instances
617 	 * have shut down already, so it is safe to unlink and free all
618 	 * descriptors, even those that appear committed to a network op (eg MD
619 	 * with non-zero pending count)
620 	 */
621 	lnet_fail_nid(LNET_NID_ANY, 0);
622 
623 	LASSERT(!the_lnet.ln_refcount);
624 	LASSERT(list_empty(&the_lnet.ln_test_peers));
625 	LASSERT(list_empty(&the_lnet.ln_nis));
626 	LASSERT(list_empty(&the_lnet.ln_nis_cpt));
627 	LASSERT(list_empty(&the_lnet.ln_nis_zombie));
628 
629 	lnet_portals_destroy();
630 
631 	if (the_lnet.ln_md_containers) {
632 		lnet_res_containers_destroy(the_lnet.ln_md_containers);
633 		the_lnet.ln_md_containers = NULL;
634 	}
635 
636 	if (the_lnet.ln_me_containers) {
637 		lnet_res_containers_destroy(the_lnet.ln_me_containers);
638 		the_lnet.ln_me_containers = NULL;
639 	}
640 
641 	lnet_res_container_cleanup(&the_lnet.ln_eq_container);
642 
643 	lnet_msg_containers_destroy();
644 	lnet_peer_tables_destroy();
645 	lnet_rtrpools_free(0);
646 
647 	if (the_lnet.ln_counters) {
648 		cfs_percpt_free(the_lnet.ln_counters);
649 		the_lnet.ln_counters = NULL;
650 	}
651 	lnet_destroy_remote_nets_table();
652 
653 	return 0;
654 }
655 
656 struct lnet_ni  *
657 lnet_net2ni_locked(__u32 net, int cpt)
658 {
659 	struct list_head *tmp;
660 	struct lnet_ni *ni;
661 
662 	LASSERT(cpt != LNET_LOCK_EX);
663 
664 	list_for_each(tmp, &the_lnet.ln_nis) {
665 		ni = list_entry(tmp, struct lnet_ni, ni_list);
666 
667 		if (LNET_NIDNET(ni->ni_nid) == net) {
668 			lnet_ni_addref_locked(ni, cpt);
669 			return ni;
670 		}
671 	}
672 
673 	return NULL;
674 }
675 
676 struct lnet_ni *
677 lnet_net2ni(__u32 net)
678 {
679 	struct lnet_ni *ni;
680 
681 	lnet_net_lock(0);
682 	ni = lnet_net2ni_locked(net, 0);
683 	lnet_net_unlock(0);
684 
685 	return ni;
686 }
687 EXPORT_SYMBOL(lnet_net2ni);
688 
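/*
 * Map a NID to a CPT index in [0, number): with a single CPT this
 * short-circuits to 0; otherwise the NID is hashed and, since the CPT
 * count need not be a power of two, out-of-range values are folded back
 * with a modulo.  Used below to spread per-NID state across CPTs.
 */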
689 static unsigned int
690 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
691 {
692 	__u64 key = nid;
693 	unsigned int val;
694 
695 	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
696 
697 	if (number == 1)
698 		return 0;
699 
700 	val = hash_long(key, LNET_CPT_BITS);
701 	/* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
702 	if (val < number)
703 		return val;
704 
705 	return (unsigned int)(key + val + (val >> 1)) % number;
706 }
707 
708 int
709 lnet_cpt_of_nid_locked(lnet_nid_t nid)
710 {
711 	struct lnet_ni *ni;
712 
713 	/* must be called while holding lnet_net_lock */
714 	if (LNET_CPT_NUMBER == 1)
715 		return 0; /* the only one */
716 
717 	/* taking lnet_net_lock(any) would be OK */
718 	if (!list_empty(&the_lnet.ln_nis_cpt)) {
719 		list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
720 			if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
721 				continue;
722 
723 			LASSERT(ni->ni_cpts);
724 			return ni->ni_cpts[lnet_nid_cpt_hash
725 					   (nid, ni->ni_ncpts)];
726 		}
727 	}
728 
729 	return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
730 }
731 
732 int
733 lnet_cpt_of_nid(lnet_nid_t nid)
734 {
735 	int cpt;
736 	int cpt2;
737 
738 	if (LNET_CPT_NUMBER == 1)
739 		return 0; /* the only one */
740 
741 	if (list_empty(&the_lnet.ln_nis_cpt))
742 		return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
743 
744 	cpt = lnet_net_lock_current();
745 	cpt2 = lnet_cpt_of_nid_locked(nid);
746 	lnet_net_unlock(cpt);
747 
748 	return cpt2;
749 }
750 EXPORT_SYMBOL(lnet_cpt_of_nid);
751 
752 int
753 lnet_islocalnet(__u32 net)
754 {
755 	struct lnet_ni *ni;
756 	int cpt;
757 
758 	cpt = lnet_net_lock_current();
759 
760 	ni = lnet_net2ni_locked(net, cpt);
761 	if (ni)
762 		lnet_ni_decref_locked(ni, cpt);
763 
764 	lnet_net_unlock(cpt);
765 
766 	return !!ni;
767 }
768 
769 struct lnet_ni  *
770 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
771 {
772 	struct lnet_ni *ni;
773 	struct list_head *tmp;
774 
775 	LASSERT(cpt != LNET_LOCK_EX);
776 
777 	list_for_each(tmp, &the_lnet.ln_nis) {
778 		ni = list_entry(tmp, struct lnet_ni, ni_list);
779 
780 		if (ni->ni_nid == nid) {
781 			lnet_ni_addref_locked(ni, cpt);
782 			return ni;
783 		}
784 	}
785 
786 	return NULL;
787 }
788 
789 int
790 lnet_islocalnid(lnet_nid_t nid)
791 {
792 	struct lnet_ni *ni;
793 	int cpt;
794 
795 	cpt = lnet_net_lock_current();
796 	ni = lnet_nid2ni_locked(nid, cpt);
797 	if (ni)
798 		lnet_ni_decref_locked(ni, cpt);
799 	lnet_net_unlock(cpt);
800 
801 	return !!ni;
802 }
803 
804 int
805 lnet_count_acceptor_nis(void)
806 {
807 	/* Return the # of NIs that need the acceptor. */
808 	int count = 0;
809 	struct list_head *tmp;
810 	struct lnet_ni *ni;
811 	int cpt;
812 
813 	cpt = lnet_net_lock_current();
814 	list_for_each(tmp, &the_lnet.ln_nis) {
815 		ni = list_entry(tmp, struct lnet_ni, ni_list);
816 
817 		if (ni->ni_lnd->lnd_accept)
818 			count++;
819 	}
820 
821 	lnet_net_unlock(cpt);
822 
823 	return count;
824 }
825 
826 static struct lnet_ping_info *
827 lnet_ping_info_create(int num_ni)
828 {
829 	struct lnet_ping_info *ping_info;
830 	unsigned int infosz;
831 
832 	infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]);
833 	LIBCFS_ALLOC(ping_info, infosz);
834 	if (!ping_info) {
835 		CERROR("Can't allocate ping info[%d]\n", num_ni);
836 		return NULL;
837 	}
838 
839 	ping_info->pi_nnis = num_ni;
840 	ping_info->pi_pid = the_lnet.ln_pid;
841 	ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
842 	ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
843 
844 	return ping_info;
845 }
846 
847 static inline int
848 lnet_get_ni_count(void)
849 {
850 	struct lnet_ni *ni;
851 	int count = 0;
852 
853 	lnet_net_lock(0);
854 
855 	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list)
856 		count++;
857 
858 	lnet_net_unlock(0);
859 
860 	return count;
861 }
862 
863 static inline void
864 lnet_ping_info_free(struct lnet_ping_info *pinfo)
865 {
866 	LIBCFS_FREE(pinfo,
867 		    offsetof(struct lnet_ping_info,
868 			     pi_ni[pinfo->pi_nnis]));
869 }
870 
871 static void
872 lnet_ping_info_destroy(void)
873 {
874 	struct lnet_ni *ni;
875 
876 	lnet_net_lock(LNET_LOCK_EX);
877 
878 	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
879 		lnet_ni_lock(ni);
880 		ni->ni_status = NULL;
881 		lnet_ni_unlock(ni);
882 	}
883 
884 	lnet_ping_info_free(the_lnet.ln_ping_info);
885 	the_lnet.ln_ping_info = NULL;
886 
887 	lnet_net_unlock(LNET_LOCK_EX);
888 }
889 
890 static void
891 lnet_ping_event_handler(struct lnet_event *event)
892 {
893 	struct lnet_ping_info *pinfo = event->md.user_ptr;
894 
895 	if (event->unlinked)
896 		pinfo->pi_features = LNET_PING_FEAT_INVAL;
897 }
898 
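/*
 * Ping target setup: lnet_ping_info_setup() publishes this node's
 * lnet_ping_info as an MD on LNET_RESERVED_PORTAL matched by
 * LNET_PROTO_PING_MATCHBITS, so remote peers can GET the list of local
 * NIDs and their status.  lnet_ping_event_handler() above flags the info
 * as LNET_PING_FEAT_INVAL once the MD is unlinked, which
 * lnet_ping_md_unlink() below polls for.
 */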
899 static int
900 lnet_ping_info_setup(struct lnet_ping_info **ppinfo,
901 		     struct lnet_handle_md *md_handle,
902 		     int ni_count, bool set_eq)
903 {
904 	struct lnet_process_id id = {LNET_NID_ANY, LNET_PID_ANY};
905 	struct lnet_handle_me me_handle;
906 	struct lnet_md md = { NULL };
907 	int rc, rc2;
908 
909 	if (set_eq) {
910 		rc = LNetEQAlloc(0, lnet_ping_event_handler,
911 				 &the_lnet.ln_ping_target_eq);
912 		if (rc) {
913 			CERROR("Can't allocate ping EQ: %d\n", rc);
914 			return rc;
915 		}
916 	}
917 
918 	*ppinfo = lnet_ping_info_create(ni_count);
919 	if (!*ppinfo) {
920 		rc = -ENOMEM;
921 		goto failed_0;
922 	}
923 
924 	rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
925 			  LNET_PROTO_PING_MATCHBITS, 0,
926 			  LNET_UNLINK, LNET_INS_AFTER,
927 			  &me_handle);
928 	if (rc) {
929 		CERROR("Can't create ping ME: %d\n", rc);
930 		goto failed_1;
931 	}
932 
933 	/* initialize md content */
934 	md.start = *ppinfo;
935 	md.length = offsetof(struct lnet_ping_info,
936 			     pi_ni[(*ppinfo)->pi_nnis]);
937 	md.threshold = LNET_MD_THRESH_INF;
938 	md.max_size = 0;
939 	md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
940 		     LNET_MD_MANAGE_REMOTE;
941 	md.user_ptr  = NULL;
942 	md.eq_handle = the_lnet.ln_ping_target_eq;
943 	md.user_ptr = *ppinfo;
944 
945 	rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
946 	if (rc) {
947 		CERROR("Can't attach ping MD: %d\n", rc);
948 		goto failed_2;
949 	}
950 
951 	return 0;
952 
953 failed_2:
954 	rc2 = LNetMEUnlink(me_handle);
955 	LASSERT(!rc2);
956 failed_1:
957 	lnet_ping_info_free(*ppinfo);
958 	*ppinfo = NULL;
959 failed_0:
960 	if (set_eq)
961 		LNetEQFree(the_lnet.ln_ping_target_eq);
962 	return rc;
963 }
964 
965 static void
966 lnet_ping_md_unlink(struct lnet_ping_info *pinfo,
967 		    struct lnet_handle_md *md_handle)
968 {
969 	sigset_t blocked = cfs_block_allsigs();
970 
971 	LNetMDUnlink(*md_handle);
972 	LNetInvalidateMDHandle(md_handle);
973 
974 	/* NB md could be busy; this just starts the unlink */
975 	while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
976 		CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
977 		set_current_state(TASK_UNINTERRUPTIBLE);
978 		schedule_timeout(cfs_time_seconds(1));
979 	}
980 
981 	cfs_restore_sigs(blocked);
982 }
983 
984 static void
985 lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
986 {
987 	struct lnet_ni_status *ns;
988 	struct lnet_ni *ni;
989 	int i = 0;
990 
991 	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
992 		LASSERT(i < ping_info->pi_nnis);
993 
994 		ns = &ping_info->pi_ni[i];
995 
996 		ns->ns_nid = ni->ni_nid;
997 
998 		lnet_ni_lock(ni);
999 		ns->ns_status = (ni->ni_status) ?
1000 				 ni->ni_status->ns_status : LNET_NI_STATUS_UP;
1001 		ni->ni_status = ns;
1002 		lnet_ni_unlock(ni);
1003 
1004 		i++;
1005 	}
1006 }
1007 
1008 static void
1009 lnet_ping_target_update(struct lnet_ping_info *pinfo,
1010 			struct lnet_handle_md md_handle)
1011 {
1012 	struct lnet_ping_info *old_pinfo = NULL;
1013 	struct lnet_handle_md old_md;
1014 
1015 	/* switch the NIs to point to the new ping info created */
1016 	lnet_net_lock(LNET_LOCK_EX);
1017 
1018 	if (!the_lnet.ln_routing)
1019 		pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1020 	lnet_ping_info_install_locked(pinfo);
1021 
1022 	if (the_lnet.ln_ping_info) {
1023 		old_pinfo = the_lnet.ln_ping_info;
1024 		old_md = the_lnet.ln_ping_target_md;
1025 	}
1026 	the_lnet.ln_ping_target_md = md_handle;
1027 	the_lnet.ln_ping_info = pinfo;
1028 
1029 	lnet_net_unlock(LNET_LOCK_EX);
1030 
1031 	if (old_pinfo) {
1032 		/* unlink the old ping info */
1033 		lnet_ping_md_unlink(old_pinfo, &old_md);
1034 		lnet_ping_info_free(old_pinfo);
1035 	}
1036 }
1037 
1038 static void
1039 lnet_ping_target_fini(void)
1040 {
1041 	int rc;
1042 
1043 	lnet_ping_md_unlink(the_lnet.ln_ping_info,
1044 			    &the_lnet.ln_ping_target_md);
1045 
1046 	rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1047 	LASSERT(!rc);
1048 
1049 	lnet_ping_info_destroy();
1050 }
1051 
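/*
 * Per-CPT tx-queue credits: an NI's ni_maxtxcredits are divided evenly
 * across its CPTs, but each queue gets at least 8 * ni_peertxcredits and
 * never more than the NI-wide total.
 */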
1052 static int
1053 lnet_ni_tq_credits(struct lnet_ni *ni)
1054 {
1055 	int credits;
1056 
1057 	LASSERT(ni->ni_ncpts >= 1);
1058 
1059 	if (ni->ni_ncpts == 1)
1060 		return ni->ni_maxtxcredits;
1061 
1062 	credits = ni->ni_maxtxcredits / ni->ni_ncpts;
1063 	credits = max(credits, 8 * ni->ni_peertxcredits);
1064 	credits = min(credits, ni->ni_maxtxcredits);
1065 
1066 	return credits;
1067 }
1068 
1069 static void
1070 lnet_ni_unlink_locked(struct lnet_ni *ni)
1071 {
1072 	if (!list_empty(&ni->ni_cptlist)) {
1073 		list_del_init(&ni->ni_cptlist);
1074 		lnet_ni_decref_locked(ni, 0);
1075 	}
1076 
1077 	/* move it to the zombie list so nobody can find it anymore */
1078 	LASSERT(!list_empty(&ni->ni_list));
1079 	list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
1080 	lnet_ni_decref_locked(ni, 0);	/* drop ln_nis' ref */
1081 }
1082 
1083 static void
1084 lnet_clear_zombies_nis_locked(void)
1085 {
1086 	int i;
1087 	int islo;
1088 	struct lnet_ni *ni;
1089 	struct lnet_ni *temp;
1090 
1091 	/*
1092 	 * Now wait for the NIs I just nuked to show up on ln_nis_zombie
1093 	 * and shut them down in guaranteed thread context
1094 	 */
1095 	i = 2;
1096 	list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis_zombie, ni_list) {
1097 		int *ref;
1098 		int j;
1099 
1100 		list_del_init(&ni->ni_list);
1101 		cfs_percpt_for_each(ref, j, ni->ni_refs) {
1102 			if (!*ref)
1103 				continue;
1104 			/* still busy, add it back to zombie list */
1105 			list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
1106 			break;
1107 		}
1108 
1109 		if (!list_empty(&ni->ni_list)) {
1110 			lnet_net_unlock(LNET_LOCK_EX);
1111 			++i;
1112 			if ((i & (-i)) == i) {
1113 				CDEBUG(D_WARNING, "Waiting for zombie LNI %s\n",
1114 				       libcfs_nid2str(ni->ni_nid));
1115 			}
1116 			set_current_state(TASK_UNINTERRUPTIBLE);
1117 			schedule_timeout(cfs_time_seconds(1));
1118 			lnet_net_lock(LNET_LOCK_EX);
1119 			continue;
1120 		}
1121 
1122 		ni->ni_lnd->lnd_refcount--;
1123 		lnet_net_unlock(LNET_LOCK_EX);
1124 
1125 		islo = ni->ni_lnd->lnd_type == LOLND;
1126 
1127 		LASSERT(!in_interrupt());
1128 		ni->ni_lnd->lnd_shutdown(ni);
1129 
1130 		/*
1131 		 * can't deref lnd anymore now; it might have unregistered
1132 		 * itself...
1133 		 */
1134 		if (!islo)
1135 			CDEBUG(D_LNI, "Removed LNI %s\n",
1136 			       libcfs_nid2str(ni->ni_nid));
1137 
1138 		lnet_ni_free(ni);
1139 		i = 2;
1140 
1141 		lnet_net_lock(LNET_LOCK_EX);
1142 	}
1143 }
1144 
1145 static void
1146 lnet_shutdown_lndnis(void)
1147 {
1148 	struct lnet_ni *ni;
1149 	struct lnet_ni *temp;
1150 	int i;
1151 
1152 	/* NB called holding the global mutex */
1153 
1154 	/* All quiet on the API front */
1155 	LASSERT(!the_lnet.ln_shutdown);
1156 	LASSERT(!the_lnet.ln_refcount);
1157 	LASSERT(list_empty(&the_lnet.ln_nis_zombie));
1158 
1159 	lnet_net_lock(LNET_LOCK_EX);
1160 	the_lnet.ln_shutdown = 1;	/* flag shutdown */
1161 
1162 	/* Unlink NIs from the global table */
1163 	list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis, ni_list) {
1164 		lnet_ni_unlink_locked(ni);
1165 	}
1166 
1167 	/* Drop the cached loopback NI. */
1168 	if (the_lnet.ln_loni) {
1169 		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1170 		the_lnet.ln_loni = NULL;
1171 	}
1172 
1173 	lnet_net_unlock(LNET_LOCK_EX);
1174 
1175 	/*
1176 	 * Clear lazy portals and drop delayed messages which hold refs
1177 	 * on their lnet_msg::msg_rxpeer
1178 	 */
1179 	for (i = 0; i < the_lnet.ln_nportals; i++)
1180 		LNetClearLazyPortal(i);
1181 
1182 	/*
1183 	 * Clear the peer table and wait for all peers to go (they hold refs on
1184 	 * their NIs)
1185 	 */
1186 	lnet_peer_tables_cleanup(NULL);
1187 
1188 	lnet_net_lock(LNET_LOCK_EX);
1189 
1190 	lnet_clear_zombies_nis_locked();
1191 	the_lnet.ln_shutdown = 0;
1192 	lnet_net_unlock(LNET_LOCK_EX);
1193 }
1194 
1195 /* shut down the NI and release its refcount */
1196 static void
1197 lnet_shutdown_lndni(struct lnet_ni *ni)
1198 {
1199 	int i;
1200 
1201 	lnet_net_lock(LNET_LOCK_EX);
1202 	lnet_ni_unlink_locked(ni);
1203 	lnet_net_unlock(LNET_LOCK_EX);
1204 
1205 	/* clear messages for this NI on the lazy portal */
1206 	for (i = 0; i < the_lnet.ln_nportals; i++)
1207 		lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1208 
1209 	/* Do peer table cleanup for this ni */
1210 	lnet_peer_tables_cleanup(ni);
1211 
1212 	lnet_net_lock(LNET_LOCK_EX);
1213 	lnet_clear_zombies_nis_locked();
1214 	lnet_net_unlock(LNET_LOCK_EX);
1215 }
1216 
1217 static int
1218 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
1219 {
1220 	struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
1221 	int rc = -EINVAL;
1222 	int lnd_type;
1223 	struct lnet_lnd *lnd;
1224 	struct lnet_tx_queue *tq;
1225 	int i;
1226 
1227 	lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
1228 
1229 	LASSERT(libcfs_isknown_lnd(lnd_type));
1230 
1231 	if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
1232 	    lnd_type == IIBLND || lnd_type == VIBLND) {
1233 		CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
1234 		goto failed0;
1235 	}
1236 
1237 	/* Make sure this new NI is unique. */
1238 	lnet_net_lock(LNET_LOCK_EX);
1239 	rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis);
1240 	lnet_net_unlock(LNET_LOCK_EX);
1241 	if (!rc) {
1242 		if (lnd_type == LOLND) {
1243 			lnet_ni_free(ni);
1244 			return 0;
1245 		}
1246 
1247 		CERROR("Net %s is not unique\n",
1248 		       libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
1249 		rc = -EEXIST;
1250 		goto failed0;
1251 	}
1252 
1253 	mutex_lock(&the_lnet.ln_lnd_mutex);
1254 	lnd = lnet_find_lnd_by_type(lnd_type);
1255 
1256 	if (!lnd) {
1257 		mutex_unlock(&the_lnet.ln_lnd_mutex);
1258 		rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1259 		mutex_lock(&the_lnet.ln_lnd_mutex);
1260 
1261 		lnd = lnet_find_lnd_by_type(lnd_type);
1262 		if (!lnd) {
1263 			mutex_unlock(&the_lnet.ln_lnd_mutex);
1264 			CERROR("Can't load LND %s, module %s, rc=%d\n",
1265 			       libcfs_lnd2str(lnd_type),
1266 			       libcfs_lnd2modname(lnd_type), rc);
1267 			rc = -EINVAL;
1268 			goto failed0;
1269 		}
1270 	}
1271 
1272 	lnet_net_lock(LNET_LOCK_EX);
1273 	lnd->lnd_refcount++;
1274 	lnet_net_unlock(LNET_LOCK_EX);
1275 
1276 	ni->ni_lnd = lnd;
1277 
1278 	if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
1279 		lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
1280 
1281 	if (lnd_tunables) {
1282 		LIBCFS_ALLOC(ni->ni_lnd_tunables,
1283 			     sizeof(*ni->ni_lnd_tunables));
1284 		if (!ni->ni_lnd_tunables) {
1285 			mutex_unlock(&the_lnet.ln_lnd_mutex);
1286 			rc = -ENOMEM;
1287 			goto failed0;
1288 		}
1289 		memcpy(ni->ni_lnd_tunables, lnd_tunables,
1290 		       sizeof(*ni->ni_lnd_tunables));
1291 	}
1292 
1293 	/*
1294 	 * If given some LND tunable parameters, parse those now to
1295 	 * override the values in the NI structure.
1296 	 */
1297 	if (conf) {
1298 		if (conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0)
1299 			ni->ni_peerrtrcredits =
1300 				conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
1301 		if (conf->cfg_config_u.cfg_net.net_peer_timeout >= 0)
1302 			ni->ni_peertimeout =
1303 				conf->cfg_config_u.cfg_net.net_peer_timeout;
1304 		if (conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
1305 			ni->ni_peertxcredits =
1306 				conf->cfg_config_u.cfg_net.net_peer_tx_credits;
1307 		if (conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0)
1308 			ni->ni_maxtxcredits =
1309 				conf->cfg_config_u.cfg_net.net_max_tx_credits;
1310 	}
1311 
1312 	rc = lnd->lnd_startup(ni);
1313 
1314 	mutex_unlock(&the_lnet.ln_lnd_mutex);
1315 
1316 	if (rc) {
1317 		LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1318 				   rc, libcfs_lnd2str(lnd->lnd_type));
1319 		lnet_net_lock(LNET_LOCK_EX);
1320 		lnd->lnd_refcount--;
1321 		lnet_net_unlock(LNET_LOCK_EX);
1322 		goto failed0;
1323 	}
1324 
1325 	LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
1326 
1327 	lnet_net_lock(LNET_LOCK_EX);
1328 	/* refcount for ln_nis */
1329 	lnet_ni_addref_locked(ni, 0);
1330 	list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
1331 	if (ni->ni_cpts) {
1332 		lnet_ni_addref_locked(ni, 0);
1333 		list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt);
1334 	}
1335 
1336 	lnet_net_unlock(LNET_LOCK_EX);
1337 
1338 	if (lnd->lnd_type == LOLND) {
1339 		lnet_ni_addref(ni);
1340 		LASSERT(!the_lnet.ln_loni);
1341 		the_lnet.ln_loni = ni;
1342 		return 0;
1343 	}
1344 
1345 	if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) {
1346 		LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1347 				   libcfs_lnd2str(lnd->lnd_type),
1348 				   !ni->ni_peertxcredits ?
1349 				   "" : "per-peer ");
1350 		/*
1351 		 * shutdown the NI since if we get here then it must've already
1352 		 * been started
1353 		 */
1354 		lnet_shutdown_lndni(ni);
1355 		return -EINVAL;
1356 	}
1357 
1358 	cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1359 		tq->tq_credits_min =
1360 		tq->tq_credits_max =
1361 		tq->tq_credits = lnet_ni_tq_credits(ni);
1362 	}
1363 
1364 	CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1365 	       libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
1366 	       lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1367 	       ni->ni_peerrtrcredits, ni->ni_peertimeout);
1368 
1369 	return 0;
1370 failed0:
1371 	lnet_ni_free(ni);
1372 	return rc;
1373 }
1374 
1375 static int
1376 lnet_startup_lndnis(struct list_head *nilist)
1377 {
1378 	struct lnet_ni *ni;
1379 	int rc;
1380 	int ni_count = 0;
1381 
1382 	while (!list_empty(nilist)) {
1383 		ni = list_entry(nilist->next, struct lnet_ni, ni_list);
1384 		list_del(&ni->ni_list);
1385 		rc = lnet_startup_lndni(ni, NULL);
1386 
1387 		if (rc < 0)
1388 			goto failed;
1389 
1390 		ni_count++;
1391 	}
1392 
1393 	return ni_count;
1394 failed:
1395 	lnet_shutdown_lndnis();
1396 
1397 	return rc;
1398 }
1399 
1400 /**
1401  * Initialize LNet library.
1402  *
1403  * Automatically called at module loading time. Caller has to call
1404  * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
1405  * latter returned 0. It must be called exactly once.
1406  *
1407  * \retval 0 on success
1408  * \retval -ve on failures.
1409  */
1410 int lnet_lib_init(void)
1411 {
1412 	int rc;
1413 
1414 	lnet_assert_wire_constants();
1415 
1416 	memset(&the_lnet, 0, sizeof(the_lnet));
1417 
1418 	/* refer to global cfs_cpt_table for now */
1419 	the_lnet.ln_cpt_table	= cfs_cpt_table;
1420 	the_lnet.ln_cpt_number	= cfs_cpt_number(cfs_cpt_table);
1421 
1422 	LASSERT(the_lnet.ln_cpt_number > 0);
1423 	if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
1424 		/* we are at risk of consuming all lh_cookie values */
1425 		CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
1426 		       the_lnet.ln_cpt_number, LNET_CPT_MAX);
1427 		return -E2BIG;
1428 	}
1429 
1430 	while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
1431 		the_lnet.ln_cpt_bits++;
1432 
1433 	rc = lnet_create_locks();
1434 	if (rc) {
1435 		CERROR("Can't create LNet global locks: %d\n", rc);
1436 		return rc;
1437 	}
1438 
1439 	the_lnet.ln_refcount = 0;
1440 	LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
1441 	INIT_LIST_HEAD(&the_lnet.ln_lnds);
1442 	INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
1443 	INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
1444 
1445 	/*
1446 	 * The hash table size is the number of bits it takes to express the set
1447 	 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
1448 	 * don't waste memory).
1449 	 */
1450 	if (rnet_htable_size <= 0)
1451 		rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
1452 	else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
1453 		rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
1454 	the_lnet.ln_remote_nets_hbits = max_t(int, 1,
1455 					   order_base_2(rnet_htable_size) - 1);
1456 
1457 	/*
1458 	 * All LNDs apart from the LOLND are in separate modules.  They
1459 	 * register themselves when their module loads, and unregister
1460 	 * themselves when their module is unloaded.
1461 	 */
1462 	lnet_register_lnd(&the_lolnd);
1463 	return 0;
1464 }
1465 
1466 /**
1467  * Finalize LNet library.
1468  *
1469  * \pre lnet_lib_init() called with success.
1470  * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
1471  */
1472 void lnet_lib_exit(void)
1473 {
1474 	LASSERT(!the_lnet.ln_refcount);
1475 
1476 	while (!list_empty(&the_lnet.ln_lnds))
1477 		lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
1478 					       struct lnet_lnd, lnd_list));
1479 	lnet_destroy_locks();
1480 }
1481 
1482 /**
1483  * Set LNet PID and start LNet interfaces, routing, and forwarding.
1484  *
1485  * Users must call this function at least once before any other functions.
1486  * For each successful call there must be a corresponding call to
1487  * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
1488  * ignored.
1489  *
1490  * The PID used by LNet may be different from the one requested.
1491  * See LNetGetId().
1492  *
1493  * \param requested_pid PID requested by the caller.
1494  *
1495  * \return >= 0 on success, and < 0 error code on failures.
1496  */
1497 int
1498 LNetNIInit(lnet_pid_t requested_pid)
1499 {
1500 	int im_a_router = 0;
1501 	int rc;
1502 	int ni_count;
1503 	struct lnet_ping_info *pinfo;
1504 	struct lnet_handle_md md_handle;
1505 	struct list_head net_head;
1506 
1507 	INIT_LIST_HEAD(&net_head);
1508 
1509 	mutex_lock(&the_lnet.ln_api_mutex);
1510 
1511 	CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
1512 
1513 	if (the_lnet.ln_refcount > 0) {
1514 		rc = the_lnet.ln_refcount++;
1515 		mutex_unlock(&the_lnet.ln_api_mutex);
1516 		return rc;
1517 	}
1518 
1519 	rc = lnet_prepare(requested_pid);
1520 	if (rc) {
1521 		mutex_unlock(&the_lnet.ln_api_mutex);
1522 		return rc;
1523 	}
1524 
1525 	/* Add in the loopback network */
1526 	if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) {
1527 		rc = -ENOMEM;
1528 		goto err_empty_list;
1529 	}
1530 
1531 	/*
1532 	 * If LNet is being initialized via DLC it is possible
1533 	 * that the user requests not to load module parameters (ones which
1534 	 * are supported by DLC) on initialization.  Therefore, make sure not
1535 	 * to load networks, routes and forwarding from module parameters
1536 	 * in this case. On cleanup after a failure, only clean up
1537 	 * routes if they have been loaded.
1538 	 */
1539 	if (!the_lnet.ln_nis_from_mod_params) {
1540 		rc = lnet_parse_networks(&net_head, lnet_get_networks());
1541 		if (rc < 0)
1542 			goto err_empty_list;
1543 	}
1544 
1545 	ni_count = lnet_startup_lndnis(&net_head);
1546 	if (ni_count < 0) {
1547 		rc = ni_count;
1548 		goto err_empty_list;
1549 	}
1550 
1551 	if (!the_lnet.ln_nis_from_mod_params) {
1552 		rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
1553 		if (rc)
1554 			goto err_shutdown_lndnis;
1555 
1556 		rc = lnet_check_routes();
1557 		if (rc)
1558 			goto err_destroy_routes;
1559 
1560 		rc = lnet_rtrpools_alloc(im_a_router);
1561 		if (rc)
1562 			goto err_destroy_routes;
1563 	}
1564 
1565 	rc = lnet_acceptor_start();
1566 	if (rc)
1567 		goto err_destroy_routes;
1568 
1569 	the_lnet.ln_refcount = 1;
1570 	/* Now I may use my own API functions... */
1571 
1572 	rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
1573 	if (rc)
1574 		goto err_acceptor_stop;
1575 
1576 	lnet_ping_target_update(pinfo, md_handle);
1577 
1578 	rc = lnet_router_checker_start();
1579 	if (rc)
1580 		goto err_stop_ping;
1581 
1582 	lnet_fault_init();
1583 	lnet_router_debugfs_init();
1584 
1585 	mutex_unlock(&the_lnet.ln_api_mutex);
1586 
1587 	return 0;
1588 
1589 err_stop_ping:
1590 	lnet_ping_target_fini();
1591 err_acceptor_stop:
1592 	the_lnet.ln_refcount = 0;
1593 	lnet_acceptor_stop();
1594 err_destroy_routes:
1595 	if (!the_lnet.ln_nis_from_mod_params)
1596 		lnet_destroy_routes();
1597 err_shutdown_lndnis:
1598 	lnet_shutdown_lndnis();
1599 err_empty_list:
1600 	lnet_unprepare();
1601 	LASSERT(rc < 0);
1602 	mutex_unlock(&the_lnet.ln_api_mutex);
1603 	while (!list_empty(&net_head)) {
1604 		struct lnet_ni *ni;
1605 
1606 		ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1607 		list_del_init(&ni->ni_list);
1608 		lnet_ni_free(ni);
1609 	}
1610 	return rc;
1611 }
1612 EXPORT_SYMBOL(LNetNIInit);
1613 
1614 /**
1615  * Stop LNet interfaces, routing, and forwarding.
1616  *
1617  * Users must call this function once for each successful call to LNetNIInit().
1618  * Once the LNetNIFini() operation has been started, the results of pending
1619  * API operations are undefined.
1620  *
1621  * \return always 0 for current implementation.
1622  */
1623 int
1624 LNetNIFini(void)
1625 {
1626 	mutex_lock(&the_lnet.ln_api_mutex);
1627 
1628 	LASSERT(the_lnet.ln_refcount > 0);
1629 
1630 	if (the_lnet.ln_refcount != 1) {
1631 		the_lnet.ln_refcount--;
1632 	} else {
1633 		LASSERT(!the_lnet.ln_niinit_self);
1634 
1635 		lnet_fault_fini();
1636 		lnet_router_debugfs_fini();
1637 		lnet_router_checker_stop();
1638 		lnet_ping_target_fini();
1639 
1640 		/* Teardown fns that use my own API functions BEFORE here */
1641 		the_lnet.ln_refcount = 0;
1642 
1643 		lnet_acceptor_stop();
1644 		lnet_destroy_routes();
1645 		lnet_shutdown_lndnis();
1646 		lnet_unprepare();
1647 	}
1648 
1649 	mutex_unlock(&the_lnet.ln_api_mutex);
1650 	return 0;
1651 }
1652 EXPORT_SYMBOL(LNetNIFini);
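
/*
 * Minimal caller sketch for the two entry points above (illustrative
 * only, not part of this file).  In-kernel users normally pass
 * LNET_PID_LUSTRE; LNET_PID_ANY is rejected by lnet_prepare():
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	... use the LNet API (LNetGet, LNetPut, ...) ...
 *	LNetNIFini();
 *
 * Every successful LNetNIInit() must be balanced by one LNetNIFini().
 */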
1653 
1654 /**
1655  * Grabs the ni data from the ni structure and fills the out
1656  * parameters
1657  *
1658  * \param[in]  ni	       network interface structure
1659  * \param[out] config	       NI configuration
1660  */
1661 static void
1662 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
1663 {
1664 	struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
1665 	struct lnet_ioctl_net_config *net_config;
1666 	size_t min_size, tunable_size = 0;
1667 	int i;
1668 
1669 	if (!ni || !config)
1670 		return;
1671 
1672 	net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk;
1673 	if (!net_config)
1674 		return;
1675 
1676 	BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
1677 		     ARRAY_SIZE(net_config->ni_interfaces));
1678 
1679 	for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
1680 		if (!ni->ni_interfaces[i])
1681 			break;
1682 
1683 		strncpy(net_config->ni_interfaces[i],
1684 			ni->ni_interfaces[i],
1685 			sizeof(net_config->ni_interfaces[i]));
1686 	}
1687 
1688 	config->cfg_nid = ni->ni_nid;
1689 	config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout;
1690 	config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits;
1691 	config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits;
1692 	config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits;
1693 
1694 	net_config->ni_status = ni->ni_status->ns_status;
1695 
1696 	if (ni->ni_cpts) {
1697 		int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
1698 
1699 		for (i = 0; i < num_cpts; i++)
1700 			net_config->ni_cpts[i] = ni->ni_cpts[i];
1701 
1702 		config->cfg_ncpts = num_cpts;
1703 	}
1704 
1705 	/*
1706 	 * See if user land tools sent in a newer and larger version
1707 	 * of struct lnet_tunables than what the kernel uses.
1708 	 */
1709 	min_size = sizeof(*config) + sizeof(*net_config);
1710 
1711 	if (config->cfg_hdr.ioc_len > min_size)
1712 		tunable_size = config->cfg_hdr.ioc_len - min_size;
1713 
1714 	/* Don't copy too much data to user space */
1715 	min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables));
1716 	lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
1717 
1718 	if (ni->ni_lnd_tunables && lnd_cfg && min_size) {
1719 		memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size);
1720 		config->cfg_config_u.cfg_net.net_interface_count = 1;
1721 
1722 		/* Tell user land that kernel side has less data */
1723 		if (tunable_size > sizeof(*ni->ni_lnd_tunables)) {
1724 			min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
1725 			config->cfg_hdr.ioc_len -= min_size;
1726 		}
1727 	}
1728 }
1729 
1730 static int
1731 lnet_get_net_config(struct lnet_ioctl_config_data *config)
1732 {
1733 	struct lnet_ni *ni;
1734 	struct list_head *tmp;
1735 	int idx = config->cfg_count;
1736 	int cpt, i = 0;
1737 	int rc = -ENOENT;
1738 
1739 	cpt = lnet_net_lock_current();
1740 
1741 	list_for_each(tmp, &the_lnet.ln_nis) {
1742 		if (i++ != idx)
1743 			continue;
1744 
1745 		ni = list_entry(tmp, struct lnet_ni, ni_list);
1746 		lnet_ni_lock(ni);
1747 		lnet_fill_ni_info(ni, config);
1748 		lnet_ni_unlock(ni);
1749 		rc = 0;
1750 		break;
1751 	}
1752 
1753 	lnet_net_unlock(cpt);
1754 	return rc;
1755 }
1756 
1757 int
1758 lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
1759 {
1760 	char *nets = conf->cfg_config_u.cfg_net.net_intf;
1761 	struct lnet_ping_info *pinfo;
1762 	struct lnet_handle_md md_handle;
1763 	struct lnet_ni *ni;
1764 	struct list_head net_head;
1765 	struct lnet_remotenet *rnet;
1766 	int rc;
1767 
1768 	INIT_LIST_HEAD(&net_head);
1769 
1770 	/* Create a ni structure for the network string */
1771 	rc = lnet_parse_networks(&net_head, nets);
1772 	if (rc <= 0)
1773 		return !rc ? -EINVAL : rc;
1774 
1775 	mutex_lock(&the_lnet.ln_api_mutex);
1776 
1777 	if (rc > 1) {
1778 		rc = -EINVAL; /* only add one interface per call */
1779 		goto failed0;
1780 	}
1781 
1782 	ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1783 
1784 	lnet_net_lock(LNET_LOCK_EX);
1785 	rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid));
1786 	lnet_net_unlock(LNET_LOCK_EX);
1787 	/*
1788 	 * make sure that the net added doesn't invalidate the current
1789 	 * configuration LNet is keeping
1790 	 */
1791 	if (rnet) {
1792 		CERROR("Adding net %s will invalidate routing configuration\n",
1793 		       nets);
1794 		rc = -EUSERS;
1795 		goto failed0;
1796 	}
1797 
1798 	rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(),
1799 				  false);
1800 	if (rc)
1801 		goto failed0;
1802 
1803 	list_del_init(&ni->ni_list);
1804 
1805 	rc = lnet_startup_lndni(ni, conf);
1806 	if (rc)
1807 		goto failed1;
1808 
1809 	if (ni->ni_lnd->lnd_accept) {
1810 		rc = lnet_acceptor_start();
1811 		if (rc < 0) {
1812 			/* shutdown the ni that we just started */
1813 			CERROR("Failed to start up acceptor thread\n");
1814 			lnet_shutdown_lndni(ni);
1815 			goto failed1;
1816 		}
1817 	}
1818 
1819 	lnet_ping_target_update(pinfo, md_handle);
1820 	mutex_unlock(&the_lnet.ln_api_mutex);
1821 
1822 	return 0;
1823 
1824 failed1:
1825 	lnet_ping_md_unlink(pinfo, &md_handle);
1826 	lnet_ping_info_free(pinfo);
1827 failed0:
1828 	mutex_unlock(&the_lnet.ln_api_mutex);
1829 	while (!list_empty(&net_head)) {
1830 		ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1831 		list_del_init(&ni->ni_list);
1832 		lnet_ni_free(ni);
1833 	}
1834 	return rc;
1835 }
1836 
1837 int
1838 lnet_dyn_del_ni(__u32 net)
1839 {
1840 	struct lnet_ni *ni;
1841 	struct lnet_ping_info *pinfo;
1842 	struct lnet_handle_md md_handle;
1843 	int rc;
1844 
1845 	/* don't allow userspace to shutdown the LOLND */
1846 	if (LNET_NETTYP(net) == LOLND)
1847 		return -EINVAL;
1848 
1849 	mutex_lock(&the_lnet.ln_api_mutex);
1850 	/* create and link a new ping info, before removing the old one */
1851 	rc = lnet_ping_info_setup(&pinfo, &md_handle,
1852 				  lnet_get_ni_count() - 1, false);
1853 	if (rc)
1854 		goto out;
1855 
1856 	ni = lnet_net2ni(net);
1857 	if (!ni) {
1858 		rc = -EINVAL;
1859 		goto failed;
1860 	}
1861 
1862 	/* decrement the reference counter taken by lnet_net2ni() */
1863 	lnet_ni_decref_locked(ni, 0);
1864 
1865 	lnet_shutdown_lndni(ni);
1866 
1867 	if (!lnet_count_acceptor_nis())
1868 		lnet_acceptor_stop();
1869 
1870 	lnet_ping_target_update(pinfo, md_handle);
1871 	goto out;
1872 failed:
1873 	lnet_ping_md_unlink(pinfo, &md_handle);
1874 	lnet_ping_info_free(pinfo);
1875 out:
1876 	mutex_unlock(&the_lnet.ln_api_mutex);
1877 
1878 	return rc;
1879 }
1880 
1881 /**
1882  * LNet ioctl handler.
1883  *
1884  */
1885 int
1886 LNetCtl(unsigned int cmd, void *arg)
1887 {
1888 	struct libcfs_ioctl_data *data = arg;
1889 	struct lnet_ioctl_config_data *config;
1890 	struct lnet_process_id id = {0};
1891 	struct lnet_ni *ni;
1892 	int rc;
1893 	unsigned long secs_passed;
1894 
1895 	BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX <
1896 		     sizeof(struct lnet_ioctl_net_config) +
1897 		     sizeof(struct lnet_ioctl_config_data));
1898 
1899 	switch (cmd) {
1900 	case IOC_LIBCFS_GET_NI:
1901 		rc = LNetGetId(data->ioc_count, &id);
1902 		data->ioc_nid = id.nid;
1903 		return rc;
1904 
1905 	case IOC_LIBCFS_FAIL_NID:
1906 		return lnet_fail_nid(data->ioc_nid, data->ioc_count);
1907 
1908 	case IOC_LIBCFS_ADD_ROUTE:
1909 		config = arg;
1910 
1911 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1912 			return -EINVAL;
1913 
1914 		mutex_lock(&the_lnet.ln_api_mutex);
1915 		rc = lnet_add_route(config->cfg_net,
1916 				    config->cfg_config_u.cfg_route.rtr_hop,
1917 				    config->cfg_nid,
1918 				    config->cfg_config_u.cfg_route.rtr_priority);
1919 		if (!rc) {
1920 			rc = lnet_check_routes();
1921 			if (rc)
1922 				lnet_del_route(config->cfg_net,
1923 					       config->cfg_nid);
1924 		}
1925 		mutex_unlock(&the_lnet.ln_api_mutex);
1926 		return rc;
1927 
1928 	case IOC_LIBCFS_DEL_ROUTE:
1929 		config = arg;
1930 
1931 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1932 			return -EINVAL;
1933 
1934 		mutex_lock(&the_lnet.ln_api_mutex);
1935 		rc = lnet_del_route(config->cfg_net, config->cfg_nid);
1936 		mutex_unlock(&the_lnet.ln_api_mutex);
1937 		return rc;
1938 
1939 	case IOC_LIBCFS_GET_ROUTE:
1940 		config = arg;
1941 
1942 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1943 			return -EINVAL;
1944 
1945 		return lnet_get_route(config->cfg_count,
1946 				      &config->cfg_net,
1947 				      &config->cfg_config_u.cfg_route.rtr_hop,
1948 				      &config->cfg_nid,
1949 				      &config->cfg_config_u.cfg_route.rtr_flags,
1950 				      &config->cfg_config_u.cfg_route.rtr_priority);
1951 
1952 	case IOC_LIBCFS_GET_NET: {
1953 		size_t total = sizeof(*config) +
1954 			       sizeof(struct lnet_ioctl_net_config);
1955 		config = arg;
1956 
1957 		if (config->cfg_hdr.ioc_len < total)
1958 			return -EINVAL;
1959 
1960 		return lnet_get_net_config(config);
1961 	}
1962 
1963 	case IOC_LIBCFS_GET_LNET_STATS: {
1964 		struct lnet_ioctl_lnet_stats *lnet_stats = arg;
1965 
1966 		if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
1967 			return -EINVAL;
1968 
1969 		lnet_counters_get(&lnet_stats->st_cntrs);
1970 		return 0;
1971 	}
1972 
1973 	case IOC_LIBCFS_CONFIG_RTR:
1974 		config = arg;
1975 
1976 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1977 			return -EINVAL;
1978 
1979 		mutex_lock(&the_lnet.ln_api_mutex);
1980 		if (config->cfg_config_u.cfg_buffers.buf_enable) {
1981 			rc = lnet_rtrpools_enable();
1982 			mutex_unlock(&the_lnet.ln_api_mutex);
1983 			return rc;
1984 		}
1985 		lnet_rtrpools_disable();
1986 		mutex_unlock(&the_lnet.ln_api_mutex);
1987 		return 0;
1988 
1989 	case IOC_LIBCFS_ADD_BUF:
1990 		config = arg;
1991 
1992 		if (config->cfg_hdr.ioc_len < sizeof(*config))
1993 			return -EINVAL;
1994 
1995 		mutex_lock(&the_lnet.ln_api_mutex);
1996 		rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny,
1997 					  config->cfg_config_u.cfg_buffers.buf_small,
1998 					  config->cfg_config_u.cfg_buffers.buf_large);
1999 		mutex_unlock(&the_lnet.ln_api_mutex);
2000 		return rc;
2001 
2002 	case IOC_LIBCFS_GET_BUF: {
2003 		struct lnet_ioctl_pool_cfg *pool_cfg;
2004 		size_t total = sizeof(*config) + sizeof(*pool_cfg);
2005 
2006 		config = arg;
2007 
2008 		if (config->cfg_hdr.ioc_len < total)
2009 			return -EINVAL;
2010 
2011 		pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
2012 		return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
2013 	}
2014 
2015 	case IOC_LIBCFS_GET_PEER_INFO: {
2016 		struct lnet_ioctl_peer *peer_info = arg;
2017 
2018 		if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
2019 			return -EINVAL;
2020 
2021 		return lnet_get_peer_info(peer_info->pr_count,
2022 			&peer_info->pr_nid,
2023 			peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
2024 			&peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
2025 			&peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
2026 			&peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
2027 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
2028 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
2029 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
2030 			&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
2031 	}
2032 
2033 	case IOC_LIBCFS_NOTIFY_ROUTER:
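		/*
		 * data->ioc_u64[0] carries the "last alive" time in wall-clock
		 * seconds; convert the elapsed interval to jiffies so that
		 * lnet_notify() receives the event time on the jiffies clock.
		 */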
2034 		secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]);
2035 		secs_passed *= msecs_to_jiffies(MSEC_PER_SEC);
2036 
2037 		return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
2038 				   jiffies - secs_passed);
2039 
2040 	case IOC_LIBCFS_LNET_DIST:
2041 		rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
2042 		if (rc < 0 && rc != -EHOSTUNREACH)
2043 			return rc;
2044 
2045 		data->ioc_u32[0] = rc;
2046 		return 0;
2047 
2048 	case IOC_LIBCFS_TESTPROTOCOMPAT:
2049 		lnet_net_lock(LNET_LOCK_EX);
2050 		the_lnet.ln_testprotocompat = data->ioc_flags;
2051 		lnet_net_unlock(LNET_LOCK_EX);
2052 		return 0;
2053 
2054 	case IOC_LIBCFS_LNET_FAULT:
2055 		return lnet_fault_ctl(data->ioc_flags, data);
2056 
2057 	case IOC_LIBCFS_PING:
2058 		id.nid = data->ioc_nid;
2059 		id.pid = data->ioc_u32[0];
2060 		rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
2061 			       data->ioc_pbuf1,
2062 			       data->ioc_plen1 / sizeof(struct lnet_process_id));
2063 		if (rc < 0)
2064 			return rc;
2065 		data->ioc_count = rc;
2066 		return 0;
2067 
2068 	default:
2069 		ni = lnet_net2ni(data->ioc_net);
2070 		if (!ni)
2071 			return -EINVAL;
2072 
2073 		if (!ni->ni_lnd->lnd_ctl)
2074 			rc = -EINVAL;
2075 		else
2076 			rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
2077 
2078 		lnet_ni_decref(ni);
2079 		return rc;
2080 	}
2081 	/* not reached */
2082 }
2083 EXPORT_SYMBOL(LNetCtl);
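
/*
 * Illustrative sketch, not part of the build: fetching the NID of the
 * first local NI through the dispatcher above.  It assumes LNet is
 * already up (LNetNIInit() has been called) and, unlike the real ioctl
 * path, it does not bother filling in the libcfs ioctl header.
 *
 *	struct libcfs_ioctl_data data = { 0 };
 *
 *	data.ioc_count = 0;
 *	if (!LNetCtl(IOC_LIBCFS_GET_NI, &data))
 *		CDEBUG(D_NET, "NI[0]: %s\n", libcfs_nid2str(data.ioc_nid));
 */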
2084 
2085 void LNetDebugPeer(struct lnet_process_id id)
2086 {
2087 	lnet_debug_peer(id.nid);
2088 }
2089 EXPORT_SYMBOL(LNetDebugPeer);
2090 
2091 /**
2092  * Retrieve the lnet_process_id ID of the LNet interface at \a index. Note
2093  * that all interfaces share the same PID, as requested by LNetNIInit().
2094  *
2095  * \param index Index of the interface to look up.
2096  * \param id On successful return, this location will hold the
2097  * lnet_process_id ID of the interface.
2098  *
2099  * \retval 0 If an interface exists at \a index.
2100  * \retval -ENOENT If no interface has been found.
2101  */
2102 int
2103 LNetGetId(unsigned int index, struct lnet_process_id *id)
2104 {
2105 	struct lnet_ni *ni;
2106 	struct list_head *tmp;
2107 	int cpt;
2108 	int rc = -ENOENT;
2109 
2110 	LASSERT(the_lnet.ln_refcount > 0);
2111 
2112 	cpt = lnet_net_lock_current();
2113 
2114 	list_for_each(tmp, &the_lnet.ln_nis) {
2115 		if (index--)
2116 			continue;
2117 
2118 		ni = list_entry(tmp, struct lnet_ni, ni_list);
2119 
2120 		id->nid = ni->ni_nid;
2121 		id->pid = the_lnet.ln_pid;
2122 		rc = 0;
2123 		break;
2124 	}
2125 
2126 	lnet_net_unlock(cpt);
2127 	return rc;
2128 }
2129 EXPORT_SYMBOL(LNetGetId);
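
/*
 * Illustrative sketch, not part of the build: enumerating every local NI
 * with LNetGetId().  It assumes the caller already holds a reference on
 * LNet, as the LASSERT above requires.
 *
 *	struct lnet_process_id id;
 *	unsigned int i;
 *
 *	for (i = 0; !LNetGetId(i, &id); i++)
 *		CDEBUG(D_NET, "NI[%u]: %s\n", i, libcfs_id2str(id));
 */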
2130 
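/*
 * Ping the process identified by \a id: post a GET on the reserved ping
 * portal, wait (up to \a timeout_ms) for the REPLY and for the MD to be
 * unlinked, then copy at most \a n_ids of the NI IDs from the reply to
 * the userspace buffer \a ids.  Returns the number of NIs reported by
 * the peer, or a negative errno on failure.
 */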
2131 static int lnet_ping(struct lnet_process_id id, int timeout_ms,
2132 		     struct lnet_process_id __user *ids, int n_ids)
2133 {
2134 	struct lnet_handle_eq eqh;
2135 	struct lnet_handle_md mdh;
2136 	struct lnet_event event;
2137 	struct lnet_md md = { NULL };
2138 	int which;
2139 	int unlinked = 0;
2140 	int replied = 0;
2141 	const int a_long_time = 60000; /* ms */
2142 	int infosz;
2143 	struct lnet_ping_info *info;
2144 	struct lnet_process_id tmpid;
2145 	int i;
2146 	int nob;
2147 	int rc;
2148 	int rc2;
2149 	sigset_t blocked;
2150 
2151 	infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]);
2152 
2153 	if (n_ids <= 0 ||
2154 	    id.nid == LNET_NID_ANY ||
2155 	    timeout_ms > 500000 ||	      /* arbitrary limit! */
2156 	    n_ids > 20)			 /* arbitrary limit! */
2157 		return -EINVAL;
2158 
2159 	if (id.pid == LNET_PID_ANY)
2160 		id.pid = LNET_PID_LUSTRE;
2161 
2162 	LIBCFS_ALLOC(info, infosz);
2163 	if (!info)
2164 		return -ENOMEM;
2165 
2166 	/* NB 2 events max (including any unlink event) */
2167 	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
2168 	if (rc) {
2169 		CERROR("Can't allocate EQ: %d\n", rc);
2170 		goto out_0;
2171 	}
2172 
2173 	/* initialize md content */
2174 	md.start     = info;
2175 	md.length    = infosz;
2176 	md.threshold = 2; /* GET/REPLY */
2177 	md.max_size  = 0;
2178 	md.options   = LNET_MD_TRUNCATE;
2179 	md.user_ptr  = NULL;
2180 	md.eq_handle = eqh;
2181 
2182 	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
2183 	if (rc) {
2184 		CERROR("Can't bind MD: %d\n", rc);
2185 		goto out_1;
2186 	}
2187 
2188 	rc = LNetGet(LNET_NID_ANY, mdh, id,
2189 		     LNET_RESERVED_PORTAL,
2190 		     LNET_PROTO_PING_MATCHBITS, 0);
2191 
2192 	if (rc) {
2193 		/* Don't CERROR; this could be deliberate! */
2194 
2195 		rc2 = LNetMDUnlink(mdh);
2196 		LASSERT(!rc2);
2197 
2198 		/* NB must wait for the UNLINK event below... */
2199 		unlinked = 1;
2200 		timeout_ms = a_long_time;
2201 	}
2202 
2203 	do {
2204 		/* MUST block for unlink to complete */
2205 		if (unlinked)
2206 			blocked = cfs_block_allsigs();
2207 
2208 		rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);
2209 
2210 		if (unlinked)
2211 			cfs_restore_sigs(blocked);
2212 
2213 		CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
2214 		       (rc2 <= 0) ? -1 : event.type,
2215 		       (rc2 <= 0) ? -1 : event.status,
2216 		       (rc2 > 0 && event.unlinked) ? " unlinked" : "");
2217 
2218 		LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */
2219 
2220 		if (rc2 <= 0 || event.status) {
2221 			/* timeout or error */
2222 			if (!replied && !rc)
2223 				rc = (rc2 < 0) ? rc2 :
2224 				     !rc2 ? -ETIMEDOUT :
2225 				     event.status;
2226 
2227 			if (!unlinked) {
2228 				/* Ensure completion in finite time... */
2229 				LNetMDUnlink(mdh);
2230 				/* No assertion (racing with network) */
2231 				unlinked = 1;
2232 				timeout_ms = a_long_time;
2233 			} else if (!rc2) {
2234 				/* timed out waiting for unlink */
2235 				CWARN("ping %s: late network completion\n",
2236 				      libcfs_id2str(id));
2237 			}
2238 		} else if (event.type == LNET_EVENT_REPLY) {
2239 			replied = 1;
2240 			rc = event.mlength;
2241 		}
2242 
2243 	} while (rc2 <= 0 || !event.unlinked);
2244 
2245 	if (!replied) {
2246 		if (rc >= 0)
2247 			CWARN("%s: Unexpected rc >= 0 but no reply!\n",
2248 			      libcfs_id2str(id));
2249 		rc = -EIO;
2250 		goto out_1;
2251 	}
2252 
2253 	nob = rc;
2254 	LASSERT(nob >= 0 && nob <= infosz);
2255 
2256 	rc = -EPROTO;			   /* if I can't parse... */
2257 
2258 	if (nob < 8) {
2259 		/* can't check magic/version */
2260 		CERROR("%s: ping info too short %d\n",
2261 		       libcfs_id2str(id), nob);
2262 		goto out_1;
2263 	}
2264 
2265 	if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
2266 		lnet_swap_pinginfo(info);
2267 	} else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
2268 		CERROR("%s: Unexpected magic %08x\n",
2269 		       libcfs_id2str(id), info->pi_magic);
2270 		goto out_1;
2271 	}
2272 
2273 	if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) {
2274 		CERROR("%s: ping w/o NI status: 0x%x\n",
2275 		       libcfs_id2str(id), info->pi_features);
2276 		goto out_1;
2277 	}
2278 
2279 	if (nob < offsetof(struct lnet_ping_info, pi_ni[0])) {
2280 		CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
2281 		       nob, (int)offsetof(struct lnet_ping_info, pi_ni[0]));
2282 		goto out_1;
2283 	}
2284 
2285 	if (info->pi_nnis < n_ids)
2286 		n_ids = info->pi_nnis;
2287 
2288 	if (nob < offsetof(struct lnet_ping_info, pi_ni[n_ids])) {
2289 		CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
2290 		       nob, (int)offsetof(struct lnet_ping_info, pi_ni[n_ids]));
2291 		goto out_1;
2292 	}
2293 
2294 	rc = -EFAULT;			   /* If I SEGV... */
2295 
2296 	memset(&tmpid, 0, sizeof(tmpid));
2297 	for (i = 0; i < n_ids; i++) {
2298 		tmpid.pid = info->pi_pid;
2299 		tmpid.nid = info->pi_ni[i].ns_nid;
2300 		if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
2301 			goto out_1;
2302 	}
2303 	rc = info->pi_nnis;
2304 
2305  out_1:
2306 	rc2 = LNetEQFree(eqh);
2307 	if (rc2)
2308 		CERROR("LNetEQFree() failed: %d\n", rc2);
2309 	LASSERT(!rc2);
2310 
2311  out_0:
2312 	LIBCFS_FREE(info, infosz);
2313 	return rc;
2314 }
2315