1 /*
2  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
3  *
4  * Copyright (c) 2011, 2012, Intel Corporation.
5  *
6  *   This file is part of Portals
7  *   http://sourceforge.net/projects/sandiaportals/
8  *
9  *   Portals is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Portals is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Portals; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23 
24 #define DEBUG_SUBSYSTEM S_LNET
25 #include "../../include/linux/lnet/lib-lnet.h"
26 
27 #if  defined(LNET_ROUTER)
28 
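/* Router buffer pool sizing: the defaults below are aggregate counts that
 * the lnet_nrb_*_calculate() helpers later divide among CPTs and clamp to
 * the per-CPT minimum, so nodes with more than 4 CPTs fall back to the
 * *_MIN value on each CPT. */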
29 #define LNET_NRB_TINY_MIN	512	/* min value for each CPT */
30 #define LNET_NRB_TINY		(LNET_NRB_TINY_MIN * 4)
31 #define LNET_NRB_SMALL_MIN	4096	/* min value for each CPT */
32 #define LNET_NRB_SMALL		(LNET_NRB_SMALL_MIN * 4)
33 #define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
34 #define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
35 
36 static char *forwarding = "";
37 module_param(forwarding, charp, 0444);
38 MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
39 
40 static int tiny_router_buffers;
41 module_param(tiny_router_buffers, int, 0444);
42 MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
43 static int small_router_buffers;
44 module_param(small_router_buffers, int, 0444);
45 MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
46 static int large_router_buffers;
47 module_param(large_router_buffers, int, 0444);
48 MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
49 static int peer_buffer_credits = 0;
50 module_param(peer_buffer_credits, int, 0444);
51 MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
52 
53 static int auto_down = 1;
54 module_param(auto_down, int, 0444);
55 MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
56 
57 int
58 lnet_peer_buffer_credits(lnet_ni_t *ni)
59 {
60 	/* NI option overrides LNet default */
61 	if (ni->ni_peerrtrcredits > 0)
62 		return ni->ni_peerrtrcredits;
63 	if (peer_buffer_credits > 0)
64 		return peer_buffer_credits;
65 
66 	/* As an approximation, allow this peer the same number of router
67 	 * buffers as it is allowed outstanding sends */
68 	return ni->ni_peertxcredits;
69 }
70 
71 /* forward ref's */
72 static int lnet_router_checker(void *);
73 #else
74 
75 int
76 lnet_peer_buffer_credits(lnet_ni_t *ni)
77 {
78 	return 0;
79 }
80 
81 #endif
82 
83 static int check_routers_before_use = 0;
84 module_param(check_routers_before_use, int, 0444);
85 MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
86 
87 static int avoid_asym_router_failure = 1;
88 module_param(avoid_asym_router_failure, int, 0644);
89 MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
90 
91 static int dead_router_check_interval = 60;
92 module_param(dead_router_check_interval, int, 0644);
93 MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
94 
95 static int live_router_check_interval = 60;
96 module_param(live_router_check_interval, int, 0644);
97 MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
98 
99 static int router_ping_timeout = 50;
100 module_param(router_ping_timeout, int, 0644);
101 MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
102 
103 int
104 lnet_peers_start_down(void)
105 {
106 	return check_routers_before_use;
107 }
108 
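/* Record a timestamped aliveness report for a peer.  Stale reports and
 * repeats of already-known state are ignored; otherwise the new state is
 * latched and a notification is left pending for lnet_ni_notify_locked()
 * to deliver.  Called with the net lock held for the peer's CPT. */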
109 void
110 lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, unsigned long when)
111 {
112 	if (time_before(when, lp->lp_timestamp)) { /* out of date information */
113 		CDEBUG(D_NET, "Out of date\n");
114 		return;
115 	}
116 
117 	lp->lp_timestamp = when;		/* update timestamp */
118 	lp->lp_ping_deadline = 0;	       /* disable ping timeout */
119 
120 	if (lp->lp_alive_count != 0 &&	  /* got old news */
121 	    (!lp->lp_alive) == (!alive)) {      /* new date for old news */
122 		CDEBUG(D_NET, "Old news\n");
123 		return;
124 	}
125 
126 	/* Flag that notification is outstanding */
127 
128 	lp->lp_alive_count++;
129 	lp->lp_alive = !(!alive);	       /* 1 bit! */
130 	lp->lp_notify = 1;
131 	lp->lp_notifylnd |= notifylnd;
132 	if (lp->lp_alive)
133 		lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
134 
135 	CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
136 }
137 
138 static void
139 lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
140 {
141 	int	alive;
142 	int	notifylnd;
143 
144 	/* Notify only in 1 thread at any time to ensure ordered notification.
145 	 * NB individual events can be missed; the only guarantee is that you
146 	 * always get the most recent news */
147 
148 	if (lp->lp_notifying || ni == NULL)
149 		return;
150 
151 	lp->lp_notifying = 1;
152 
153 	while (lp->lp_notify) {
154 		alive     = lp->lp_alive;
155 		notifylnd = lp->lp_notifylnd;
156 
157 		lp->lp_notifylnd = 0;
158 		lp->lp_notify    = 0;
159 
160 		if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
161 			lnet_net_unlock(lp->lp_cpt);
162 
163 			/* A new notification could happen now; I'll handle it
164 			 * when control returns to me */
165 
166 			(ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
167 
168 			lnet_net_lock(lp->lp_cpt);
169 		}
170 	}
171 
172 	lp->lp_notifying = 0;
173 }
174 
175 
176 static void
177 lnet_rtr_addref_locked(lnet_peer_t *lp)
178 {
179 	LASSERT(lp->lp_refcount > 0);
180 	LASSERT(lp->lp_rtr_refcount >= 0);
181 
182 	/* lnet_net_lock must be exclusively locked */
183 	lp->lp_rtr_refcount++;
184 	if (lp->lp_rtr_refcount == 1) {
185 		struct list_head *pos;
186 
187 		/* a simple insertion sort */
188 		list_for_each_prev(pos, &the_lnet.ln_routers) {
189 			lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
190 							  lp_rtr_list);
191 
192 			if (rtr->lp_nid < lp->lp_nid)
193 				break;
194 		}
195 
196 		list_add(&lp->lp_rtr_list, pos);
197 		/* addref for the_lnet.ln_routers */
198 		lnet_peer_addref_locked(lp);
199 		the_lnet.ln_routers_version++;
200 	}
201 }
202 
203 static void
204 lnet_rtr_decref_locked(lnet_peer_t *lp)
205 {
206 	LASSERT(lp->lp_refcount > 0);
207 	LASSERT(lp->lp_rtr_refcount > 0);
208 
209 	/* lnet_net_lock must be exclusively locked */
210 	lp->lp_rtr_refcount--;
211 	if (lp->lp_rtr_refcount == 0) {
212 		LASSERT(list_empty(&lp->lp_routes));
213 
214 		if (lp->lp_rcd != NULL) {
215 			list_add(&lp->lp_rcd->rcd_list,
216 				     &the_lnet.ln_rcd_deathrow);
217 			lp->lp_rcd = NULL;
218 		}
219 
220 		list_del(&lp->lp_rtr_list);
221 		/* decref for the_lnet.ln_routers */
222 		lnet_peer_decref_locked(lp);
223 		the_lnet.ln_routers_version++;
224 	}
225 }
226 
227 lnet_remotenet_t *
228 lnet_find_net_locked (__u32 net)
229 {
230 	lnet_remotenet_t	*rnet;
231 	struct list_head		*tmp;
232 	struct list_head		*rn_list;
233 
234 	LASSERT(!the_lnet.ln_shutdown);
235 
236 	rn_list = lnet_net2rnethash(net);
237 	list_for_each(tmp, rn_list) {
238 		rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
239 
240 		if (rnet->lrn_net == net)
241 			return rnet;
242 	}
243 	return NULL;
244 }
245 
246 static void lnet_shuffle_seed(void)
247 {
248 	static int seeded = 0;
249 	int lnd_type, seed[2];
250 	struct timeval tv;
251 	lnet_ni_t *ni;
252 	struct list_head *tmp;
253 
254 	if (seeded)
255 		return;
256 
257 	cfs_get_random_bytes(seed, sizeof(seed));
258 
259 	/* Nodes with small feet have little entropy;
260 	 * the NID for this node gives the most entropy in the low bits */
261 	list_for_each(tmp, &the_lnet.ln_nis) {
262 		ni = list_entry(tmp, lnet_ni_t, ni_list);
263 		lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
264 
265 		if (lnd_type != LOLND)
266 			seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
267 	}
268 
269 	do_gettimeofday(&tv);
270 	cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
271 	seeded = 1;
272 	return;
273 }
274 
275 /* NB expects LNET_LOCK held */
276 static void
277 lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
278 {
279 	unsigned int      len = 0;
280 	unsigned int      offset = 0;
281 	struct list_head       *e;
282 
283 	lnet_shuffle_seed();
284 
285 	list_for_each (e, &rnet->lrn_routes) {
286 		len++;
287 	}
288 
289 	/* len+1 positions to add a new entry, also prevents division by 0 */
290 	offset = cfs_rand() % (len + 1);
291 	list_for_each (e, &rnet->lrn_routes) {
292 		if (offset == 0)
293 			break;
294 		offset--;
295 	}
296 	list_add(&route->lr_list, e);
297 	list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
298 
299 	the_lnet.ln_remote_nets_version++;
300 	lnet_rtr_addref_locked(route->lr_gateway);
301 }
302 
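/* Add a route to remote network 'net' via 'gateway'.  Routes to local
 * networks and duplicate gateway entries are silently ignored; a new
 * remote-net entry is created on first use, and the route is inserted at
 * a random position so traffic is spread across equivalent routes. */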
303 int
304 lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
305 	       unsigned int priority)
306 {
307 	struct list_head	  *e;
308 	lnet_remotenet_t    *rnet;
309 	lnet_remotenet_t    *rnet2;
310 	lnet_route_t	*route;
311 	lnet_ni_t	   *ni;
312 	int		  add_route;
313 	int		  rc;
314 
315 	CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
316 	       libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
317 
318 	if (gateway == LNET_NID_ANY ||
319 	    LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
320 	    net == LNET_NIDNET(LNET_NID_ANY) ||
321 	    LNET_NETTYP(net) == LOLND ||
322 	    LNET_NIDNET(gateway) == net ||
323 	    hops < 1 || hops > 255)
324 		return -EINVAL;
325 
326 	if (lnet_islocalnet(net))	       /* it's a local network */
327 		return 0;		       /* ignore the route entry */
328 
329 	/* Assume net, route, all new */
330 	LIBCFS_ALLOC(route, sizeof(*route));
331 	LIBCFS_ALLOC(rnet, sizeof(*rnet));
332 	if (route == NULL || rnet == NULL) {
333 		CERROR("Out of memory creating route %s %d %s\n",
334 		       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
335 		if (route != NULL)
336 			LIBCFS_FREE(route, sizeof(*route));
337 		if (rnet != NULL)
338 			LIBCFS_FREE(rnet, sizeof(*rnet));
339 		return -ENOMEM;
340 	}
341 
342 	INIT_LIST_HEAD(&rnet->lrn_routes);
343 	rnet->lrn_net = net;
344 	route->lr_hops = hops;
345 	route->lr_net = net;
346 	route->lr_priority = priority;
347 
348 	lnet_net_lock(LNET_LOCK_EX);
349 
350 	rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
351 	if (rc != 0) {
352 		lnet_net_unlock(LNET_LOCK_EX);
353 
354 		LIBCFS_FREE(route, sizeof(*route));
355 		LIBCFS_FREE(rnet, sizeof(*rnet));
356 
357 		if (rc == -EHOSTUNREACH) { /* gateway is not on a local net */
358 			return 0;	/* ignore the route entry */
359 		} else {
360 			CERROR("Error %d creating route %s %d %s\n", rc,
361 			       libcfs_net2str(net), hops,
362 			       libcfs_nid2str(gateway));
363 		}
364 		return rc;
365 	}
366 
367 	LASSERT (!the_lnet.ln_shutdown);
368 
369 	rnet2 = lnet_find_net_locked(net);
370 	if (rnet2 == NULL) {
371 		/* new network */
372 		list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
373 		rnet2 = rnet;
374 	}
375 
376 	/* Search for a duplicate route (it's a NOOP if it is) */
377 	add_route = 1;
378 	list_for_each (e, &rnet2->lrn_routes) {
379 		lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
380 
381 		if (route2->lr_gateway == route->lr_gateway) {
382 			add_route = 0;
383 			break;
384 		}
385 
386 		/* our lookups must be true */
387 		LASSERT (route2->lr_gateway->lp_nid != gateway);
388 	}
389 
390 	if (add_route) {
391 		lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
392 		lnet_add_route_to_rnet(rnet2, route);
393 
394 		ni = route->lr_gateway->lp_ni;
395 		lnet_net_unlock(LNET_LOCK_EX);
396 
397 		/* XXX Assume alive */
398 		if (ni->ni_lnd->lnd_notify != NULL)
399 			(ni->ni_lnd->lnd_notify)(ni, gateway, 1);
400 
401 		lnet_net_lock(LNET_LOCK_EX);
402 	}
403 
404 	/* -1 for notify or !add_route */
405 	lnet_peer_decref_locked(route->lr_gateway);
406 	lnet_net_unlock(LNET_LOCK_EX);
407 
408 	if (!add_route)
409 		LIBCFS_FREE(route, sizeof(*route));
410 
411 	if (rnet != rnet2)
412 		LIBCFS_FREE(rnet, sizeof(*rnet));
413 
414 	return 0;
415 }
416 
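/* Verify that all routes to any one remote net go through gateways reached
 * via the same local NI; configurations with multiple entry points to the
 * same remote net are rejected with -EINVAL. */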
417 int
418 lnet_check_routes(void)
419 {
420 	lnet_remotenet_t	*rnet;
421 	lnet_route_t		*route;
422 	lnet_route_t		*route2;
423 	struct list_head		*e1;
424 	struct list_head		*e2;
425 	int			cpt;
426 	struct list_head		*rn_list;
427 	int			i;
428 
429 	cpt = lnet_net_lock_current();
430 
431 	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
432 		rn_list = &the_lnet.ln_remote_nets_hash[i];
433 		list_for_each(e1, rn_list) {
434 			rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
435 
436 			route2 = NULL;
437 			list_for_each(e2, &rnet->lrn_routes) {
438 				lnet_nid_t	nid1;
439 				lnet_nid_t	nid2;
440 				int		net;
441 
442 				route = list_entry(e2, lnet_route_t,
443 						       lr_list);
444 
445 				if (route2 == NULL) {
446 					route2 = route;
447 					continue;
448 				}
449 
450 				if (route->lr_gateway->lp_ni ==
451 				    route2->lr_gateway->lp_ni)
452 					continue;
453 
454 				nid1 = route->lr_gateway->lp_nid;
455 				nid2 = route2->lr_gateway->lp_nid;
456 				net = rnet->lrn_net;
457 
458 				lnet_net_unlock(cpt);
459 
460 				CERROR("Routes to %s via %s and %s not "
461 				       "supported\n",
462 				       libcfs_net2str(net),
463 				       libcfs_nid2str(nid1),
464 				       libcfs_nid2str(nid2));
465 				return -EINVAL;
466 			}
467 		}
468 	}
469 
470 	lnet_net_unlock(cpt);
471 	return 0;
472 }
473 
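/* Remove routes matching 'net' and 'gw_nid'; LNET_NID_ANY and the wildcard
 * net match everything.  Returns 0 if at least one route was removed,
 * otherwise -ENOENT. */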
474 int
475 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
476 {
477 	struct lnet_peer	*gateway;
478 	lnet_remotenet_t	*rnet;
479 	lnet_route_t		*route;
480 	struct list_head		*e1;
481 	struct list_head		*e2;
482 	int			rc = -ENOENT;
483 	struct list_head		*rn_list;
484 	int			idx = 0;
485 
486 	CDEBUG(D_NET, "Del route: net %s : gw %s\n",
487 	       libcfs_net2str(net), libcfs_nid2str(gw_nid));
488 
489 	/* NB Caller may specify either all routes via the given gateway
490 	 * or a specific route entry (actual NIDs) */
491 
492 	lnet_net_lock(LNET_LOCK_EX);
493 	if (net == LNET_NIDNET(LNET_NID_ANY))
494 		rn_list = &the_lnet.ln_remote_nets_hash[0];
495 	else
496 		rn_list = lnet_net2rnethash(net);
497 
498  again:
499 	list_for_each(e1, rn_list) {
500 		rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
501 
502 		if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
503 			net == rnet->lrn_net))
504 			continue;
505 
506 		list_for_each(e2, &rnet->lrn_routes) {
507 			route = list_entry(e2, lnet_route_t, lr_list);
508 
509 			gateway = route->lr_gateway;
510 			if (!(gw_nid == LNET_NID_ANY ||
511 			      gw_nid == gateway->lp_nid))
512 				continue;
513 
514 			list_del(&route->lr_list);
515 			list_del(&route->lr_gwlist);
516 			the_lnet.ln_remote_nets_version++;
517 
518 			if (list_empty(&rnet->lrn_routes))
519 				list_del(&rnet->lrn_list);
520 			else
521 				rnet = NULL;
522 
523 			lnet_rtr_decref_locked(gateway);
524 			lnet_peer_decref_locked(gateway);
525 
526 			lnet_net_unlock(LNET_LOCK_EX);
527 
528 			LIBCFS_FREE(route, sizeof(*route));
529 
530 			if (rnet != NULL)
531 				LIBCFS_FREE(rnet, sizeof(*rnet));
532 
533 			rc = 0;
534 			lnet_net_lock(LNET_LOCK_EX);
535 			goto again;
536 		}
537 	}
538 
539 	if (net == LNET_NIDNET(LNET_NID_ANY) &&
540 	    ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
541 		rn_list = &the_lnet.ln_remote_nets_hash[idx];
542 		goto again;
543 	}
544 	lnet_net_unlock(LNET_LOCK_EX);
545 
546 	return rc;
547 }
548 
549 void
550 lnet_destroy_routes (void)
551 {
552 	lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
553 }
554 
555 int
556 lnet_get_route(int idx, __u32 *net, __u32 *hops,
557 	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
558 {
559 	struct list_head		*e1;
560 	struct list_head		*e2;
561 	lnet_remotenet_t	*rnet;
562 	lnet_route_t		*route;
563 	int			cpt;
564 	int			i;
565 	struct list_head		*rn_list;
566 
567 	cpt = lnet_net_lock_current();
568 
569 	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
570 		rn_list = &the_lnet.ln_remote_nets_hash[i];
571 		list_for_each(e1, rn_list) {
572 			rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
573 
574 			list_for_each(e2, &rnet->lrn_routes) {
575 				route = list_entry(e2, lnet_route_t,
576 						       lr_list);
577 
578 				if (idx-- == 0) {
579 					*net	  = rnet->lrn_net;
580 					*hops	  = route->lr_hops;
581 					*priority = route->lr_priority;
582 					*gateway  = route->lr_gateway->lp_nid;
583 					*alive	  = route->lr_gateway->lp_alive;
584 					lnet_net_unlock(cpt);
585 					return 0;
586 				}
587 			}
588 		}
589 	}
590 
591 	lnet_net_unlock(cpt);
592 	return -ENOENT;
593 }
594 
595 void
596 lnet_swap_pinginfo(lnet_ping_info_t *info)
597 {
598 	int	       i;
599 	lnet_ni_status_t *stat;
600 
601 	__swab32s(&info->pi_magic);
602 	__swab32s(&info->pi_features);
603 	__swab32s(&info->pi_pid);
604 	__swab32s(&info->pi_nnis);
605 	for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
606 		stat = &info->pi_ni[i];
607 		__swab64s(&stat->ns_nid);
608 		__swab32s(&stat->ns_status);
609 	}
610 	return;
611 }
612 
613 /**
614  * parse router-checker pinginfo, record number of down NIs for remote
615  * networks on that router.
616  */
617 static void
618 lnet_parse_rc_info(lnet_rc_data_t *rcd)
619 {
620 	lnet_ping_info_t	*info = rcd->rcd_pinginfo;
621 	struct lnet_peer	*gw   = rcd->rcd_gateway;
622 	lnet_route_t		*rtr;
623 
624 	if (!gw->lp_alive)
625 		return;
626 
627 	if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
628 		lnet_swap_pinginfo(info);
629 
630 	/* NB always racing with network! */
631 	if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
632 		CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
633 		       libcfs_nid2str(gw->lp_nid), info->pi_magic);
634 		gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
635 		return;
636 	}
637 
638 	gw->lp_ping_feats = info->pi_features;
639 	if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
640 		CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
641 		       libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
642 		return; /* nothing I can understand */
643 	}
644 
645 	if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
646 		return; /* can't carry NI status info */
647 
648 	list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
649 		int	ptl_status = LNET_NI_STATUS_INVALID;
650 		int	down = 0;
651 		int	up = 0;
652 		int	i;
653 
654 		for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
655 			lnet_ni_status_t *stat = &info->pi_ni[i];
656 			lnet_nid_t	 nid = stat->ns_nid;
657 
658 			if (nid == LNET_NID_ANY) {
659 				CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
660 				       libcfs_nid2str(gw->lp_nid));
661 				gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
662 				return;
663 			}
664 
665 			if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
666 				continue;
667 
668 			if (stat->ns_status == LNET_NI_STATUS_DOWN) {
669 				if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND)
670 					down++;
671 				else if (ptl_status != LNET_NI_STATUS_UP)
672 					ptl_status = LNET_NI_STATUS_DOWN;
673 				continue;
674 			}
675 
676 			if (stat->ns_status == LNET_NI_STATUS_UP) {
677 				if (LNET_NIDNET(nid) == rtr->lr_net) {
678 					up = 1;
679 					break;
680 				}
681 				/* ptl NIs are considered down only when
682 				 * they're all down */
683 				if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND)
684 					ptl_status = LNET_NI_STATUS_UP;
685 				continue;
686 			}
687 
688 			CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
689 			       libcfs_nid2str(gw->lp_nid), stat->ns_status);
690 			gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
691 			return;
692 		}
693 
694 		if (up) { /* ignore downed NIs if NI for dest network is up */
695 			rtr->lr_downis = 0;
696 			continue;
697 		}
698 		rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN);
699 	}
700 }
701 
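/* EQ callback for router pings.  Updates the gateway's cached aliveness
 * from SEND/REPLY events, and on a successful REPLY (with
 * avoid_asym_router_failure set) parses the returned ping info to count
 * down NIs per route. */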
702 static void
703 lnet_router_checker_event(lnet_event_t *event)
704 {
705 	lnet_rc_data_t		*rcd = event->md.user_ptr;
706 	struct lnet_peer	*lp;
707 
708 	LASSERT(rcd != NULL);
709 
710 	if (event->unlinked) {
711 		LNetInvalidateHandle(&rcd->rcd_mdh);
712 		return;
713 	}
714 
715 	LASSERT(event->type == LNET_EVENT_SEND ||
716 		event->type == LNET_EVENT_REPLY);
717 
718 	lp = rcd->rcd_gateway;
719 	LASSERT(lp != NULL);
720 
721 	 /* NB: this is called with lnet_res_lock held; a few places need to
722 	  * hold both locks at the same time, so please take care of the
723 	  * lock ordering */
724 	lnet_net_lock(lp->lp_cpt);
725 	if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
726 		/* ignore if no longer a router or rcd is replaced */
727 		goto out;
728 	}
729 
730 	if (event->type == LNET_EVENT_SEND) {
731 		lp->lp_ping_notsent = 0;
732 		if (event->status == 0)
733 			goto out;
734 	}
735 
736 	/* LNET_EVENT_REPLY */
737 	/* A successful REPLY means the router is up.  If _any_ comms
738 	 * to the router fail I assume it's down (this will happen if
739 	 * we ping alive routers to try to detect router death before
740 	 * apps get burned). */
741 
742 	lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
743 	/* The router checker will wake up very shortly and do the
744 	 * actual notification.
745 	 * XXX If 'lp' stops being a router before then, it will still
746 	 * have the notification pending!!! */
747 
748 	if (avoid_asym_router_failure && event->status == 0)
749 		lnet_parse_rc_info(rcd);
750 
751  out:
752 	lnet_net_unlock(lp->lp_cpt);
753 }
754 
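/* Block until every known router has been pinged at least once, i.e. until
 * each router's aliveness has been determined; polls the router list once
 * a second. */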
755 void
756 lnet_wait_known_routerstate(void)
757 {
758 	lnet_peer_t	 *rtr;
759 	struct list_head	  *entry;
760 	int		  all_known;
761 
762 	LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
763 
764 	for (;;) {
765 		int	cpt = lnet_net_lock_current();
766 
767 		all_known = 1;
768 		list_for_each (entry, &the_lnet.ln_routers) {
769 			rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
770 
771 			if (rtr->lp_alive_count == 0) {
772 				all_known = 0;
773 				break;
774 			}
775 		}
776 
777 		lnet_net_unlock(cpt);
778 
779 		if (all_known)
780 			return;
781 
782 		set_current_state(TASK_UNINTERRUPTIBLE);
783 		schedule_timeout(cfs_time_seconds(1));
784 	}
785 }
786 
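/* Mark local NIs down in the exported NI status if they have shown no
 * activity within router_ping_timeout plus the larger of the live/dead
 * router check intervals.  Only called while routing is enabled. */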
787 void
788 lnet_update_ni_status_locked(void)
789 {
790 	lnet_ni_t	*ni;
791 	long		now;
792 	int		timeout;
793 
794 	LASSERT(the_lnet.ln_routing);
795 
796 	timeout = router_ping_timeout +
797 		  MAX(live_router_check_interval, dead_router_check_interval);
798 
799 	now = get_seconds();
800 	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
801 		if (ni->ni_lnd->lnd_type == LOLND)
802 			continue;
803 
804 		if (now < ni->ni_last_alive + timeout)
805 			continue;
806 
807 		lnet_ni_lock(ni);
808 		/* re-check with lock */
809 		if (now < ni->ni_last_alive + timeout) {
810 			lnet_ni_unlock(ni);
811 			continue;
812 		}
813 
814 		LASSERT(ni->ni_status != NULL);
815 
816 		if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
817 			CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
818 			       libcfs_nid2str(ni->ni_nid), timeout);
819 			/* NB: so far, this is the only place to set
820 			 * NI status to "down" */
821 			ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
822 		}
823 		lnet_ni_unlock(ni);
824 	}
825 }
826 
827 void
828 lnet_destroy_rc_data(lnet_rc_data_t *rcd)
829 {
830 	LASSERT(list_empty(&rcd->rcd_list));
831 	/* detached from network */
832 	LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
833 
834 	if (rcd->rcd_gateway != NULL) {
835 		int cpt = rcd->rcd_gateway->lp_cpt;
836 
837 		lnet_net_lock(cpt);
838 		lnet_peer_decref_locked(rcd->rcd_gateway);
839 		lnet_net_unlock(cpt);
840 	}
841 
842 	if (rcd->rcd_pinginfo != NULL)
843 		LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
844 
845 	LIBCFS_FREE(rcd, sizeof(*rcd));
846 }
847 
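/* Allocate the router-checker data (ping buffer and MD) for a gateway.
 * The net lock is dropped while allocating and re-taken before attaching;
 * if the gateway stopped being a router in the meantime, or another thread
 * attached an rcd first, the new one is discarded and the gateway's current
 * lp_rcd (possibly NULL) is returned instead. */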
848 lnet_rc_data_t *
849 lnet_create_rc_data_locked(lnet_peer_t *gateway)
850 {
851 	lnet_rc_data_t		*rcd = NULL;
852 	lnet_ping_info_t	*pi;
853 	int			rc;
854 	int			i;
855 
856 	lnet_net_unlock(gateway->lp_cpt);
857 
858 	LIBCFS_ALLOC(rcd, sizeof(*rcd));
859 	if (rcd == NULL)
860 		goto out;
861 
862 	LNetInvalidateHandle(&rcd->rcd_mdh);
863 	INIT_LIST_HEAD(&rcd->rcd_list);
864 
865 	LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
866 	if (pi == NULL)
867 		goto out;
868 
869 	for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
870 		pi->pi_ni[i].ns_nid = LNET_NID_ANY;
871 		pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
872 	}
873 	rcd->rcd_pinginfo = pi;
874 
875 	LASSERT (!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
876 	rc = LNetMDBind((lnet_md_t){.start     = pi,
877 				    .user_ptr  = rcd,
878 				    .length    = LNET_PINGINFO_SIZE,
879 				    .threshold = LNET_MD_THRESH_INF,
880 				    .options   = LNET_MD_TRUNCATE,
881 				    .eq_handle = the_lnet.ln_rc_eqh},
882 			LNET_UNLINK,
883 			&rcd->rcd_mdh);
884 	if (rc < 0) {
885 		CERROR("Can't bind MD: %d\n", rc);
886 		goto out;
887 	}
888 	LASSERT(rc == 0);
889 
890 	lnet_net_lock(gateway->lp_cpt);
891 	/* router table changed or someone has created rcd for this gateway */
892 	if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
893 		lnet_net_unlock(gateway->lp_cpt);
894 		goto out;
895 	}
896 
897 	lnet_peer_addref_locked(gateway);
898 	rcd->rcd_gateway = gateway;
899 	gateway->lp_rcd = rcd;
900 	gateway->lp_ping_notsent = 0;
901 
902 	return rcd;
903 
904  out:
905 	if (rcd != NULL) {
906 		if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
907 			rc = LNetMDUnlink(rcd->rcd_mdh);
908 			LASSERT(rc == 0);
909 		}
910 		lnet_destroy_rc_data(rcd);
911 	}
912 
913 	lnet_net_lock(gateway->lp_cpt);
914 	return gateway->lp_rcd;
915 }
916 
917 static int
918 lnet_router_check_interval (lnet_peer_t *rtr)
919 {
920 	int secs;
921 
922 	secs = rtr->lp_alive ? live_router_check_interval :
923 			       dead_router_check_interval;
924 	if (secs < 0)
925 		secs = 0;
926 
927 	return secs;
928 }
929 
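/* Called with the net lock held for the router's CPT: time out any overdue
 * ping, deliver pending notifications, and issue a new ping via LNetGet()
 * if this router's check interval has elapsed. */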
930 static void
931 lnet_ping_router_locked (lnet_peer_t *rtr)
932 {
933 	lnet_rc_data_t *rcd = NULL;
934 	unsigned long      now = cfs_time_current();
935 	int	     secs;
936 
937 	lnet_peer_addref_locked(rtr);
938 
939 	if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
940 	    cfs_time_after(now, rtr->lp_ping_deadline))
941 		lnet_notify_locked(rtr, 1, 0, now);
942 
943 	/* Run any outstanding notifications */
944 	lnet_ni_notify_locked(rtr->lp_ni, rtr);
945 
946 	if (!lnet_isrouter(rtr) ||
947 	    the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
948 		/* router table changed or router checker is shutting down */
949 		lnet_peer_decref_locked(rtr);
950 		return;
951 	}
952 
953 	rcd = rtr->lp_rcd != NULL ?
954 	      rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
955 
956 	if (rcd == NULL)
957 		return;
958 
959 	secs = lnet_router_check_interval(rtr);
960 
961 	CDEBUG(D_NET,
962 	       "rtr %s %d: deadline %lu ping_notsent %d alive %d "
963 	       "alive_count %d lp_ping_timestamp %lu\n",
964 	       libcfs_nid2str(rtr->lp_nid), secs,
965 	       rtr->lp_ping_deadline, rtr->lp_ping_notsent,
966 	       rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
967 
968 	if (secs != 0 && !rtr->lp_ping_notsent &&
969 	    cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
970 					     cfs_time_seconds(secs)))) {
971 		int	       rc;
972 		lnet_process_id_t id;
973 		lnet_handle_md_t  mdh;
974 
975 		id.nid = rtr->lp_nid;
976 		id.pid = LUSTRE_SRV_LNET_PID;
977 		CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
978 
979 		rtr->lp_ping_notsent   = 1;
980 		rtr->lp_ping_timestamp = now;
981 
982 		mdh = rcd->rcd_mdh;
983 
984 		if (rtr->lp_ping_deadline == 0) {
985 			rtr->lp_ping_deadline =
986 				cfs_time_shift(router_ping_timeout);
987 		}
988 
989 		lnet_net_unlock(rtr->lp_cpt);
990 
991 		rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
992 			     LNET_PROTO_PING_MATCHBITS, 0);
993 
994 		lnet_net_lock(rtr->lp_cpt);
995 		if (rc != 0)
996 			rtr->lp_ping_notsent = 0; /* no event pending */
997 	}
998 
999 	lnet_peer_decref_locked(rtr);
1000 	return;
1001 }
1002 
1003 int
1004 lnet_router_checker_start(void)
1005 {
1006 	int	  rc;
1007 	int	  eqsz;
1008 
1009 	LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1010 
1011 	if (check_routers_before_use &&
1012 	    dead_router_check_interval <= 0) {
1013 		LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
1014 				   " set if 'check_routers_before_use' is set"
1015 				   "\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	if (!the_lnet.ln_routing &&
1020 	    live_router_check_interval <= 0 &&
1021 	    dead_router_check_interval <= 0)
1022 		return 0;
1023 
1024 	sema_init(&the_lnet.ln_rc_signal, 0);
1025 	/* EQ size doesn't matter; the callback is guaranteed to get every
1026 	 * event */
1027 	eqsz = 0;
1028 	rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
1029 			 &the_lnet.ln_rc_eqh);
1030 	if (rc != 0) {
1031 		CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
1032 		return -ENOMEM;
1033 	}
1034 
1035 	the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
1036 	rc = PTR_ERR(kthread_run(lnet_router_checker,
1037 				 NULL, "router_checker"));
1038 	if (IS_ERR_VALUE(rc)) {
1039 		CERROR("Can't start router checker thread: %d\n", rc);
1040 		/* block until event callback signals exit */
1041 		down(&the_lnet.ln_rc_signal);
1042 		rc = LNetEQFree(the_lnet.ln_rc_eqh);
1043 		LASSERT(rc == 0);
1044 		the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1045 		return -ENOMEM;
1046 	}
1047 
1048 	if (check_routers_before_use) {
1049 		/* Note that a helpful side-effect of pinging all known routers
1050 		 * at startup is that it makes them drop stale connections they
1051 		 * may have to a previous instance of me. */
1052 		lnet_wait_known_routerstate();
1053 	}
1054 
1055 	return 0;
1056 }
1057 
1058 void
1059 lnet_router_checker_stop (void)
1060 {
1061 	int rc;
1062 
1063 	if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
1064 		return;
1065 
1066 	LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1067 	the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
1068 
1069 	/* block until event callback signals exit */
1070 	down(&the_lnet.ln_rc_signal);
1071 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1072 
1073 	rc = LNetEQFree(the_lnet.ln_rc_eqh);
1074 	LASSERT (rc == 0);
1075 	return;
1076 }
1077 
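/* Unlink and free router-checker buffers.  RCDs queued on the deathrow
 * list are unlinked, then zombies are freed as their MD handles become
 * invalid; if 'wait_unlink' is set, keep retrying (warning periodically)
 * until every zombie has gone. */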
1078 static void
1079 lnet_prune_rc_data(int wait_unlink)
1080 {
1081 	lnet_rc_data_t		*rcd;
1082 	lnet_rc_data_t		*tmp;
1083 	lnet_peer_t		*lp;
1084 	struct list_head		head;
1085 	int			i = 2;
1086 
1087 	if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
1088 		   list_empty(&the_lnet.ln_rcd_deathrow) &&
1089 		   list_empty(&the_lnet.ln_rcd_zombie)))
1090 		return;
1091 
1092 	INIT_LIST_HEAD(&head);
1093 
1094 	lnet_net_lock(LNET_LOCK_EX);
1095 
1096 	if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
1097 		/* router checker is stopping, prune all */
1098 		list_for_each_entry(lp, &the_lnet.ln_routers,
1099 					lp_rtr_list) {
1100 			if (lp->lp_rcd == NULL)
1101 				continue;
1102 
1103 			LASSERT(list_empty(&lp->lp_rcd->rcd_list));
1104 			list_add(&lp->lp_rcd->rcd_list,
1105 				     &the_lnet.ln_rcd_deathrow);
1106 			lp->lp_rcd = NULL;
1107 		}
1108 	}
1109 
1110 	/* unlink all RCDs on deathrow list */
1111 	list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
1112 
1113 	if (!list_empty(&head)) {
1114 		lnet_net_unlock(LNET_LOCK_EX);
1115 
1116 		list_for_each_entry(rcd, &head, rcd_list)
1117 			LNetMDUnlink(rcd->rcd_mdh);
1118 
1119 		lnet_net_lock(LNET_LOCK_EX);
1120 	}
1121 
1122 	list_splice_init(&head, &the_lnet.ln_rcd_zombie);
1123 
1124 	/* release all zombie RCDs */
1125 	while (!list_empty(&the_lnet.ln_rcd_zombie)) {
1126 		list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
1127 					     rcd_list) {
1128 			if (LNetHandleIsInvalid(rcd->rcd_mdh))
1129 				list_move(&rcd->rcd_list, &head);
1130 		}
1131 
1132 		wait_unlink = wait_unlink &&
1133 			      !list_empty(&the_lnet.ln_rcd_zombie);
1134 
1135 		lnet_net_unlock(LNET_LOCK_EX);
1136 
1137 		while (!list_empty(&head)) {
1138 			rcd = list_entry(head.next,
1139 					     lnet_rc_data_t, rcd_list);
1140 			list_del_init(&rcd->rcd_list);
1141 			lnet_destroy_rc_data(rcd);
1142 		}
1143 
1144 		if (!wait_unlink)
1145 			return;
1146 
1147 		i++;
1148 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
1149 		       "Waiting for rc buffers to unlink\n");
1150 		set_current_state(TASK_UNINTERRUPTIBLE);
1151 		schedule_timeout(cfs_time_seconds(1) / 4);
1152 
1153 		lnet_net_lock(LNET_LOCK_EX);
1154 	}
1155 
1156 	lnet_net_unlock(LNET_LOCK_EX);
1157 }
1158 
1159 
1160 #if  defined(LNET_ROUTER)
1161 
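/* Main loop of the router checker thread: once a second walk the router
 * list (rescanning if it changes underneath us), ping each router, and
 * refresh local NI status when this node is routing.  On shutdown, wait
 * for all rc buffers to unlink before signalling completion. */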
1162 static int
1163 lnet_router_checker(void *arg)
1164 {
1165 	lnet_peer_t       *rtr;
1166 	struct list_head	*entry;
1167 
1168 	cfs_block_allsigs();
1169 
1170 	LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1171 
1172 	while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
1173 		__u64	version;
1174 		int	cpt;
1175 		int	cpt2;
1176 
1177 		cpt = lnet_net_lock_current();
1178 rescan:
1179 		version = the_lnet.ln_routers_version;
1180 
1181 		list_for_each(entry, &the_lnet.ln_routers) {
1182 			rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
1183 
1184 			cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
1185 			if (cpt != cpt2) {
1186 				lnet_net_unlock(cpt);
1187 				cpt = cpt2;
1188 				lnet_net_lock(cpt);
1189 				/* the routers list has changed */
1190 				if (version != the_lnet.ln_routers_version)
1191 					goto rescan;
1192 			}
1193 
1194 			lnet_ping_router_locked(rtr);
1195 
1196 			/* NB dropped lock */
1197 			if (version != the_lnet.ln_routers_version) {
1198 				/* the routers list has changed */
1199 				goto rescan;
1200 			}
1201 		}
1202 
1203 		if (the_lnet.ln_routing)
1204 			lnet_update_ni_status_locked();
1205 
1206 		lnet_net_unlock(cpt);
1207 
1208 		lnet_prune_rc_data(0); /* don't wait for UNLINK */
1209 
1210 		/* An uninterruptible schedule_timeout() here would add 1 to the
1211 		 * load average, because the kernel counts active tasks as
1212 		 * nr_running + nr_uninterruptible; hence the interruptible sleep. */
1213 		set_current_state(TASK_INTERRUPTIBLE);
1214 		schedule_timeout(cfs_time_seconds(1));
1215 	}
1216 
1217 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
1218 
1219 	lnet_prune_rc_data(1); /* wait for UNLINK */
1220 
1221 	the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1222 	up(&the_lnet.ln_rc_signal);
1223 	/* The unlink event callback will signal final completion */
1224 	return 0;
1225 }
1226 
1227 void
1228 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
1229 {
1230 	int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1231 
1232 	while (--npages >= 0)
1233 		__free_page(rb->rb_kiov[npages].kiov_page);
1234 
1235 	LIBCFS_FREE(rb, sz);
1236 }
1237 
1238 lnet_rtrbuf_t *
1239 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1240 {
1241 	int	    npages = rbp->rbp_npages;
1242 	int	    sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1243 	struct page   *page;
1244 	lnet_rtrbuf_t *rb;
1245 	int	    i;
1246 
1247 	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
1248 	if (rb == NULL)
1249 		return NULL;
1250 
1251 	rb->rb_pool = rbp;
1252 
1253 	for (i = 0; i < npages; i++) {
1254 		page = alloc_pages_node(
1255 				cfs_cpt_spread_node(lnet_cpt_table(), cpt),
1256 				__GFP_ZERO | GFP_IOFS, 0);
1257 		if (page == NULL) {
1258 			while (--i >= 0)
1259 				__free_page(rb->rb_kiov[i].kiov_page);
1260 
1261 			LIBCFS_FREE(rb, sz);
1262 			return NULL;
1263 		}
1264 
1265 		rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
1266 		rb->rb_kiov[i].kiov_offset = 0;
1267 		rb->rb_kiov[i].kiov_page = page;
1268 	}
1269 
1270 	return rb;
1271 }
1272 
1273 void
1274 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
1275 {
1276 	int		npages = rbp->rbp_npages;
1277 	int		nbuffers = 0;
1278 	lnet_rtrbuf_t	*rb;
1279 
1280 	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
1281 		return;
1282 
1283 	LASSERT (list_empty(&rbp->rbp_msgs));
1284 	LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
1285 
1286 	while (!list_empty(&rbp->rbp_bufs)) {
1287 		LASSERT (rbp->rbp_credits > 0);
1288 
1289 		rb = list_entry(rbp->rbp_bufs.next,
1290 				    lnet_rtrbuf_t, rb_list);
1291 		list_del(&rb->rb_list);
1292 		lnet_destroy_rtrbuf(rb, npages);
1293 		nbuffers++;
1294 	}
1295 
1296 	LASSERT (rbp->rbp_nbuffers == nbuffers);
1297 	LASSERT (rbp->rbp_credits == nbuffers);
1298 
1299 	rbp->rbp_nbuffers = rbp->rbp_credits = 0;
1300 }
1301 
1302 int
1303 lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
1304 {
1305 	lnet_rtrbuf_t *rb;
1306 	int	    i;
1307 
1308 	if (rbp->rbp_nbuffers != 0) {
1309 		LASSERT (rbp->rbp_nbuffers == nbufs);
1310 		return 0;
1311 	}
1312 
1313 	for (i = 0; i < nbufs; i++) {
1314 		rb = lnet_new_rtrbuf(rbp, cpt);
1315 
1316 		if (rb == NULL) {
1317 			CERROR("Failed to allocate %d router bufs of %d pages\n",
1318 			       nbufs, rbp->rbp_npages);
1319 			return -ENOMEM;
1320 		}
1321 
1322 		rbp->rbp_nbuffers++;
1323 		rbp->rbp_credits++;
1324 		rbp->rbp_mincredits++;
1325 		list_add(&rb->rb_list, &rbp->rbp_bufs);
1326 
1327 		/* No allocation "under fire" */
1328 		/* Otherwise we'd need code to schedule blocked msgs etc */
1329 		LASSERT (!the_lnet.ln_routing);
1330 	}
1331 
1332 	LASSERT (rbp->rbp_credits == nbufs);
1333 	return 0;
1334 }
1335 
1336 void
1337 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
1338 {
1339 	INIT_LIST_HEAD(&rbp->rbp_msgs);
1340 	INIT_LIST_HEAD(&rbp->rbp_bufs);
1341 
1342 	rbp->rbp_npages = npages;
1343 	rbp->rbp_credits = 0;
1344 	rbp->rbp_mincredits = 0;
1345 }
1346 
1347 void
1348 lnet_rtrpools_free(void)
1349 {
1350 	lnet_rtrbufpool_t *rtrp;
1351 	int		  i;
1352 
1353 	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
1354 		return;
1355 
1356 	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1357 		lnet_rtrpool_free_bufs(&rtrp[0]);
1358 		lnet_rtrpool_free_bufs(&rtrp[1]);
1359 		lnet_rtrpool_free_bufs(&rtrp[2]);
1360 	}
1361 
1362 	cfs_percpt_free(the_lnet.ln_rtrpools);
1363 	the_lnet.ln_rtrpools = NULL;
1364 }
1365 
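/* The lnet_nrb_*_calculate() helpers turn the aggregate *_router_buffers
 * module parameters (or the built-in defaults) into a per-CPT buffer
 * count, clamped to the per-CPT minimum; a negative parameter is an error
 * when routing is enabled. */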
1366 static int
1367 lnet_nrb_tiny_calculate(int npages)
1368 {
1369 	int	nrbs = LNET_NRB_TINY;
1370 
1371 	if (tiny_router_buffers < 0) {
1372 		LCONSOLE_ERROR_MSG(0x10c,
1373 				   "tiny_router_buffers=%d invalid when "
1374 				   "routing enabled\n", tiny_router_buffers);
1375 		return -1;
1376 	}
1377 
1378 	if (tiny_router_buffers > 0)
1379 		nrbs = tiny_router_buffers;
1380 
1381 	nrbs /= LNET_CPT_NUMBER;
1382 	return max(nrbs, LNET_NRB_TINY_MIN);
1383 }
1384 
1385 static int
1386 lnet_nrb_small_calculate(int npages)
1387 {
1388 	int	nrbs = LNET_NRB_SMALL;
1389 
1390 	if (small_router_buffers < 0) {
1391 		LCONSOLE_ERROR_MSG(0x10c,
1392 				   "small_router_buffers=%d invalid when "
1393 				   "routing enabled\n", small_router_buffers);
1394 		return -1;
1395 	}
1396 
1397 	if (small_router_buffers > 0)
1398 		nrbs = small_router_buffers;
1399 
1400 	nrbs /= LNET_CPT_NUMBER;
1401 	return max(nrbs, LNET_NRB_SMALL_MIN);
1402 }
1403 
1404 static int
1405 lnet_nrb_large_calculate(int npages)
1406 {
1407 	int	nrbs = LNET_NRB_LARGE;
1408 
1409 	if (large_router_buffers < 0) {
1410 		LCONSOLE_ERROR_MSG(0x10c,
1411 				   "large_router_buffers=%d invalid when "
1412 				   "routing enabled\n", large_router_buffers);
1413 		return -1;
1414 	}
1415 
1416 	if (large_router_buffers > 0)
1417 		nrbs = large_router_buffers;
1418 
1419 	nrbs /= LNET_CPT_NUMBER;
1420 	return max(nrbs, LNET_NRB_LARGE_MIN);
1421 }
1422 
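/* Set up the per-CPT tiny (0 payload), small (1 page) and large (LNET_MTU)
 * router buffer pools and turn routing on.  Pools are created when the
 * 'forwarding' module parameter is "enabled", or when it is unset and the
 * caller passes im_a_router. */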
1423 int
1424 lnet_rtrpools_alloc(int im_a_router)
1425 {
1426 	lnet_rtrbufpool_t *rtrp;
1427 	int	large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1428 	int	small_pages = 1;
1429 	int	nrb_tiny;
1430 	int	nrb_small;
1431 	int	nrb_large;
1432 	int	rc;
1433 	int	i;
1434 
1435 	if (!strcmp(forwarding, "")) {
1436 		/* not set either way */
1437 		if (!im_a_router)
1438 			return 0;
1439 	} else if (!strcmp(forwarding, "disabled")) {
1440 		/* explicitly disabled */
1441 		return 0;
1442 	} else if (!strcmp(forwarding, "enabled")) {
1443 		/* explicitly enabled */
1444 	} else {
1445 		LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
1446 				   "'enabled' or 'disabled'\n");
1447 		return -EINVAL;
1448 	}
1449 
1450 	nrb_tiny = lnet_nrb_tiny_calculate(0);
1451 	if (nrb_tiny < 0)
1452 		return -EINVAL;
1453 
1454 	nrb_small = lnet_nrb_small_calculate(small_pages);
1455 	if (nrb_small < 0)
1456 		return -EINVAL;
1457 
1458 	nrb_large = lnet_nrb_large_calculate(large_pages);
1459 	if (nrb_large < 0)
1460 		return -EINVAL;
1461 
1462 	the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
1463 						LNET_NRBPOOLS *
1464 						sizeof(lnet_rtrbufpool_t));
1465 	if (the_lnet.ln_rtrpools == NULL) {
1466 		LCONSOLE_ERROR_MSG(0x10c,
1467 				   "Failed to initialize router buffer pools\n");
1468 		return -ENOMEM;
1469 	}
1470 
1471 	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1472 		lnet_rtrpool_init(&rtrp[0], 0);
1473 		rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
1474 		if (rc != 0)
1475 			goto failed;
1476 
1477 		lnet_rtrpool_init(&rtrp[1], small_pages);
1478 		rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
1479 		if (rc != 0)
1480 			goto failed;
1481 
1482 		lnet_rtrpool_init(&rtrp[2], large_pages);
1483 		rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
1484 		if (rc != 0)
1485 			goto failed;
1486 	}
1487 
1488 	lnet_net_lock(LNET_LOCK_EX);
1489 	the_lnet.ln_routing = 1;
1490 	lnet_net_unlock(LNET_LOCK_EX);
1491 
1492 	return 0;
1493 
1494  failed:
1495 	lnet_rtrpools_free();
1496 	return rc;
1497 }
1498 
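/* Entry point for aliveness notifications from an LND (ni != NULL) or from
 * userspace (ni == NULL).  Reports from the future and cross-net reports
 * are rejected with -EINVAL; LND death reports are ignored when auto_down
 * is disabled; otherwise the peer's state is updated and any pending
 * notification is delivered. */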
1499 int
1500 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
1501 {
1502 	struct lnet_peer	*lp = NULL;
1503 	unsigned long		now = cfs_time_current();
1504 	int			cpt = lnet_cpt_of_nid(nid);
1505 
1506 	LASSERT (!in_interrupt ());
1507 
1508 	CDEBUG (D_NET, "%s notifying %s: %s\n",
1509 		(ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1510 		libcfs_nid2str(nid),
1511 		alive ? "up" : "down");
1512 
1513 	if (ni != NULL &&
1514 	    LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
1515 		CWARN ("Ignoring notification of %s %s by %s (different net)\n",
1516 			libcfs_nid2str(nid), alive ? "birth" : "death",
1517 			libcfs_nid2str(ni->ni_nid));
1518 		return -EINVAL;
1519 	}
1520 
1521 	/* can't do predictions... */
1522 	if (cfs_time_after(when, now)) {
1523 		CWARN ("Ignoring prediction from %s of %s %s "
1524 		       "%ld seconds in the future\n",
1525 		       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1526 		       libcfs_nid2str(nid), alive ? "up" : "down",
1527 		       cfs_duration_sec(cfs_time_sub(when, now)));
1528 		return -EINVAL;
1529 	}
1530 
1531 	if (ni != NULL && !alive &&	     /* LND telling me she's down */
1532 	    !auto_down) {		       /* auto-down disabled */
1533 		CDEBUG(D_NET, "Auto-down disabled\n");
1534 		return 0;
1535 	}
1536 
1537 	lnet_net_lock(cpt);
1538 
1539 	if (the_lnet.ln_shutdown) {
1540 		lnet_net_unlock(cpt);
1541 		return -ESHUTDOWN;
1542 	}
1543 
1544 	lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
1545 	if (lp == NULL) {
1546 		/* nid not found */
1547 		lnet_net_unlock(cpt);
1548 		CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
1549 		return 0;
1550 	}
1551 
1552 	/* We can't fully trust the LND to report an exact peer last_alive
1553 	 * when it notifies us about a dead peer. For example ksocklnd can
1554 	 * call us with when == _time_when_the_node_was_booted_ if
1555 	 * no connections were successfully established */
1556 	if (ni != NULL && !alive && when < lp->lp_last_alive)
1557 		when = lp->lp_last_alive;
1558 
1559 	lnet_notify_locked(lp, ni == NULL, alive, when);
1560 
1561 	lnet_ni_notify_locked(ni, lp);
1562 
1563 	lnet_peer_decref_locked(lp);
1564 
1565 	lnet_net_unlock(cpt);
1566 	return 0;
1567 }
1568 EXPORT_SYMBOL(lnet_notify);
1569 
1570 void
1571 lnet_get_tunables (void)
1572 {
1573 	return;
1574 }
1575 
1576 #else
1577 
1578 int
1579 lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
1580 {
1581 	return -EOPNOTSUPP;
1582 }
1583 
1584 void
1585 lnet_router_checker (void)
1586 {
1587 	static time_t last = 0;
1588 	static int    running = 0;
1589 
1590 	time_t	    now = get_seconds();
1591 	int	       interval = now - last;
1592 	int	       rc;
1593 	__u64	     version;
1594 	lnet_peer_t      *rtr;
1595 
1596 	/* It's no use to call me again within a sec - all intervals and
1597 	 * timeouts are measured in seconds */
1598 	if (last != 0 && interval < 2)
1599 		return;
1600 
1601 	if (last != 0 &&
1602 	    interval > MAX(live_router_check_interval,
1603 			   dead_router_check_interval))
1604 		CNETERR("Checker(%d/%d) not called for %d seconds\n",
1605 			live_router_check_interval, dead_router_check_interval,
1606 			interval);
1607 
1608 	LASSERT(LNET_CPT_NUMBER == 1);
1609 
1610 	lnet_net_lock(0);
1611 	LASSERT(!running); /* recursion check */
1612 	running = 1;
1613 	lnet_net_unlock(0);
1614 
1615 	last = now;
1616 
1617 	if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING)
1618 		lnet_prune_rc_data(0); /* unlink all rcd and nowait */
1619 
1620 	/* consume all pending events */
1621 	while (1) {
1622 		int	  i;
1623 		lnet_event_t ev;
1624 
1625 		/* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the
1626 		 * recursion breaker in LNetEQPoll would fail */
1627 		rc = LNetEQPoll(&the_lnet.ln_rc_eqh, 1, 0, &ev, &i);
1628 		if (rc == 0)   /* no event pending */
1629 			break;
1630 
1631 		/* NB a lost SENT prevents me from pinging a router again */
1632 		if (rc == -EOVERFLOW) {
1633 			CERROR("Dropped an event!!!\n");
1634 			abort();
1635 		}
1636 
1637 		LASSERT (rc == 1);
1638 
1639 		lnet_router_checker_event(&ev);
1640 	}
1641 
1642 	if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING) {
1643 		lnet_prune_rc_data(1); /* release rcd */
1644 		the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1645 		running = 0;
1646 		return;
1647 	}
1648 
1649 	LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1650 
1651 	lnet_net_lock(0);
1652 
1653 	version = the_lnet.ln_routers_version;
1654 	list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
1655 		lnet_ping_router_locked(rtr);
1656 		LASSERT (version == the_lnet.ln_routers_version);
1657 	}
1658 
1659 	lnet_net_unlock(0);
1660 
1661 	running = 0; /* lock only needed for the recursion check */
1662 	return;
1663 }
1664 
1665 /* NB lnet_peers_start_down depends on me,
1666  * so must be called before any peer creation */
1667 void
1668 lnet_get_tunables (void)
1669 {
1670 	char *s;
1671 
1672 	s = getenv("LNET_ROUTER_PING_TIMEOUT");
1673 	if (s != NULL) router_ping_timeout = atoi(s);
1674 
1675 	s = getenv("LNET_LIVE_ROUTER_CHECK_INTERVAL");
1676 	if (s != NULL) live_router_check_interval = atoi(s);
1677 
1678 	s = getenv("LNET_DEAD_ROUTER_CHECK_INTERVAL");
1679 	if (s != NULL) dead_router_check_interval = atoi(s);
1680 
1681 	/* This replaces old lnd_notify mechanism */
1682 	check_routers_before_use = 1;
1683 	if (dead_router_check_interval <= 0)
1684 		dead_router_check_interval = 30;
1685 }
1686 
1687 void
1688 lnet_rtrpools_free(void)
1689 {
1690 }
1691 
1692 int
1693 lnet_rtrpools_alloc(int im_a_router)
1694 {
1695 	return 0;
1696 }
1697 
1698 #endif
1699