/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/lnet/lib-lnet.h"

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;

	LASSERT(the_lnet.ln_init);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	if (threshold != 0) {
		/* Adding a new entry */
		LIBCFS_ALLOC(tp, sizeof(*tp));
		if (tp == NULL)
			return -ENOMEM;

		tp->tp_nid = nid;
		tp->tp_threshold = threshold;

		lnet_net_lock(0);
		list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
		lnet_net_unlock(0);
		return 0;
	}

	/* removing entries */
	INIT_LIST_HEAD(&cull);

	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (tp->tp_threshold == 0 ||    /* needs culling anyway */
		    nid == LNET_NID_ANY ||      /* removing all entries */
		    tp->tp_nid == nid) {        /* matched this one */
			list_del(&tp->tp_list);
			list_add(&tp->tp_list, &cull);
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);

		list_del(&tp->tp_list);
		LIBCFS_FREE(tp, sizeof(*tp));
	}
	return 0;
}
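
/*
 * Usage sketch (illustrative, not part of the original file): lnet_fail_nid()
 * arms or disarms message-drop fault injection for a peer, e.g.
 *
 *	lnet_fail_nid(nid, 3);            - drop the next 3 messages for 'nid'
 *	lnet_fail_nid(nid, 0);            - remove the entry for 'nid' again
 *	lnet_fail_nid(LNET_NID_ANY, 0);   - clear all test-peer entries
 *
 * fail_peer() below consults the same list on each send/receive.
 */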

static int
fail_peer(lnet_nid_t nid, int outgoing)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;
	int fail = 0;

	INIT_LIST_HEAD(&cull);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (tp->tp_threshold == 0) {
			/* zombie entry */
			if (outgoing) {
				/* only cull zombies on outgoing tests,
				 * since we may be at interrupt priority on
				 * incoming messages. */
				list_del(&tp->tp_list);
				list_add(&tp->tp_list, &cull);
			}
			continue;
		}

		if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
		    nid == tp->tp_nid) {          /* fail this peer */
			fail = 1;

			if (tp->tp_threshold != LNET_MD_THRESH_INF) {
				tp->tp_threshold--;
				if (outgoing &&
				    tp->tp_threshold == 0) {
					/* see above */
					list_del(&tp->tp_list);
					list_add(&tp->tp_list, &cull);
				}
			}
			break;
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
		list_del(&tp->tp_list);

		LIBCFS_FREE(tp, sizeof(*tp));
	}

	return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct iovec *iov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (iov++)->iov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);

void
lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
		  unsigned int nsiov, struct iovec *siov, unsigned int soffset,
		  unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;

	if (nob == 0)
		return;

	/* skip complete frags before 'doffset' */
	LASSERT(ndiov > 0);
	while (doffset >= diov->iov_len) {
		doffset -= diov->iov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	/* skip complete frags before 'soffset' */
	LASSERT(nsiov > 0);
	while (soffset >= siov->iov_len) {
		soffset -= siov->iov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = MIN(diov->iov_len - doffset,
			       siov->iov_len - soffset);
		this_nob = MIN(this_nob, nob);

		memcpy((char *)diov->iov_base + doffset,
		       (char *)siov->iov_base + soffset, this_nob);
		nob -= this_nob;

		if (diov->iov_len > doffset + this_nob) {
			doffset += this_nob;
		} else {
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->iov_len > soffset + this_nob) {
			soffset += this_nob;
		} else {
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);
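
/*
 * Worked example (illustrative, not part of the original file): with source
 * frags {4 bytes, 8 bytes}, soffset = 6 lands 2 bytes into the second frag;
 * copying nob = 4 from there into a 16-byte destination frag at doffset = 0
 * takes a single memcpy of 4 bytes.  Larger copies simply walk both vectors
 * frag by frag, advancing whichever side is exhausted, until 'nob' reaches 0.
 */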

int
lnet_extract_iov(int dst_niov, struct iovec *dst,
		 int src_niov, struct iovec *src,
		 unsigned int offset, unsigned int len)
{
	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src' */
	unsigned int frag_len;
	unsigned int niov;

	if (len == 0)                         /* no data => */
		return 0;                     /* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->iov_len) {      /* skip initial frags */
		offset -= src->iov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->iov_len - offset;
		dst->iov_base = ((char *)src->iov_base) + offset;

		if (len <= frag_len) {
			dst->iov_len = len;
			return niov;
		}

		dst->iov_len = frag_len;

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_iov);
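
/*
 * Worked example (illustrative, not part of the original file): extracting
 * offset = 5, len = 10 from src frags {8, 8, 8} yields two dst entries,
 * {src[0].iov_base + 5, 3 bytes} and {src[1].iov_base, 7 bytes}, and the
 * function returns 2.  The dst entries alias the src buffers; no data is
 * copied.
 */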


unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (kiov++)->kiov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
		    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
		    unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;
	char *daddr = NULL;
	char *saddr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(ndiov > 0);
	while (doffset >= diov->kiov_len) {
		doffset -= diov->kiov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	LASSERT(nsiov > 0);
	while (soffset >= siov->kiov_len) {
		soffset -= siov->kiov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = MIN(diov->kiov_len - doffset,
			       siov->kiov_len - soffset);
		this_nob = MIN(this_nob, nob);

		if (daddr == NULL)
			daddr = ((char *)kmap(diov->kiov_page)) +
				diov->kiov_offset + doffset;
		if (saddr == NULL)
			saddr = ((char *)kmap(siov->kiov_page)) +
				siov->kiov_offset + soffset;

		/* Vanishing risk of kmap deadlock when mapping 2 pages.
		 * However in practice at least one of the kiovs will be mapped
		 * kernel pages and the map/unmap will be NOOPs */

		memcpy(daddr, saddr, this_nob);
		nob -= this_nob;

		if (diov->kiov_len > doffset + this_nob) {
			daddr += this_nob;
			doffset += this_nob;
		} else {
			kunmap(diov->kiov_page);
			daddr = NULL;
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->kiov_len > soffset + this_nob) {
			saddr += this_nob;
			soffset += this_nob;
		} else {
			kunmap(siov->kiov_page);
			saddr = NULL;
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);

	if (daddr != NULL)
		kunmap(diov->kiov_page);
	if (saddr != NULL)
		kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
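
/*
 * NB on the kmap/kunmap discipline above (editorial note): at most one
 * destination page and one source page are mapped at any time; a page is
 * unmapped as soon as the copy crosses its frag boundary, and the trailing
 * daddr/saddr NULL checks release the final mappings when 'nob' runs out
 * mid-frag.  The kiov2iov and iov2kiov copies below follow the same pattern
 * with a single mapped page.
 */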

void
lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov, unsigned int iovoffset,
		   unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int nob)
{
	/* NB iov, kiov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	do {
		LASSERT(niov > 0);
		LASSERT(nkiov > 0);
		this_nob = MIN(iov->iov_len - iovoffset,
			       kiov->kiov_len - kiovoffset);
		this_nob = MIN(this_nob, nob);

		if (addr == NULL)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
		nob -= this_nob;

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

	} while (nob > 0);

	if (addr != NULL)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int niov,
		   struct iovec *iov, unsigned int iovoffset,
		   unsigned int nob)
{
	/* NB kiov, iov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	do {
		LASSERT(nkiov > 0);
		LASSERT(niov > 0);
		this_nob = MIN(kiov->kiov_len - kiovoffset,
			       iov->iov_len - iovoffset);
		this_nob = MIN(this_nob, nob);

		if (addr == NULL)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
		nob -= this_nob;

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}
	} while (nob > 0);

	if (addr != NULL)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
		  int src_niov, lnet_kiov_t *src,
		  unsigned int offset, unsigned int len)
{
	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src' */
	unsigned int frag_len;
	unsigned int niov;

	if (len == 0)                          /* no data => */
		return 0;                      /* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->kiov_len) {      /* skip initial frags */
		offset -= src->kiov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->kiov_len - offset;
		dst->kiov_page = src->kiov_page;
		dst->kiov_offset = src->kiov_offset + offset;

		if (len <= frag_len) {
			dst->kiov_len = len;
			LASSERT(dst->kiov_offset + dst->kiov_len
					<= PAGE_CACHE_SIZE);
			return niov;
		}

		dst->kiov_len = frag_len;
		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_kiov);

void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	     unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	unsigned int niov = 0;
	struct iovec *iov = NULL;
	lnet_kiov_t *kiov = NULL;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(mlen == 0 || msg != NULL);

	if (msg != NULL) {
		LASSERT(msg->msg_receiving);
		LASSERT(!msg->msg_sending);
		LASSERT(rlen == msg->msg_len);
		LASSERT(mlen <= msg->msg_len);
		LASSERT(msg->msg_offset == offset);
		LASSERT(msg->msg_wanted == mlen);

		msg->msg_receiving = 0;

		if (mlen != 0) {
			niov = msg->msg_niov;
			iov  = msg->msg_iov;
			kiov = msg->msg_kiov;

			LASSERT(niov > 0);
			LASSERT((iov == NULL) != (kiov == NULL));
		}
	}

	rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
				    niov, iov, kiov, offset, mlen, rlen);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
	lnet_libmd_t *md = msg->msg_md;

	LASSERT(msg->msg_len > 0);
	LASSERT(!msg->msg_routing);
	LASSERT(md != NULL);
	LASSERT(msg->msg_niov == 0);
	LASSERT(msg->msg_iov == NULL);
	LASSERT(msg->msg_kiov == NULL);

	msg->msg_niov = md->md_niov;
	if ((md->md_options & LNET_MD_KIOV) != 0)
		msg->msg_kiov = md->md_iov.kiov;
	else
		msg->msg_iov = md->md_iov.iov;
}

void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
	       unsigned int offset, unsigned int len)
{
	msg->msg_type = type;
	msg->msg_target = target;
	msg->msg_len = len;
	msg->msg_offset = offset;

	if (len != 0)
		lnet_setpayloadbuffer(msg);

	memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
	msg->msg_hdr.type           = cpu_to_le32(type);
	msg->msg_hdr.dest_nid       = cpu_to_le64(target.nid);
	msg->msg_hdr.dest_pid       = cpu_to_le32(target.pid);
	/* src_nid will be set later */
	msg->msg_hdr.src_pid        = cpu_to_le32(the_lnet.ln_pid);
	msg->msg_hdr.payload_length = cpu_to_le32(len);
}
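
/*
 * NB (editorial note): the wire header is little-endian, hence the
 * cpu_to_le*() conversions above.  src_nid is filled in by lnet_send() once
 * the outgoing NI is known, and lnet_parse() performs the matching
 * le*_to_cpu() conversions on receive.
 */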

void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *priv = msg->msg_private;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
		(msg->msg_txcredit && msg->msg_peertxcredit));

	rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc;

	LASSERT(!msg->msg_sending);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_rx_ready_delay);
	LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);

	msg->msg_rx_ready_delay = 1;
	rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
					  &msg->msg_private);
	if (rc != 0) {
		CERROR("recv from %s / send to %s aborted: "
		       "eager_recv failed %d\n",
		       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
		       libcfs_id2str(msg->msg_target), rc);
		LASSERT(rc < 0); /* required by my callers */
	}

	return rc;
}

/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
	unsigned long last_alive = 0;

	LASSERT(lnet_peer_aliveness_enabled(lp));
	LASSERT(ni->ni_lnd->lnd_query != NULL);

	lnet_net_unlock(lp->lp_cpt);
	(ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
	lnet_net_lock(lp->lp_cpt);

	lp->lp_last_query = cfs_time_current();

	if (last_alive != 0) /* NI has updated timestamp */
		lp->lp_last_alive = last_alive;
}

/* NB: always called with lnet_net_lock held */
static inline int
lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
{
	int alive;
	unsigned long deadline;

	LASSERT(lnet_peer_aliveness_enabled(lp));

	/* Trust lnet_notify() if it has more recent aliveness news, but
	 * ignore the initial assumed death (see lnet_peers_start_down()).
	 */
	if (!lp->lp_alive && lp->lp_alive_count > 0 &&
	    cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
		return 0;

	deadline = cfs_time_add(lp->lp_last_alive,
				cfs_time_seconds(lp->lp_ni->ni_peertimeout));
	alive = cfs_time_after(deadline, now);

	/* Update obsolete lp_alive except for routers assumed to be dead
	 * initially, because router checker would update aliveness in this
	 * case, and moreover lp_last_alive at peer creation is assumed.
	 */
	if (alive && !lp->lp_alive &&
	    !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);

	return alive;
}


/* NB: returns 1 when alive, 0 when dead, negative when error;
 *     may drop the lnet_net_lock */
int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
	unsigned long now = cfs_time_current();

	if (!lnet_peer_aliveness_enabled(lp))
		return -ENODEV;

	if (lnet_peer_is_alive(lp, now))
		return 1;

	/* Peer appears dead, but we should avoid frequent NI queries (at
	 * most once per lnet_queryinterval seconds). */
	if (lp->lp_last_query != 0) {
		static const int lnet_queryinterval = 1;

		unsigned long next_query =
			   cfs_time_add(lp->lp_last_query,
					cfs_time_seconds(lnet_queryinterval));

		if (time_before(now, next_query)) {
			if (lp->lp_alive)
				CWARN("Unexpected aliveness of peer %s: "
				      "%d < %d (%d/%d)\n",
				      libcfs_nid2str(lp->lp_nid),
				      (int)now, (int)next_query,
				      lnet_queryinterval,
				      lp->lp_ni->ni_peertimeout);
			return 0;
		}
	}

	/* query NI for latest aliveness news */
	lnet_ni_query_locked(lp->lp_ni, lp);

	if (lnet_peer_is_alive(lp, now))
		return 1;

	lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
	return 0;
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *	  lnet_send() is going to lnet_net_unlock immediately after this, so
 *	  it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval 0 If \a msg sent or OK to send.
 * \retval EAGAIN If \a msg blocked for credit.
 * \retval EHOSTUNREACH If the next hop of the message appears dead.
 * \retval ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
{
	lnet_peer_t *lp = msg->msg_txpeer;
	lnet_ni_t *ni = lp->lp_ni;
	int cpt = msg->msg_tx_cpt;
	struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];

	/* non-lnet_send() callers have checked before */
	LASSERT(!do_send || msg->msg_tx_delayed);
	LASSERT(!msg->msg_receiving);
	LASSERT(msg->msg_tx_committed);

	/* NB 'lp' is always the next hop */
	if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
	    lnet_peer_alive_locked(lp) == 0) {
		the_lnet.ln_counters[cpt]->drop_count++;
		the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
		lnet_net_unlock(cpt);

		CNETERR("Dropping message for %s: peer not alive\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -EHOSTUNREACH);

		lnet_net_lock(cpt);
		return EHOSTUNREACH;
	}

	if (msg->msg_md != NULL &&
	    (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
		lnet_net_unlock(cpt);

		CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
			"called on the MD/ME.\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -ECANCELED);

		lnet_net_lock(cpt);
		return ECANCELED;
	}

	if (!msg->msg_peertxcredit) {
		LASSERT((lp->lp_txcredits < 0) ==
			!list_empty(&lp->lp_txq));

		msg->msg_peertxcredit = 1;
		lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
		lp->lp_txcredits--;

		if (lp->lp_txcredits < lp->lp_mintxcredits)
			lp->lp_mintxcredits = lp->lp_txcredits;

		if (lp->lp_txcredits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_txq);
			return EAGAIN;
		}
	}

	if (!msg->msg_txcredit) {
		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		msg->msg_txcredit = 1;
		tq->tq_credits--;

		if (tq->tq_credits < tq->tq_credits_min)
			tq->tq_credits_min = tq->tq_credits;

		if (tq->tq_credits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &tq->tq_delayed);
			return EAGAIN;
		}
	}

	if (do_send) {
		lnet_net_unlock(cpt);
		lnet_ni_send(ni, msg);
		lnet_net_lock(cpt);
	}
	return 0;
}
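
/*
 * NB (editorial note): lnet_post_send_locked() deliberately returns
 * *positive* EAGAIN/EHOSTUNREACH/ECANCELED so callers can tell "queued or
 * already finalized here" apart from hard failures; lnet_send() negates
 * EHOSTUNREACH/ECANCELED before returning them and treats EAGAIN (blocked
 * waiting for a credit) as success.
 */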


lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
	lnet_rtrbufpool_t *rbp;
	int cpt;

	LASSERT(msg->msg_rx_committed);

	cpt = msg->msg_rx_cpt;
	rbp = &the_lnet.ln_rtrpools[cpt][0];

	LASSERT(msg->msg_len <= LNET_MTU);
	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
		rbp++;
		LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
	}

	return rbp;
}
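
/*
 * Worked example (illustrative, not part of the original file; assumes the
 * usual pool configuration, which is set up elsewhere): the pools in
 * ln_rtrpools[cpt][] are ordered by increasing rbp_npages, so the loop picks
 * the first pool whose buffers (rbp_npages * PAGE_CACHE_SIZE bytes) can hold
 * msg_len, e.g. a 1-byte message falls through the zero-page pool into the
 * single-page pool, while anything up to LNET_MTU fits the largest pool,
 * which the LASSERTs guarantee exists.
 */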

int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
	/* lnet_parse is going to lnet_net_unlock immediately after this, so it
	 * sets do_recv FALSE and I don't do the unlock/send/lock bit.  I
	 * return EAGAIN if msg blocked and 0 if received or OK to receive */
	lnet_peer_t *lp = msg->msg_rxpeer;
	lnet_rtrbufpool_t *rbp;
	lnet_rtrbuf_t *rb;

	LASSERT(msg->msg_iov == NULL);
	LASSERT(msg->msg_kiov == NULL);
	LASSERT(msg->msg_niov == 0);
	LASSERT(msg->msg_routing);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_sending);

	/* non-lnet_parse callers only receive delayed messages */
	LASSERT(!do_recv || msg->msg_rx_delayed);

	if (!msg->msg_peerrtrcredit) {
		LASSERT((lp->lp_rtrcredits < 0) ==
			!list_empty(&lp->lp_rtrq));

		msg->msg_peerrtrcredit = 1;
		lp->lp_rtrcredits--;
		if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
			lp->lp_minrtrcredits = lp->lp_rtrcredits;

		if (lp->lp_rtrcredits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_rtrq);
			return EAGAIN;
		}
	}

	rbp = lnet_msg2bufpool(msg);

	if (!msg->msg_rtrcredit) {
		LASSERT((rbp->rbp_credits < 0) ==
			!list_empty(&rbp->rbp_msgs));

		msg->msg_rtrcredit = 1;
		rbp->rbp_credits--;
		if (rbp->rbp_credits < rbp->rbp_mincredits)
			rbp->rbp_mincredits = rbp->rbp_credits;

		if (rbp->rbp_credits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
			return EAGAIN;
		}
	}

	LASSERT(!list_empty(&rbp->rbp_bufs));
	rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
	list_del(&rb->rb_list);

	msg->msg_niov = rbp->rbp_npages;
	msg->msg_kiov = &rb->rb_kiov[0];

	if (do_recv) {
		int cpt = msg->msg_rx_cpt;

		lnet_net_unlock(cpt);
		lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
			     0, msg->msg_len, msg->msg_len);
		lnet_net_lock(cpt);
	}
	return 0;
}

void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *txpeer = msg->msg_txpeer;
	lnet_msg_t *msg2;

	if (msg->msg_txcredit) {
		struct lnet_ni *ni = txpeer->lp_ni;
		struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

		/* give back NI txcredits */
		msg->msg_txcredit = 0;

		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		tq->tq_credits++;
		if (tq->tq_credits <= 0) {
			msg2 = list_entry(tq->tq_delayed.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer->lp_ni == ni);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (msg->msg_peertxcredit) {
		/* give back peer txcredits */
		msg->msg_peertxcredit = 0;

		LASSERT((txpeer->lp_txcredits < 0) ==
			!list_empty(&txpeer->lp_txq));

		txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
		LASSERT(txpeer->lp_txqnob >= 0);

		txpeer->lp_txcredits++;
		if (txpeer->lp_txcredits <= 0) {
			msg2 = list_entry(txpeer->lp_txq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer == txpeer);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (txpeer != NULL) {
		msg->msg_txpeer = NULL;
		lnet_peer_decref_locked(txpeer);
	}
}

void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *rxpeer = msg->msg_rxpeer;
	lnet_msg_t *msg2;

	if (msg->msg_rtrcredit) {
		/* give back global router credits */
		lnet_rtrbuf_t *rb;
		lnet_rtrbufpool_t *rbp;

		/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
		 * there until it gets one allocated, or aborts the wait
		 * itself */
		LASSERT(msg->msg_kiov != NULL);

		rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
		rbp = rb->rb_pool;
		LASSERT(rbp == lnet_msg2bufpool(msg));

		msg->msg_kiov = NULL;
		msg->msg_rtrcredit = 0;

		LASSERT((rbp->rbp_credits < 0) ==
			!list_empty(&rbp->rbp_msgs));
		LASSERT((rbp->rbp_credits > 0) ==
			!list_empty(&rbp->rbp_bufs));

		list_add(&rb->rb_list, &rbp->rbp_bufs);
		rbp->rbp_credits++;
		if (rbp->rbp_credits <= 0) {
			msg2 = list_entry(rbp->rbp_msgs.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}

	if (msg->msg_peerrtrcredit) {
		/* give back peer router credits */
		msg->msg_peerrtrcredit = 0;

		LASSERT((rxpeer->lp_rtrcredits < 0) ==
			!list_empty(&rxpeer->lp_rtrq));

		rxpeer->lp_rtrcredits++;
		if (rxpeer->lp_rtrcredits <= 0) {
			msg2 = list_entry(rxpeer->lp_rtrq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}
	if (rxpeer != NULL) {
		msg->msg_rxpeer = NULL;
		lnet_peer_decref_locked(rxpeer);
	}
}

static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
	lnet_peer_t *p1 = r1->lr_gateway;
	lnet_peer_t *p2 = r2->lr_gateway;

	if (r1->lr_priority < r2->lr_priority)
		return 1;

	if (r1->lr_priority > r2->lr_priority)
		return -1;

	if (r1->lr_hops < r2->lr_hops)
		return 1;

	if (r1->lr_hops > r2->lr_hops)
		return -1;

	if (p1->lp_txqnob < p2->lp_txqnob)
		return 1;

	if (p1->lp_txqnob > p2->lp_txqnob)
		return -1;

	if (p1->lp_txcredits > p2->lp_txcredits)
		return 1;

	if (p1->lp_txcredits < p2->lp_txcredits)
		return -1;

	if (r1->lr_seq - r2->lr_seq <= 0)
		return 1;

	return -1;
}
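
/*
 * NB (editorial note): the comparison above prefers, in order, the lower
 * priority value, fewer hops, the gateway with less queued data (lp_txqnob),
 * the gateway with more free tx credits, and finally the lower round-robin
 * sequence number (lr_seq) as the tie-breaker; e.g. two one-hop routes of
 * equal priority are distinguished by queue depth before credits.
 */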

static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
{
	lnet_remotenet_t *rnet;
	lnet_route_t *rtr;
	lnet_route_t *rtr_best;
	lnet_route_t *rtr_last;
	struct lnet_peer *lp_best;
	struct lnet_peer *lp;
	int rc;

	/* If @rtr_nid is not LNET_NID_ANY, return the gateway with
	 * rtr_nid nid, otherwise find the best gateway I can use */

	rnet = lnet_find_net_locked(LNET_NIDNET(target));
	if (rnet == NULL)
		return NULL;

	lp_best = NULL;
	rtr_best = rtr_last = NULL;
	list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
		lp = rtr->lr_gateway;

		if (!lp->lp_alive || /* gateway is down */
		    ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
		     rtr->lr_downis != 0)) /* NI to target is down */
			continue;

		if (ni != NULL && lp->lp_ni != ni)
			continue;

		if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
			return lp;

		if (lp_best == NULL) {
			rtr_best = rtr_last = rtr;
			lp_best = lp;
			continue;
		}

		/* no protection on below fields, but it's harmless */
		if (rtr_last->lr_seq - rtr->lr_seq < 0)
			rtr_last = rtr;

		rc = lnet_compare_routes(rtr, rtr_best);
		if (rc < 0)
			continue;

		rtr_best = rtr;
		lp_best = lp;
	}

	/* set sequence number on the best router to the latest sequence + 1
	 * so we can round-robin all routers; it's racy and inaccurate but
	 * harmless and functional */
	if (rtr_best != NULL)
		rtr_best->lr_seq = rtr_last->lr_seq + 1;
	return lp_best;
}

int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
{
	lnet_nid_t dst_nid = msg->msg_target.nid;
	struct lnet_ni *src_ni;
	struct lnet_ni *local_ni;
	struct lnet_peer *lp;
	int cpt;
	int cpt2;
	int rc;

	/* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
	 * but we might want to use pre-determined router for ACK/REPLY
	 * in the future */
	/* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
	LASSERT(msg->msg_txpeer == NULL);
	LASSERT(!msg->msg_sending);
	LASSERT(!msg->msg_target_is_router);
	LASSERT(!msg->msg_receiving);

	msg->msg_sending = 1;

	LASSERT(!msg->msg_tx_committed);
	cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
 again:
	lnet_net_lock(cpt);

	if (the_lnet.ln_shutdown) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	if (src_nid == LNET_NID_ANY) {
		src_ni = NULL;
	} else {
		src_ni = lnet_nid2ni_locked(src_nid, cpt);
		if (src_ni == NULL) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Can't send to %s: src %s is not a "
				      "local nid\n", libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}
		LASSERT(!msg->msg_routing);
	}

	/* Is this for someone on a local network? */
	local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);

	if (local_ni != NULL) {
		if (src_ni == NULL) {
			src_ni = local_ni;
			src_nid = src_ni->ni_nid;
		} else if (src_ni == local_ni) {
			lnet_ni_decref_locked(local_ni, cpt);
		} else {
			lnet_ni_decref_locked(local_ni, cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("No route to %s from %s\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing)
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

		if (src_ni == the_lnet.ln_loni) {
			/* No send credit hassles with LOLND */
			lnet_net_unlock(cpt);
			lnet_ni_send(src_ni, msg);

			lnet_net_lock(cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			return 0;
		}

		rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
		/* lp has ref on src_ni; lose mine */
		lnet_ni_decref_locked(src_ni, cpt);
		if (rc != 0) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Error %d finding peer %s\n", rc,
				      libcfs_nid2str(dst_nid));
			/* ENOMEM or shutting down */
			return rc;
		}
		LASSERT(lp->lp_ni == src_ni);
	} else {
		/* sending to a remote network */
		lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
		if (lp == NULL) {
			if (src_ni != NULL)
				lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);

			LCONSOLE_WARN("No route to %s via %s "
				      "(all routers down)\n",
				      libcfs_id2str(msg->msg_target),
				      libcfs_nid2str(src_nid));
			return -EHOSTUNREACH;
		}

		/* rtr_nid is LNET_NID_ANY or the NID of a pre-determined
		 * router; it's possible that rtr_nid isn't LNET_NID_ANY yet
		 * lp isn't the pre-determined router, which can happen if
		 * the router table was changed while we dropped the lock */
		if (rtr_nid != lp->lp_nid) {
			cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
			if (cpt2 != cpt) {
				if (src_ni != NULL)
					lnet_ni_decref_locked(src_ni, cpt);
				lnet_net_unlock(cpt);

				rtr_nid = lp->lp_nid;
				cpt = cpt2;
				goto again;
			}
		}

		CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
		       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
		       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

		if (src_ni == NULL) {
			src_ni = lp->lp_ni;
			src_nid = src_ni->ni_nid;
		} else {
			LASSERT(src_ni == lp->lp_ni);
			lnet_ni_decref_locked(src_ni, cpt);
		}

		lnet_peer_addref_locked(lp);

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing) {
			/* I'm the source and now I know which NI to send on */
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
		}

		msg->msg_target_is_router = 1;
		msg->msg_target.nid = lp->lp_nid;
		msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
	}

	/* 'lp' is our best choice of peer */

	LASSERT(!msg->msg_peertxcredit);
	LASSERT(!msg->msg_txcredit);
	LASSERT(msg->msg_txpeer == NULL);

	msg->msg_txpeer = lp;              /* msg takes my ref on lp */

	rc = lnet_post_send_locked(msg, 0);
	lnet_net_unlock(cpt);

	if (rc == EHOSTUNREACH || rc == ECANCELED)
		return -rc;

	if (rc == 0)
		lnet_ni_send(src_ni, msg);

	return 0; /* rc == 0 or EAGAIN */
}
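
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally pass LNET_NID_ANY for rtr_nid and treat any negative return as a
 * hard failure, as lnet_parse_get() does below:
 *
 *	rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
 *	if (rc < 0)
 *		lnet_finalize(ni, msg, rc);
 *
 * A 0 return means the message was sent, or queued to wait for credits.
 */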

static void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += nob;
	lnet_net_unlock(cpt);

	lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}

static void
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;

	if (msg->msg_wanted != 0)
		lnet_setpayloadbuffer(msg);

	lnet_build_msg_event(msg, LNET_EVENT_PUT);

	/* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
	 * it back into the ACK during lnet_finalize() */
	msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
			(msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);

	lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
		     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
}

static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	struct lnet_match_info info;
	int rc;

	/* Convert put fields to host byte order */
	hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
	hdr->msg.put.ptl_index  = le32_to_cpu(hdr->msg.put.ptl_index);
	hdr->msg.put.offset     = le32_to_cpu(hdr->msg.put.offset);

	info.mi_id.nid  = hdr->src_nid;
	info.mi_id.pid  = hdr->src_pid;
	info.mi_opc     = LNET_MD_OP_PUT;
	info.mi_portal  = hdr->msg.put.ptl_index;
	info.mi_rlength = hdr->payload_length;
	info.mi_roffset = hdr->msg.put.offset;
	info.mi_mbits   = hdr->msg.put.match_bits;

	msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;

 again:
	rc = lnet_ptl_match_md(&info, msg);
	switch (rc) {
	default:
		LBUG();

	case LNET_MATCHMD_OK:
		lnet_recv_put(ni, msg);
		return 0;

	case LNET_MATCHMD_NONE:
		if (msg->msg_rx_delayed) /* attached on delayed list */
			return 0;

		rc = lnet_ni_eager_recv(ni, msg);
		if (rc == 0)
			goto again;
		/* fall through */

	case LNET_MATCHMD_DROP:
		CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

		return ENOENT;	/* +ve: OK but no match */
	}
}

static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
	struct lnet_match_info info;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_handle_wire_t reply_wmd;
	int rc;

	/* Convert get fields to host byte order */
	hdr->msg.get.match_bits  = le64_to_cpu(hdr->msg.get.match_bits);
	hdr->msg.get.ptl_index   = le32_to_cpu(hdr->msg.get.ptl_index);
	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
	hdr->msg.get.src_offset  = le32_to_cpu(hdr->msg.get.src_offset);

	info.mi_id.nid  = hdr->src_nid;
	info.mi_id.pid  = hdr->src_pid;
	info.mi_opc     = LNET_MD_OP_GET;
	info.mi_portal  = hdr->msg.get.ptl_index;
	info.mi_rlength = hdr->msg.get.sink_length;
	info.mi_roffset = hdr->msg.get.src_offset;
	info.mi_mbits   = hdr->msg.get.match_bits;

	rc = lnet_ptl_match_md(&info, msg);
	if (rc == LNET_MATCHMD_DROP) {
		CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength);
		return ENOENT;	/* +ve: OK but no match */
	}

	LASSERT(rc == LNET_MATCHMD_OK);

	lnet_build_msg_event(msg, LNET_EVENT_GET);

	reply_wmd = hdr->msg.get.return_wmd;

	lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
		       msg->msg_offset, msg->msg_wanted);

	msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

	if (rdma_get) {
		/* The LND completes the REPLY from her recv procedure */
		lnet_ni_recv(ni, msg->msg_private, msg, 0,
			     msg->msg_offset, msg->msg_len, msg->msg_len);
		return 0;
	}

	lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
	msg->msg_receiving = 0;

	rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
	if (rc < 0) {
		/* didn't get as far as lnet_ni_send() */
		CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
		       libcfs_nid2str(ni->ni_nid),
		       libcfs_id2str(info.mi_id), rc);

		lnet_finalize(ni, msg, rc);
	}

	return 0;
}

static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *private = msg->msg_private;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int rlength;
	int mlength;
	int cpt;

	cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			(md == NULL) ? "invalid" : "inactive",
			hdr->msg.reply.dst_wmd.wh_interface_cookie,
			hdr->msg.reply.dst_wmd.wh_object_cookie);
		if (md != NULL && md->md_me != NULL)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT;		/* +ve: OK but no match */
	}

	LASSERT(md->md_offset == 0);

	rlength = hdr->payload_length;
	mlength = MIN(rlength, (int)md->md_length);

	if (mlength < rlength &&
	    (md->md_options & LNET_MD_TRUNCATE) == 0) {
		CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
			mlength);
		lnet_res_unlock(cpt);
		return ENOENT;	/* +ve: OK but no match */
	}

	CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, mlength);

	if (mlength != 0)
		lnet_setpayloadbuffer(msg);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
	return 0;
}

static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int cpt;

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* Convert ack fields to host byte order */
	hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
	hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

	cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		/* Don't moan; this is expected */
		CDEBUG(D_NET,
		       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
		       (md == NULL) ? "invalid" : "inactive",
		       hdr->msg.ack.dst_wmd.wh_interface_cookie,
		       hdr->msg.ack.dst_wmd.wh_object_cookie);
		if (md != NULL && md->md_me != NULL)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT;		/* +ve! */
	}

	CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       hdr->msg.ack.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_ACK);

	lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
	return 0;
}

static int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc = 0;

	if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
	    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
		if (ni->ni_lnd->lnd_eager_recv == NULL) {
			msg->msg_rx_ready_delay = 1;
		} else {
			lnet_net_unlock(msg->msg_rx_cpt);
			rc = lnet_ni_eager_recv(ni, msg);
			lnet_net_lock(msg->msg_rx_cpt);
		}
	}

	if (rc == 0)
		rc = lnet_post_routed_recv_locked(msg, 0);
	return rc;
}

char *
lnet_msgtyp2str(int type)
{
	switch (type) {
	case LNET_MSG_ACK:
		return "ACK";
	case LNET_MSG_PUT:
		return "PUT";
	case LNET_MSG_GET:
		return "GET";
	case LNET_MSG_REPLY:
		return "REPLY";
	case LNET_MSG_HELLO:
		return "HELLO";
	default:
		return "<UNKNOWN>";
	}
}
EXPORT_SYMBOL(lnet_msgtyp2str);

void
lnet_print_hdr(lnet_hdr_t *hdr)
{
	lnet_process_id_t src = {0};
	lnet_process_id_t dst = {0};
	char *type_str = lnet_msgtyp2str(hdr->type);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	dst.nid = hdr->dest_nid;
	dst.pid = hdr->dest_pid;

	CWARN("P3 Header at %p of type %s\n", hdr, type_str);
	CWARN("    From %s\n", libcfs_id2str(src));
	CWARN("    To   %s\n", libcfs_id2str(dst));

	switch (hdr->type) {
	default:
		break;

	case LNET_MSG_PUT:
		CWARN("    Ptl index %d, ack md %#llx.%#llx, "
		      "match bits %llu\n",
		      hdr->msg.put.ptl_index,
		      hdr->msg.put.ack_wmd.wh_interface_cookie,
		      hdr->msg.put.ack_wmd.wh_object_cookie,
		      hdr->msg.put.match_bits);
		CWARN("    Length %d, offset %d, hdr data %#llx\n",
		      hdr->payload_length, hdr->msg.put.offset,
		      hdr->msg.put.hdr_data);
		break;

	case LNET_MSG_GET:
		CWARN("    Ptl index %d, return md %#llx.%#llx, "
		      "match bits %llu\n", hdr->msg.get.ptl_index,
		      hdr->msg.get.return_wmd.wh_interface_cookie,
		      hdr->msg.get.return_wmd.wh_object_cookie,
		      hdr->msg.get.match_bits);
		CWARN("    Length %d, src offset %d\n",
		      hdr->msg.get.sink_length,
		      hdr->msg.get.src_offset);
		break;

	case LNET_MSG_ACK:
		CWARN("    dst md %#llx.%#llx, "
		      "manipulated length %d\n",
		      hdr->msg.ack.dst_wmd.wh_interface_cookie,
		      hdr->msg.ack.dst_wmd.wh_object_cookie,
		      hdr->msg.ack.mlength);
		break;

	case LNET_MSG_REPLY:
		CWARN("    dst md %#llx.%#llx, "
		      "length %d\n",
		      hdr->msg.reply.dst_wmd.wh_interface_cookie,
		      hdr->msg.reply.dst_wmd.wh_object_cookie,
		      hdr->payload_length);
	}

}

int
lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
	   void *private, int rdma_req)
{
	int rc = 0;
	int cpt;
	int for_me;
	struct lnet_msg *msg;
	lnet_pid_t dest_pid;
	lnet_nid_t dest_nid;
	lnet_nid_t src_nid;
	__u32 payload_length;
	__u32 type;

	LASSERT(!in_interrupt());

	type = le32_to_cpu(hdr->type);
	src_nid = le64_to_cpu(hdr->src_nid);
	dest_nid = le64_to_cpu(hdr->dest_nid);
	dest_pid = le32_to_cpu(hdr->dest_pid);
	payload_length = le32_to_cpu(hdr->payload_length);

	for_me = (ni->ni_nid == dest_nid);
	cpt = lnet_cpt_of_nid(from_nid);

	switch (type) {
	case LNET_MSG_ACK:
	case LNET_MSG_GET:
		if (payload_length > 0) {
			CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       lnet_msgtyp2str(type), payload_length);
			return -EPROTO;
		}
		break;

	case LNET_MSG_PUT:
	case LNET_MSG_REPLY:
		if (payload_length >
		    (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
			CERROR("%s, src %s: bad %s payload %d "
			       "(%d max expected)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       lnet_msgtyp2str(type),
			       payload_length,
			       for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
			return -EPROTO;
		}
		break;

	default:
		CERROR("%s, src %s: Bad message type 0x%x\n",
		       libcfs_nid2str(from_nid),
		       libcfs_nid2str(src_nid), type);
		return -EPROTO;
	}

	if (the_lnet.ln_routing &&
	    ni->ni_last_alive != get_seconds()) {
		lnet_ni_lock(ni);

		/* NB: so far here is the only place to set NI status to "up" */
		ni->ni_last_alive = get_seconds();
		if (ni->ni_status != NULL &&
		    ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
			ni->ni_status->ns_status = LNET_NI_STATUS_UP;
		lnet_ni_unlock(ni);
	}

	/* Regard a bad destination NID as a protocol error.  Senders should
	 * know what they're doing; if they don't they're misconfigured, buggy
	 * or malicious so we chop them off at the knees :) */

	if (!for_me) {
		if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
			/* should have gone direct */
			CERROR("%s, src %s: Bad dest nid %s "
			       "(should have been sent direct)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (lnet_islocalnid(dest_nid)) {
			/* dest is another local NI; sender should have used
			 * this node's NID on its own network */
			CERROR("%s, src %s: Bad dest nid %s "
			       "(it's my nid but on a different network)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (rdma_req && type == LNET_MSG_GET) {
			CERROR("%s, src %s: Bad optimized GET for %s "
			       "(final destination must be me)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (!the_lnet.ln_routing) {
			CERROR("%s, src %s: Dropping message for %s "
			       "(routing not enabled)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			goto drop;
		}
	}

	/* Message looks OK; we're not going to return an error, so we MUST
	 * call back lnd_recv() come what may... */

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(src_nid, 0)) {                /* shall we now? */
		CERROR("%s, src %s: Dropping %s to simulate failure\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	msg = lnet_msg_alloc();
	if (msg == NULL) {
		CERROR("%s, src %s: Dropping %s (out of memory)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	/* msg zeroed in lnet_msg_alloc;
	 * i.e. flags all clear, pointers NULL etc
	 */

	msg->msg_type = type;
	msg->msg_private = private;
	msg->msg_receiving = 1;
	msg->msg_len = msg->msg_wanted = payload_length;
	msg->msg_offset = 0;
	msg->msg_hdr = *hdr;
	/* for building message event */
	msg->msg_from = from_nid;
	if (!for_me) {
		msg->msg_target.pid	= dest_pid;
		msg->msg_target.nid	= dest_nid;
		msg->msg_routing	= 1;

	} else {
		/* convert common msg->hdr fields to host byteorder */
		msg->msg_hdr.type	= type;
		msg->msg_hdr.src_nid	= src_nid;
		msg->msg_hdr.src_pid	= le32_to_cpu(msg->msg_hdr.src_pid);
		msg->msg_hdr.dest_nid	= dest_nid;
		msg->msg_hdr.dest_pid	= dest_pid;
		msg->msg_hdr.payload_length = payload_length;
	}

	lnet_net_lock(cpt);
	rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
	if (rc != 0) {
		lnet_net_unlock(cpt);
		CERROR("%s, src %s: Dropping %s "
		       "(error %d looking up sender)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type), rc);
		lnet_msg_free(msg);
		goto drop;
	}

	lnet_msg_commit(msg, cpt);

	if (!for_me) {
		rc = lnet_parse_forward_locked(ni, msg);
		lnet_net_unlock(cpt);

		if (rc < 0)
			goto free_drop;
		if (rc == 0) {
			lnet_ni_recv(ni, msg->msg_private, msg, 0,
				     0, payload_length, payload_length);
		}
		return 0;
	}

	lnet_net_unlock(cpt);

	switch (type) {
	case LNET_MSG_ACK:
		rc = lnet_parse_ack(ni, msg);
		break;
	case LNET_MSG_PUT:
		rc = lnet_parse_put(ni, msg);
		break;
	case LNET_MSG_GET:
		rc = lnet_parse_get(ni, msg, rdma_req);
		break;
	case LNET_MSG_REPLY:
		rc = lnet_parse_reply(ni, msg);
		break;
	default:
		LASSERT(0);
		rc = -EPROTO;
		goto free_drop;  /* prevent an unused label if !kernel */
	}

	if (rc == 0)
		return 0;

	LASSERT(rc == ENOENT);

 free_drop:
	LASSERT(msg->msg_md == NULL);
	lnet_finalize(ni, msg, rc);

 drop:
	lnet_drop_message(ni, cpt, private, payload_length);
	return 0;
}
EXPORT_SYMBOL(lnet_parse);
1943 
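/*
 * Usage sketch (illustrative only, not part of this file): an LND's
 * receive path hands each incoming header to lnet_parse(), which either
 * takes ownership of the message (calling back lnd_recv() for the
 * payload) or rejects it.  The names "example_lnd_rx" and "priv" are
 * hypothetical; rdma_req is 0 for a transport that does not support
 * optimized GETs.
 */
#if 0
static void example_lnd_rx(lnet_ni_t *ni, lnet_hdr_t *hdr,
			   lnet_nid_t from_nid, void *priv)
{
	/* On success LNet calls back lnd_recv() come what may; on error
	 * the LND must discard the payload itself. */
	if (lnet_parse(ni, hdr, from_nid, priv, 0) < 0)
		CDEBUG(D_NET, "message from %s rejected\n",
		       libcfs_nid2str(from_nid));
}
#endif
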
void
lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
	while (!list_empty(head)) {
		lnet_process_id_t	id = {0};
		lnet_msg_t		*msg;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(msg->msg_md == NULL);
		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_rxpeer != NULL);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
		      libcfs_id2str(id),
		      msg->msg_hdr.msg.put.ptl_index,
		      msg->msg_hdr.msg.put.match_bits,
		      msg->msg_hdr.msg.put.offset,
		      msg->msg_hdr.payload_length, reason);

		/* NB I can't drop msg's ref on msg_rxpeer until after I've
		 * called lnet_drop_message(), so I just hang onto msg as well
		 * until that's done */

		lnet_drop_message(msg->msg_rxpeer->lp_ni,
				  msg->msg_rxpeer->lp_cpt,
				  msg->msg_private, msg->msg_len);
		/*
		 * NB: with no MD attached, the message will not generate an
		 * event; we still pass an error code so lnet_msg_decommit()
		 * can skip the counter updates and other checks.
		 */
		lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
	}
}

void
lnet_recv_delayed_msg_list(struct list_head *head)
{
	while (!list_empty(head)) {
		lnet_msg_t	  *msg;
		lnet_process_id_t  id;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		/* md won't disappear under me, since each msg
		 * holds a ref on it */

		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_md != NULL);
		LASSERT(msg->msg_rxpeer != NULL);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
		       libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
		       msg->msg_hdr.msg.put.match_bits,
		       msg->msg_hdr.msg.put.offset,
		       msg->msg_hdr.payload_length);

		lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
	}
}

/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND), and when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed
 * to by \a mdh. Using an MD without an associated EQ results in these
 * events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (See LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * process.
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to the EQ (if it exists).
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
 */
int
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset,
	__u64 hdr_data)
{
	struct lnet_msg		*msg;
	struct lnet_libmd	*md;
	int			cpt;
	int			rc;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(target.nid, 1)) {		    /* shall we now? */
		CERROR("Dropping PUT to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (msg == NULL) {
		CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}
	msg->msg_vmflush = !!memory_pressure_get();

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       md == NULL ? -1 : md->md_threshold);
		if (md != NULL && md->md_me != NULL)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);
		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

	msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.put.hdr_data = hdr_data;

	/* NB handles only looked up by creator (no flips) */
	if (ack == LNET_ACK_REQ) {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			the_lnet.ln_interface_cookie;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			md->md_lh.lh_cookie;
	} else {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
	}

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc != 0) {
		CNETERR("Error sending PUT to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetPut);

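/*
 * Usage sketch (illustrative only, not part of this file): bind a
 * free-floating MD to a buffer and PUT it to a peer.  "example_eq"
 * (e.g. from LNetEQAlloc()), the portal number and the match bits are
 * hypothetical values; a real caller would also consume the SEND and
 * ACK events from its EQ before reusing the buffer.
 */
#if 0
static int example_put(lnet_process_id_t peer, lnet_handle_eq_t example_eq,
		       void *buf, unsigned int len)
{
	lnet_handle_md_t mdh;
	lnet_md_t umd = {
		.start		= buf,
		.length		= len,
		.threshold	= 2,	/* one SEND + one ACK event */
		.options	= 0,
		.eq_handle	= example_eq,
	};
	int rc;

	rc = LNetMDBind(umd, LNET_UNLINK, &mdh);	/* "free floating" MD */
	if (rc != 0)
		return rc;

	return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, peer,
		       4 /* portal */, 0x1234 /* match bits */, 0, 0);
}
#endif
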
lnet_msg_t *
lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
	/* The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
	 * returns a msg for the LND to pass to lnet_finalize() when the sink
	 * data has been received.
	 *
	 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
	 * lnet_finalize() is called on it, so the LND must call this first */

	struct lnet_msg		*msg = lnet_msg_alloc();
	struct lnet_libmd	*getmd = getmsg->msg_md;
	lnet_process_id_t	peer_id = getmsg->msg_target;
	int			cpt;

	LASSERT(!getmsg->msg_target_is_router);
	LASSERT(!getmsg->msg_routing);

	cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
	lnet_res_lock(cpt);

	LASSERT(getmd->md_refcount > 0);

	if (msg == NULL) {
		CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
		lnet_res_unlock(cpt);
		goto drop;
	}

	if (getmd->md_threshold == 0) {
		CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
		       getmd);
		lnet_res_unlock(cpt);
		goto drop;
	}

	LASSERT(getmd->md_offset == 0);

	CDEBUG(D_NET, "%s: Reply from %s md %p\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

	/* setup information for lnet_build_msg_event */
	msg->msg_from = peer_id.nid;
	msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
	msg->msg_hdr.src_nid = peer_id.nid;
	msg->msg_hdr.payload_length = getmd->md_length;
	msg->msg_receiving = 1; /* required by lnet_msg_attach_md */

	lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
	lnet_res_unlock(cpt);

	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	lnet_msg_commit(msg, cpt);
	lnet_net_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	return msg;

 drop:
	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
	lnet_net_unlock(cpt);

	if (msg != NULL)
		lnet_msg_free(msg);

	return NULL;
}
EXPORT_SYMBOL(lnet_create_reply_msg);

void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
	/* Set the REPLY length, now the RDMA that elides the REPLY message has
	 * completed and I know it. */
	LASSERT(reply != NULL);
	LASSERT(reply->msg_type == LNET_MSG_GET);
	LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);

	/* NB I trusted my peer to RDMA.  If she tells me she's written beyond
	 * the end of my buffer, I might as well be dead. */
	LASSERT(len <= reply->msg_ev.mlength);

	reply->msg_ev.mlength = len;
}
EXPORT_SYMBOL(lnet_set_reply_msg_len);

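/*
 * Usage sketch (illustrative only, not part of this file): an LND that
 * RDMAs GET sink data directly (eliding the REPLY on the wire) creates
 * the REPLY msg before finalizing the GET, then finalizes the REPLY
 * once the transfer completes.  "example_get_done" and its parameters
 * are hypothetical.
 */
#if 0
static void example_get_done(lnet_ni_t *ni, lnet_msg_t *getmsg,
			     unsigned int nob, int status)
{
	/* must be created before lnet_finalize() frees 'getmsg' */
	lnet_msg_t *reply = lnet_create_reply_msg(ni, getmsg);

	lnet_finalize(ni, getmsg, status);

	if (reply != NULL) {
		if (status == 0)
			/* tell LNet how much the peer actually wrote */
			lnet_set_reply_msg_len(ni, reply, nob);
		lnet_finalize(ni, reply, status);
	}
}
#endif
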
/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to the local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into an MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating"
 * (See LNetMDBind()).
 *
 * \retval  0      Success, and only in this case events will be generated
 * and logged to the EQ (if any) of the MD.
 * \retval -EIO    Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset)
{
	struct lnet_msg		*msg;
	struct lnet_libmd	*md;
	int			cpt;
	int			rc;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(target.nid, 1)) {		    /* shall we now? */
		CERROR("Dropping GET to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (msg == NULL) {
		CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
		CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       md == NULL ? -1 : md->md_threshold);
		if (md != NULL && md->md_me != NULL)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

	msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

	/* NB handles only looked up by creator (no flips) */
	msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
		the_lnet.ln_interface_cookie;
	msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
		md->md_lh.lh_cookie;

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc < 0) {
		CNETERR("Error sending GET to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetGet);

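/*
 * Usage sketch (illustrative only, not part of this file): bind a
 * free-floating sink MD and GET data from a peer.  As in the PUT
 * sketch above, the EQ handle, portal and match bits are hypothetical;
 * the caller learns the outcome from the SEND and REPLY events.
 */
#if 0
static int example_get(lnet_process_id_t peer, lnet_handle_eq_t example_eq,
		       void *sink, unsigned int len)
{
	lnet_handle_md_t mdh;
	lnet_md_t umd = {
		.start		= sink,
		.length		= len,
		.threshold	= 2,	/* one SEND + one REPLY event */
		.options	= 0,
		.eq_handle	= example_eq,
	};
	int rc;

	rc = LNetMDBind(umd, LNET_UNLINK, &mdh);	/* "free floating" MD */
	if (rc != 0)
		return rc;

	return LNetGet(LNET_NID_ANY, mdh, peer,
		       4 /* portal */, 0x1234 /* match bits */, 0);
}
#endif
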
/**
 * Calculate the distance to the node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
 * is saved here.
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 * here.
 *
 * \retval 0 If \a dstnid belongs to a local interface and the reserved
 * option local_nid_dist_zero is set, which is the default.
 * \retval >0 Distance to the target NID, i.e. the number of hops plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
	struct list_head	*e;
	struct lnet_ni		*ni;
	lnet_remotenet_t	*rnet;
	__u32			dstnet = LNET_NIDNET(dstnid);
	int			hops;
	int			cpt;
	__u32			order = 2;
	struct list_head	*rn_list;

	/* if !local_nid_dist_zero, I don't return a distance of 0 ever
	 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
	 * keep order 0 free for 0@lo and order 1 free for a local NID
	 * match */

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each(e, &the_lnet.ln_nis) {
		ni = list_entry(e, lnet_ni_t, ni_list);

		if (ni->ni_nid == dstnid) {
			if (srcnidp != NULL)
				*srcnidp = dstnid;
			if (orderp != NULL) {
				if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
					*orderp = 0;
				else
					*orderp = 1;
			}
			lnet_net_unlock(cpt);

			return local_nid_dist_zero ? 0 : 1;
		}

		if (LNET_NIDNET(ni->ni_nid) == dstnet) {
			if (srcnidp != NULL)
				*srcnidp = ni->ni_nid;
			if (orderp != NULL)
				*orderp = order;
			lnet_net_unlock(cpt);
			return 1;
		}

		order++;
	}

	rn_list = lnet_net2rnethash(dstnet);
	list_for_each(e, rn_list) {
		rnet = list_entry(e, lnet_remotenet_t, lrn_list);

		if (rnet->lrn_net == dstnet) {
			lnet_route_t *route;
			lnet_route_t *shortest = NULL;

			LASSERT(!list_empty(&rnet->lrn_routes));

			list_for_each_entry(route, &rnet->lrn_routes,
					    lr_list) {
				if (shortest == NULL ||
				    route->lr_hops < shortest->lr_hops)
					shortest = route;
			}

			LASSERT(shortest != NULL);
			hops = shortest->lr_hops;
			if (srcnidp != NULL)
				*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
			if (orderp != NULL)
				*orderp = order;
			lnet_net_unlock(cpt);
			return hops + 1;
		}
		order++;
	}

	lnet_net_unlock(cpt);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(LNetDist);

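/*
 * Usage sketch (illustrative only, not part of this file): interpret
 * LNetDist() results for a peer NID.  "example_dist" is hypothetical;
 * note the return value is hops plus one, so one is subtracted below.
 */
#if 0
static void example_dist(lnet_nid_t peer_nid)
{
	lnet_nid_t src_nid;
	__u32 order;
	int dist = LNetDist(peer_nid, &src_nid, &order);

	if (dist < 0)
		CDEBUG(D_NET, "%s unreachable\n", libcfs_nid2str(peer_nid));
	else if (dist == 0)
		CDEBUG(D_NET, "%s is local\n", libcfs_nid2str(peer_nid));
	else
		CDEBUG(D_NET, "%s is %d hop(s) away via %s\n",
		       libcfs_nid2str(peer_nid), dist - 1,
		       libcfs_nid2str(src_nid));
}
#endif
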
/**
 * Set the number of asynchronous messages expected from a target process.
 *
 * This function is only meaningful for userspace callers. It's a no-op when
 * called from the kernel.
 *
 * Asynchronous messages are those that can come from a target when the
 * userspace process is not waiting for IO to complete; e.g., AST callbacks
 * from Lustre servers. Specifying the expected number of such messages
 * allows them to be eagerly received even when the user process is not
 * running in LNet; otherwise network errors may occur.
 *
 * \param id Process ID of the target process.
 * \param nasync Number of asynchronous messages expected from the target.
 *
 * \return 0 on success, and an error code otherwise.
 */
int
LNetSetAsync(lnet_process_id_t id, int nasync)
{
	return 0;
}
EXPORT_SYMBOL(LNetSetAsync);