1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2015, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * lnet/lnet/lib-move.c
33 *
34 * Data movement routines
35 */
36
37 #define DEBUG_SUBSYSTEM S_LNET
38
39 #include "../../include/linux/lnet/lib-lnet.h"
40 #include <linux/nsproxy.h>
41 #include <net/net_namespace.h>
42
43 static int local_nid_dist_zero = 1;
44 module_param(local_nid_dist_zero, int, 0444);
45 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
46
47 int
48 lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
49 {
50 lnet_test_peer_t *tp;
51 lnet_test_peer_t *temp;
52 struct list_head *el;
53 struct list_head *next;
54 struct list_head cull;
55
56 /* NB: use lnet_net_lock(0) to serialize operations on test peers */
57 if (threshold) {
58 /* Adding a new entry */
59 LIBCFS_ALLOC(tp, sizeof(*tp));
60 if (!tp)
61 return -ENOMEM;
62
63 tp->tp_nid = nid;
64 tp->tp_threshold = threshold;
65
66 lnet_net_lock(0);
67 list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
68 lnet_net_unlock(0);
69 return 0;
70 }
71
72 /* removing entries */
73 INIT_LIST_HEAD(&cull);
74
75 lnet_net_lock(0);
76
77 list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
78 tp = list_entry(el, lnet_test_peer_t, tp_list);
79
80 if (!tp->tp_threshold || /* needs culling anyway */
81 nid == LNET_NID_ANY || /* removing all entries */
82 tp->tp_nid == nid) { /* matched this one */
83 list_del(&tp->tp_list);
84 list_add(&tp->tp_list, &cull);
85 }
86 }
87
88 lnet_net_unlock(0);
89
90 list_for_each_entry_safe(tp, temp, &cull, tp_list) {
91 list_del(&tp->tp_list);
92 LIBCFS_FREE(tp, sizeof(*tp));
93 }
94 return 0;
95 }
96
97 static int
98 fail_peer(lnet_nid_t nid, int outgoing)
99 {
100 lnet_test_peer_t *tp;
101 lnet_test_peer_t *temp;
102 struct list_head *el;
103 struct list_head *next;
104 struct list_head cull;
105 int fail = 0;
106
107 INIT_LIST_HEAD(&cull);
108
109 /* NB: use lnet_net_lock(0) to serialize operations on test peers */
110 lnet_net_lock(0);
111
112 list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
113 tp = list_entry(el, lnet_test_peer_t, tp_list);
114
115 if (!tp->tp_threshold) {
116 /* zombie entry */
117 if (outgoing) {
118 /*
119 * only cull zombies on outgoing tests,
120 * since we may be at interrupt priority on
121 * incoming messages.
122 */
123 list_del(&tp->tp_list);
124 list_add(&tp->tp_list, &cull);
125 }
126 continue;
127 }
128
129 if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
130 nid == tp->tp_nid) { /* fail this peer */
131 fail = 1;
132
133 if (tp->tp_threshold != LNET_MD_THRESH_INF) {
134 tp->tp_threshold--;
135 if (outgoing &&
136 !tp->tp_threshold) {
137 /* see above */
138 list_del(&tp->tp_list);
139 list_add(&tp->tp_list, &cull);
140 }
141 }
142 break;
143 }
144 }
145
146 lnet_net_unlock(0);
147
148 list_for_each_entry_safe(tp, temp, &cull, tp_list) {
149 list_del(&tp->tp_list);
150
151 LIBCFS_FREE(tp, sizeof(*tp));
152 }
153
154 return fail;
155 }
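/*
 * Illustrative usage (added note, not part of the original source; the
 * variable 'nid' is a hypothetical peer).  The test-peer table above is
 * LNet's failure-simulation hook, typically driven from LNetCtl()
 * elsewhere in LNet.  A sketch of how a caller might exercise it:
 *
 *	lnet_fail_nid(nid, 3);			add an entry: fail 3 messages
 *	lnet_fail_nid(nid, 0);			remove that entry again
 *	lnet_fail_nid(LNET_NID_ANY, 0);		flush every test-peer entry
 *
 * fail_peer() above is then consulted on the send and receive paths and
 * returns non-zero when the message should be dropped.
 */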
156
157 unsigned int
158 lnet_iov_nob(unsigned int niov, struct kvec *iov)
159 {
160 unsigned int nob = 0;
161
162 LASSERT(!niov || iov);
163 while (niov-- > 0)
164 nob += (iov++)->iov_len;
165
166 return nob;
167 }
168 EXPORT_SYMBOL(lnet_iov_nob);
169
170 void
171 lnet_copy_iov2iter(struct iov_iter *to,
172 unsigned int nsiov, const struct kvec *siov,
173 unsigned int soffset, unsigned int nob)
174 {
175 /* NB 'siov' is READ-ONLY */
176 const char *s;
177 size_t left;
178
179 if (!nob)
180 return;
181
182 /* skip complete frags before 'soffset' */
183 LASSERT(nsiov > 0);
184 while (soffset >= siov->iov_len) {
185 soffset -= siov->iov_len;
186 siov++;
187 nsiov--;
188 LASSERT(nsiov > 0);
189 }
190
191 s = (char *)siov->iov_base + soffset;
192 left = siov->iov_len - soffset;
193 do {
194 size_t n, copy = left;
195 LASSERT(nsiov > 0);
196
197 if (copy > nob)
198 copy = nob;
199 n = copy_to_iter(s, copy, to);
200 if (n != copy)
201 return;
202 nob -= n;
203
204 siov++;
205 s = (char *)siov->iov_base;
206 left = siov->iov_len;
207 nsiov--;
208 } while (nob > 0);
209 }
210 EXPORT_SYMBOL(lnet_copy_iov2iter);
211
212 void
213 lnet_copy_kiov2iter(struct iov_iter *to,
214 unsigned int nsiov, const lnet_kiov_t *siov,
215 unsigned int soffset, unsigned int nob)
216 {
217 if (!nob)
218 return;
219
220 LASSERT(!in_interrupt());
221
222 LASSERT(nsiov > 0);
223 while (soffset >= siov->bv_len) {
224 soffset -= siov->bv_len;
225 siov++;
226 nsiov--;
227 LASSERT(nsiov > 0);
228 }
229
230 do {
231 size_t copy = siov->bv_len - soffset, n;
232
233 LASSERT(nsiov > 0);
234
235 if (copy > nob)
236 copy = nob;
237 n = copy_page_to_iter(siov->bv_page,
238 siov->bv_offset + soffset,
239 copy, to);
240 if (n != copy)
241 return;
242 nob -= n;
243 siov++;
244 nsiov--;
245 soffset = 0;
246 } while (nob > 0);
247 }
248 EXPORT_SYMBOL(lnet_copy_kiov2iter);
249
250 int
251 lnet_extract_iov(int dst_niov, struct kvec *dst,
252 int src_niov, const struct kvec *src,
253 unsigned int offset, unsigned int len)
254 {
255 /*
256 * Initialise 'dst' to the subset of 'src' starting at 'offset',
257 * for exactly 'len' bytes, and return the number of entries.
258 * NB not destructive to 'src'
259 */
260 unsigned int frag_len;
261 unsigned int niov;
262
263 if (!len) /* no data => */
264 return 0; /* no frags */
265
266 LASSERT(src_niov > 0);
267 while (offset >= src->iov_len) { /* skip initial frags */
268 offset -= src->iov_len;
269 src_niov--;
270 src++;
271 LASSERT(src_niov > 0);
272 }
273
274 niov = 1;
275 for (;;) {
276 LASSERT(src_niov > 0);
277 LASSERT((int)niov <= dst_niov);
278
279 frag_len = src->iov_len - offset;
280 dst->iov_base = ((char *)src->iov_base) + offset;
281
282 if (len <= frag_len) {
283 dst->iov_len = len;
284 return niov;
285 }
286
287 dst->iov_len = frag_len;
288
289 len -= frag_len;
290 dst++;
291 src++;
292 niov++;
293 src_niov--;
294 offset = 0;
295 }
296 }
297 EXPORT_SYMBOL(lnet_extract_iov);
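/*
 * Illustrative sketch (added for clarity; 'offset', 'len' and 'msg' are
 * assumed context, not taken from the original source): an LND that wants
 * to hand only bytes [offset, offset + len) of a message payload to its
 * own scatter/gather machinery can build a fragment list in place:
 *
 *	struct kvec frags[LNET_MAX_IOV];
 *	int nfrags;
 *
 *	nfrags = lnet_extract_iov(LNET_MAX_IOV, frags,
 *				  msg->msg_niov, msg->msg_iov,
 *				  offset, len);
 *
 * No data is copied; 'frags' simply aliases the source buffers, which is
 * why the comment above stresses that 'src' is not modified.
 */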
298
299 unsigned int
300 lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
301 {
302 unsigned int nob = 0;
303
304 LASSERT(!niov || kiov);
305 while (niov-- > 0)
306 nob += (kiov++)->bv_len;
307
308 return nob;
309 }
310 EXPORT_SYMBOL(lnet_kiov_nob);
311
312 int
313 lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
314 int src_niov, const lnet_kiov_t *src,
315 unsigned int offset, unsigned int len)
316 {
317 /*
318 * Initialise 'dst' to the subset of 'src' starting at 'offset',
319 * for exactly 'len' bytes, and return the number of entries.
320 * NB not destructive to 'src'
321 */
322 unsigned int frag_len;
323 unsigned int niov;
324
325 if (!len) /* no data => */
326 return 0; /* no frags */
327
328 LASSERT(src_niov > 0);
329 while (offset >= src->bv_len) { /* skip initial frags */
330 offset -= src->bv_len;
331 src_niov--;
332 src++;
333 LASSERT(src_niov > 0);
334 }
335
336 niov = 1;
337 for (;;) {
338 LASSERT(src_niov > 0);
339 LASSERT((int)niov <= dst_niov);
340
341 frag_len = src->bv_len - offset;
342 dst->bv_page = src->bv_page;
343 dst->bv_offset = src->bv_offset + offset;
344
345 if (len <= frag_len) {
346 dst->bv_len = len;
347 LASSERT(dst->bv_offset + dst->bv_len
348 <= PAGE_SIZE);
349 return niov;
350 }
351
352 dst->bv_len = frag_len;
353 LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
354
355 len -= frag_len;
356 dst++;
357 src++;
358 niov++;
359 src_niov--;
360 offset = 0;
361 }
362 }
363 EXPORT_SYMBOL(lnet_extract_kiov);
364
365 void
366 lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
367 unsigned int offset, unsigned int mlen, unsigned int rlen)
368 {
369 unsigned int niov = 0;
370 struct kvec *iov = NULL;
371 lnet_kiov_t *kiov = NULL;
372 struct iov_iter to;
373 int rc;
374
375 LASSERT(!in_interrupt());
376 LASSERT(!mlen || msg);
377
378 if (msg) {
379 LASSERT(msg->msg_receiving);
380 LASSERT(!msg->msg_sending);
381 LASSERT(rlen == msg->msg_len);
382 LASSERT(mlen <= msg->msg_len);
383 LASSERT(msg->msg_offset == offset);
384 LASSERT(msg->msg_wanted == mlen);
385
386 msg->msg_receiving = 0;
387
388 if (mlen) {
389 niov = msg->msg_niov;
390 iov = msg->msg_iov;
391 kiov = msg->msg_kiov;
392
393 LASSERT(niov > 0);
394 LASSERT(!iov != !kiov);
395 }
396 }
397
398 if (iov) {
399 iov_iter_kvec(&to, ITER_KVEC | READ, iov, niov, mlen + offset);
400 iov_iter_advance(&to, offset);
401 } else {
402 iov_iter_bvec(&to, ITER_BVEC | READ, kiov, niov, mlen + offset);
403 iov_iter_advance(&to, offset);
404 }
405 rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed, &to, rlen);
406 if (rc < 0)
407 lnet_finalize(ni, msg, rc);
408 }
409
410 static void
411 lnet_setpayloadbuffer(lnet_msg_t *msg)
412 {
413 lnet_libmd_t *md = msg->msg_md;
414
415 LASSERT(msg->msg_len > 0);
416 LASSERT(!msg->msg_routing);
417 LASSERT(md);
418 LASSERT(!msg->msg_niov);
419 LASSERT(!msg->msg_iov);
420 LASSERT(!msg->msg_kiov);
421
422 msg->msg_niov = md->md_niov;
423 if (md->md_options & LNET_MD_KIOV)
424 msg->msg_kiov = md->md_iov.kiov;
425 else
426 msg->msg_iov = md->md_iov.iov;
427 }
428
429 void
430 lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
431 unsigned int offset, unsigned int len)
432 {
433 msg->msg_type = type;
434 msg->msg_target = target;
435 msg->msg_len = len;
436 msg->msg_offset = offset;
437
438 if (len)
439 lnet_setpayloadbuffer(msg);
440
441 memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
442 msg->msg_hdr.type = cpu_to_le32(type);
443 msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
444 msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
445 /* src_nid will be set later */
446 msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
447 msg->msg_hdr.payload_length = cpu_to_le32(len);
448 }
449
450 static void
451 lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
452 {
453 void *priv = msg->msg_private;
454 int rc;
455
456 LASSERT(!in_interrupt());
457 LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
458 (msg->msg_txcredit && msg->msg_peertxcredit));
459
460 rc = ni->ni_lnd->lnd_send(ni, priv, msg);
461 if (rc < 0)
462 lnet_finalize(ni, msg, rc);
463 }
464
465 static int
466 lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
467 {
468 int rc;
469
470 LASSERT(!msg->msg_sending);
471 LASSERT(msg->msg_receiving);
472 LASSERT(!msg->msg_rx_ready_delay);
473 LASSERT(ni->ni_lnd->lnd_eager_recv);
474
475 msg->msg_rx_ready_delay = 1;
476 rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
477 &msg->msg_private);
478 if (rc) {
479 CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
480 libcfs_nid2str(msg->msg_rxpeer->lp_nid),
481 libcfs_id2str(msg->msg_target), rc);
482 LASSERT(rc < 0); /* required by my callers */
483 }
484
485 return rc;
486 }
487
488 /* NB: the caller must hold a ref on 'lp', as this drops and re-takes lnet_net_lock */
489 static void
490 lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
491 {
492 unsigned long last_alive = 0;
493
494 LASSERT(lnet_peer_aliveness_enabled(lp));
495 LASSERT(ni->ni_lnd->lnd_query);
496
497 lnet_net_unlock(lp->lp_cpt);
498 ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
499 lnet_net_lock(lp->lp_cpt);
500
501 lp->lp_last_query = cfs_time_current();
502
503 if (last_alive) /* NI has updated timestamp */
504 lp->lp_last_alive = last_alive;
505 }
506
507 /* NB: always called with lnet_net_lock held */
508 static inline int
509 lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
510 {
511 int alive;
512 unsigned long deadline;
513
514 LASSERT(lnet_peer_aliveness_enabled(lp));
515
516 /* Trust lnet_notify() if it has more recent aliveness news, but
517 * ignore the initial assumed death (see lnet_peers_start_down()).
518 */
519 if (!lp->lp_alive && lp->lp_alive_count > 0 &&
520 cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
521 return 0;
522
523 deadline = cfs_time_add(lp->lp_last_alive,
524 cfs_time_seconds(lp->lp_ni->ni_peertimeout));
525 alive = cfs_time_after(deadline, now);
526
527 /* Update stale lp_alive, except for routers that are assumed dead
528 * initially, because the router checker updates their aliveness, and
529 * lp_last_alive at peer creation is only an assumed value.
530 */
531 if (alive && !lp->lp_alive &&
532 !(lnet_isrouter(lp) && !lp->lp_alive_count))
533 lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
534
535 return alive;
536 }
537
538 /*
539 * NB: returns 1 when alive, 0 when dead, negative when error;
540 * may drop the lnet_net_lock
541 */
542 static int
543 lnet_peer_alive_locked(lnet_peer_t *lp)
544 {
545 unsigned long now = cfs_time_current();
546
547 if (!lnet_peer_aliveness_enabled(lp))
548 return -ENODEV;
549
550 if (lnet_peer_is_alive(lp, now))
551 return 1;
552
553 /*
554 * Peer appears dead, but we should avoid frequent NI queries (at
555 * most once per lnet_queryinterval seconds).
556 */
557 if (lp->lp_last_query) {
558 static const int lnet_queryinterval = 1;
559
560 unsigned long next_query =
561 cfs_time_add(lp->lp_last_query,
562 cfs_time_seconds(lnet_queryinterval));
563
564 if (time_before(now, next_query)) {
565 if (lp->lp_alive)
566 CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
567 libcfs_nid2str(lp->lp_nid),
568 (int)now, (int)next_query,
569 lnet_queryinterval,
570 lp->lp_ni->ni_peertimeout);
571 return 0;
572 }
573 }
574
575 /* query NI for latest aliveness news */
576 lnet_ni_query_locked(lp->lp_ni, lp);
577
578 if (lnet_peer_is_alive(lp, now))
579 return 1;
580
581 lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
582 return 0;
583 }
584
585 /**
586 * \param msg The message to be sent.
587 * \param do_send True if lnet_ni_send() should be called in this function.
588 * lnet_send() calls lnet_net_unlock() immediately after this, so it
589 * passes do_send as FALSE and skips the unlock/send/lock sequence here.
590 *
591 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
592 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
593 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
594 * \retval -ECANCELED If the MD of the message has been unlinked.
595 */
596 static int
597 lnet_post_send_locked(lnet_msg_t *msg, int do_send)
598 {
599 lnet_peer_t *lp = msg->msg_txpeer;
600 lnet_ni_t *ni = lp->lp_ni;
601 int cpt = msg->msg_tx_cpt;
602 struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
603
604 /* non-lnet_send() callers have checked before */
605 LASSERT(!do_send || msg->msg_tx_delayed);
606 LASSERT(!msg->msg_receiving);
607 LASSERT(msg->msg_tx_committed);
608
609 /* NB 'lp' is always the next hop */
610 if (!(msg->msg_target.pid & LNET_PID_USERFLAG) &&
611 !lnet_peer_alive_locked(lp)) {
612 the_lnet.ln_counters[cpt]->drop_count++;
613 the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
614 lnet_net_unlock(cpt);
615
616 CNETERR("Dropping message for %s: peer not alive\n",
617 libcfs_id2str(msg->msg_target));
618 if (do_send)
619 lnet_finalize(ni, msg, -EHOSTUNREACH);
620
621 lnet_net_lock(cpt);
622 return -EHOSTUNREACH;
623 }
624
625 if (msg->msg_md &&
626 (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) {
627 lnet_net_unlock(cpt);
628
629 CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
630 libcfs_id2str(msg->msg_target));
631 if (do_send)
632 lnet_finalize(ni, msg, -ECANCELED);
633
634 lnet_net_lock(cpt);
635 return -ECANCELED;
636 }
637
638 if (!msg->msg_peertxcredit) {
639 LASSERT((lp->lp_txcredits < 0) ==
640 !list_empty(&lp->lp_txq));
641
642 msg->msg_peertxcredit = 1;
643 lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
644 lp->lp_txcredits--;
645
646 if (lp->lp_txcredits < lp->lp_mintxcredits)
647 lp->lp_mintxcredits = lp->lp_txcredits;
648
649 if (lp->lp_txcredits < 0) {
650 msg->msg_tx_delayed = 1;
651 list_add_tail(&msg->msg_list, &lp->lp_txq);
652 return LNET_CREDIT_WAIT;
653 }
654 }
655
656 if (!msg->msg_txcredit) {
657 LASSERT((tq->tq_credits < 0) ==
658 !list_empty(&tq->tq_delayed));
659
660 msg->msg_txcredit = 1;
661 tq->tq_credits--;
662
663 if (tq->tq_credits < tq->tq_credits_min)
664 tq->tq_credits_min = tq->tq_credits;
665
666 if (tq->tq_credits < 0) {
667 msg->msg_tx_delayed = 1;
668 list_add_tail(&msg->msg_list, &tq->tq_delayed);
669 return LNET_CREDIT_WAIT;
670 }
671 }
672
673 if (do_send) {
674 lnet_net_unlock(cpt);
675 lnet_ni_send(ni, msg);
676 lnet_net_lock(cpt);
677 }
678 return LNET_CREDIT_OK;
679 }
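/*
 * Note added for clarity: a send must obtain two credits before it is
 * handed to the LND, a per-peer credit (lp_txcredits) and a per-NI,
 * per-CPT credit (tq_credits).  Whichever runs out first parks the
 * message on the corresponding queue with msg_tx_delayed set;
 * lnet_return_tx_credits_locked() below gives the credits back on
 * completion and immediately posts the next delayed message.
 */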
680
681 static lnet_rtrbufpool_t *
682 lnet_msg2bufpool(lnet_msg_t *msg)
683 {
684 lnet_rtrbufpool_t *rbp;
685 int cpt;
686
687 LASSERT(msg->msg_rx_committed);
688
689 cpt = msg->msg_rx_cpt;
690 rbp = &the_lnet.ln_rtrpools[cpt][0];
691
692 LASSERT(msg->msg_len <= LNET_MTU);
693 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
694 rbp++;
695 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
696 }
697
698 return rbp;
699 }
700
701 static int
702 lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
703 {
704 /*
705 * lnet_parse() calls lnet_net_unlock() immediately after this, so it
706 * passes do_recv as FALSE and skips the unlock/recv/lock sequence here.
707 * Returns LNET_CREDIT_WAIT if the msg blocked, and LNET_CREDIT_OK if it
708 * was received or is OK to receive.
709 */
710 lnet_peer_t *lp = msg->msg_rxpeer;
711 lnet_rtrbufpool_t *rbp;
712 lnet_rtrbuf_t *rb;
713
714 LASSERT(!msg->msg_iov);
715 LASSERT(!msg->msg_kiov);
716 LASSERT(!msg->msg_niov);
717 LASSERT(msg->msg_routing);
718 LASSERT(msg->msg_receiving);
719 LASSERT(!msg->msg_sending);
720
721 /* non-lnet_parse callers only receive delayed messages */
722 LASSERT(!do_recv || msg->msg_rx_delayed);
723
724 if (!msg->msg_peerrtrcredit) {
725 LASSERT((lp->lp_rtrcredits < 0) ==
726 !list_empty(&lp->lp_rtrq));
727
728 msg->msg_peerrtrcredit = 1;
729 lp->lp_rtrcredits--;
730 if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
731 lp->lp_minrtrcredits = lp->lp_rtrcredits;
732
733 if (lp->lp_rtrcredits < 0) {
734 /* must have checked eager_recv before here */
735 LASSERT(msg->msg_rx_ready_delay);
736 msg->msg_rx_delayed = 1;
737 list_add_tail(&msg->msg_list, &lp->lp_rtrq);
738 return LNET_CREDIT_WAIT;
739 }
740 }
741
742 rbp = lnet_msg2bufpool(msg);
743
744 if (!msg->msg_rtrcredit) {
745 msg->msg_rtrcredit = 1;
746 rbp->rbp_credits--;
747 if (rbp->rbp_credits < rbp->rbp_mincredits)
748 rbp->rbp_mincredits = rbp->rbp_credits;
749
750 if (rbp->rbp_credits < 0) {
751 /* must have checked eager_recv before here */
752 LASSERT(msg->msg_rx_ready_delay);
753 msg->msg_rx_delayed = 1;
754 list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
755 return LNET_CREDIT_WAIT;
756 }
757 }
758
759 LASSERT(!list_empty(&rbp->rbp_bufs));
760 rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
761 list_del(&rb->rb_list);
762
763 msg->msg_niov = rbp->rbp_npages;
764 msg->msg_kiov = &rb->rb_kiov[0];
765
766 if (do_recv) {
767 int cpt = msg->msg_rx_cpt;
768
769 lnet_net_unlock(cpt);
770 lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
771 0, msg->msg_len, msg->msg_len);
772 lnet_net_lock(cpt);
773 }
774 return LNET_CREDIT_OK;
775 }
776
777 void
778 lnet_return_tx_credits_locked(lnet_msg_t *msg)
779 {
780 lnet_peer_t *txpeer = msg->msg_txpeer;
781 lnet_msg_t *msg2;
782
783 if (msg->msg_txcredit) {
784 struct lnet_ni *ni = txpeer->lp_ni;
785 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
786
787 /* give back NI txcredits */
788 msg->msg_txcredit = 0;
789
790 LASSERT((tq->tq_credits < 0) ==
791 !list_empty(&tq->tq_delayed));
792
793 tq->tq_credits++;
794 if (tq->tq_credits <= 0) {
795 msg2 = list_entry(tq->tq_delayed.next,
796 lnet_msg_t, msg_list);
797 list_del(&msg2->msg_list);
798
799 LASSERT(msg2->msg_txpeer->lp_ni == ni);
800 LASSERT(msg2->msg_tx_delayed);
801
802 (void)lnet_post_send_locked(msg2, 1);
803 }
804 }
805
806 if (msg->msg_peertxcredit) {
807 /* give back peer txcredits */
808 msg->msg_peertxcredit = 0;
809
810 LASSERT((txpeer->lp_txcredits < 0) ==
811 !list_empty(&txpeer->lp_txq));
812
813 txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
814 LASSERT(txpeer->lp_txqnob >= 0);
815
816 txpeer->lp_txcredits++;
817 if (txpeer->lp_txcredits <= 0) {
818 msg2 = list_entry(txpeer->lp_txq.next,
819 lnet_msg_t, msg_list);
820 list_del(&msg2->msg_list);
821
822 LASSERT(msg2->msg_txpeer == txpeer);
823 LASSERT(msg2->msg_tx_delayed);
824
825 (void)lnet_post_send_locked(msg2, 1);
826 }
827 }
828
829 if (txpeer) {
830 msg->msg_txpeer = NULL;
831 lnet_peer_decref_locked(txpeer);
832 }
833 }
834
835 void
836 lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
837 {
838 lnet_msg_t *msg;
839
840 if (list_empty(&rbp->rbp_msgs))
841 return;
842 msg = list_entry(rbp->rbp_msgs.next,
843 lnet_msg_t, msg_list);
844 list_del(&msg->msg_list);
845
846 (void)lnet_post_routed_recv_locked(msg, 1);
847 }
848
849 void
850 lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
851 {
852 struct list_head drop;
853 lnet_msg_t *msg;
854 lnet_msg_t *tmp;
855
856 INIT_LIST_HEAD(&drop);
857
858 list_splice_init(list, &drop);
859
860 lnet_net_unlock(cpt);
861
862 list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
863 lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
864 0, 0, 0, msg->msg_hdr.payload_length);
865 list_del_init(&msg->msg_list);
866 lnet_finalize(NULL, msg, -ECANCELED);
867 }
868
869 lnet_net_lock(cpt);
870 }
871
872 void
873 lnet_return_rx_credits_locked(lnet_msg_t *msg)
874 {
875 lnet_peer_t *rxpeer = msg->msg_rxpeer;
876 lnet_msg_t *msg2;
877
878 if (msg->msg_rtrcredit) {
879 /* give back global router credits */
880 lnet_rtrbuf_t *rb;
881 lnet_rtrbufpool_t *rbp;
882
883 /*
884 * NB If a msg ever blocks for a buffer in rbp_msgs, it stays
885 * there until it gets one allocated, or aborts the wait
886 * itself
887 */
888 LASSERT(msg->msg_kiov);
889
890 rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
891 rbp = rb->rb_pool;
892
893 msg->msg_kiov = NULL;
894 msg->msg_rtrcredit = 0;
895
896 LASSERT(rbp == lnet_msg2bufpool(msg));
897
898 LASSERT((rbp->rbp_credits > 0) ==
899 !list_empty(&rbp->rbp_bufs));
900
901 /*
902 * If routing is now turned off, we just drop this buffer and
903 * don't bother trying to return credits.
904 */
905 if (!the_lnet.ln_routing) {
906 lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
907 goto routing_off;
908 }
909
910 /*
911 * It is possible that a user has lowered the desired number of
912 * buffers in this pool. Make sure we never put back
913 * more buffers than the stated number.
914 */
915 if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
916 /* Discard this buffer so we don't have too many. */
917 lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
918 rbp->rbp_nbuffers--;
919 } else {
920 list_add(&rb->rb_list, &rbp->rbp_bufs);
921 rbp->rbp_credits++;
922 if (rbp->rbp_credits <= 0)
923 lnet_schedule_blocked_locked(rbp);
924 }
925 }
926
927 routing_off:
928 if (msg->msg_peerrtrcredit) {
929 /* give back peer router credits */
930 msg->msg_peerrtrcredit = 0;
931
932 LASSERT((rxpeer->lp_rtrcredits < 0) ==
933 !list_empty(&rxpeer->lp_rtrq));
934
935 rxpeer->lp_rtrcredits++;
936 /*
937 * drop all messages which are queued to be routed on that
938 * peer.
939 */
940 if (!the_lnet.ln_routing) {
941 lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
942 msg->msg_rx_cpt);
943 } else if (rxpeer->lp_rtrcredits <= 0) {
944 msg2 = list_entry(rxpeer->lp_rtrq.next,
945 lnet_msg_t, msg_list);
946 list_del(&msg2->msg_list);
947
948 (void)lnet_post_routed_recv_locked(msg2, 1);
949 }
950 }
951 if (rxpeer) {
952 msg->msg_rxpeer = NULL;
953 lnet_peer_decref_locked(rxpeer);
954 }
955 }
956
957 static int
958 lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
959 {
960 lnet_peer_t *p1 = r1->lr_gateway;
961 lnet_peer_t *p2 = r2->lr_gateway;
962 int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
963 int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
964
965 if (r1->lr_priority < r2->lr_priority)
966 return 1;
967
968 if (r1->lr_priority > r2->lr_priority)
969 return -ERANGE;
970
971 if (r1_hops < r2_hops)
972 return 1;
973
974 if (r1_hops > r2_hops)
975 return -ERANGE;
976
977 if (p1->lp_txqnob < p2->lp_txqnob)
978 return 1;
979
980 if (p1->lp_txqnob > p2->lp_txqnob)
981 return -ERANGE;
982
983 if (p1->lp_txcredits > p2->lp_txcredits)
984 return 1;
985
986 if (p1->lp_txcredits < p2->lp_txcredits)
987 return -ERANGE;
988
989 if (r1->lr_seq - r2->lr_seq <= 0)
990 return 1;
991
992 return -ERANGE;
993 }
994
995 static lnet_peer_t *
996 lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
997 {
998 lnet_remotenet_t *rnet;
999 lnet_route_t *route;
1000 lnet_route_t *best_route;
1001 lnet_route_t *last_route;
1002 struct lnet_peer *lp_best;
1003 struct lnet_peer *lp;
1004 int rc;
1005
1006 /*
1007 * If @rtr_nid is not LNET_NID_ANY, return the gateway with
1008 * rtr_nid nid, otherwise find the best gateway I can use
1009 */
1010 rnet = lnet_find_net_locked(LNET_NIDNET(target));
1011 if (!rnet)
1012 return NULL;
1013
1014 lp_best = NULL;
1015 best_route = NULL;
1016 last_route = NULL;
1017 list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
1018 lp = route->lr_gateway;
1019
1020 if (!lnet_is_route_alive(route))
1021 continue;
1022
1023 if (ni && lp->lp_ni != ni)
1024 continue;
1025
1026 if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
1027 return lp;
1028
1029 if (!lp_best) {
1030 best_route = route;
1031 last_route = route;
1032 lp_best = lp;
1033 continue;
1034 }
1035
1036 /* no protection on below fields, but it's harmless */
1037 if (last_route->lr_seq - route->lr_seq < 0)
1038 last_route = route;
1039
1040 rc = lnet_compare_routes(route, best_route);
1041 if (rc < 0)
1042 continue;
1043
1044 best_route = route;
1045 lp_best = lp;
1046 }
1047
1048 /*
1049 * set sequence number on the best router to the latest sequence + 1
1050 * so we can round-robin all routers; this is racy and inaccurate, but
1051 * harmless and functional
1052 */
1053 if (best_route)
1054 best_route->lr_seq = last_route->lr_seq + 1;
1055 return lp_best;
1056 }
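/*
 * Note added for clarity: lnet_compare_routes() above ranks candidate
 * gateways by route priority first, then hop count, then bytes already
 * queued on the gateway (lp_txqnob), then available tx credits, and
 * finally by lr_seq so that otherwise equal routers are used round-robin.
 * lnet_find_route_locked() bumps the winner's lr_seq to keep that
 * rotation going.
 */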
1057
1058 int
1059 lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
1060 {
1061 lnet_nid_t dst_nid = msg->msg_target.nid;
1062 struct lnet_ni *src_ni;
1063 struct lnet_ni *local_ni;
1064 struct lnet_peer *lp;
1065 int cpt;
1066 int cpt2;
1067 int rc;
1068
1069 /*
1070 * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
1071 * but we might want to use pre-determined router for ACK/REPLY
1072 * in the future
1073 */
1074 /* NB: ni == interface pre-determined (ACK/REPLY) */
1075 LASSERT(!msg->msg_txpeer);
1076 LASSERT(!msg->msg_sending);
1077 LASSERT(!msg->msg_target_is_router);
1078 LASSERT(!msg->msg_receiving);
1079
1080 msg->msg_sending = 1;
1081
1082 LASSERT(!msg->msg_tx_committed);
1083 cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
1084 again:
1085 lnet_net_lock(cpt);
1086
1087 if (the_lnet.ln_shutdown) {
1088 lnet_net_unlock(cpt);
1089 return -ESHUTDOWN;
1090 }
1091
1092 if (src_nid == LNET_NID_ANY) {
1093 src_ni = NULL;
1094 } else {
1095 src_ni = lnet_nid2ni_locked(src_nid, cpt);
1096 if (!src_ni) {
1097 lnet_net_unlock(cpt);
1098 LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
1099 libcfs_nid2str(dst_nid),
1100 libcfs_nid2str(src_nid));
1101 return -EINVAL;
1102 }
1103 LASSERT(!msg->msg_routing);
1104 }
1105
1106 /* Is this for someone on a local network? */
1107 local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);
1108
1109 if (local_ni) {
1110 if (!src_ni) {
1111 src_ni = local_ni;
1112 src_nid = src_ni->ni_nid;
1113 } else if (src_ni == local_ni) {
1114 lnet_ni_decref_locked(local_ni, cpt);
1115 } else {
1116 lnet_ni_decref_locked(local_ni, cpt);
1117 lnet_ni_decref_locked(src_ni, cpt);
1118 lnet_net_unlock(cpt);
1119 LCONSOLE_WARN("No route to %s via from %s\n",
1120 libcfs_nid2str(dst_nid),
1121 libcfs_nid2str(src_nid));
1122 return -EINVAL;
1123 }
1124
1125 LASSERT(src_nid != LNET_NID_ANY);
1126 lnet_msg_commit(msg, cpt);
1127
1128 if (!msg->msg_routing)
1129 msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
1130
1131 if (src_ni == the_lnet.ln_loni) {
1132 /* No send credit hassles with LOLND */
1133 lnet_net_unlock(cpt);
1134 lnet_ni_send(src_ni, msg);
1135
1136 lnet_net_lock(cpt);
1137 lnet_ni_decref_locked(src_ni, cpt);
1138 lnet_net_unlock(cpt);
1139 return 0;
1140 }
1141
1142 rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
1143 /* lp has ref on src_ni; lose mine */
1144 lnet_ni_decref_locked(src_ni, cpt);
1145 if (rc) {
1146 lnet_net_unlock(cpt);
1147 LCONSOLE_WARN("Error %d finding peer %s\n", rc,
1148 libcfs_nid2str(dst_nid));
1149 /* ENOMEM or shutting down */
1150 return rc;
1151 }
1152 LASSERT(lp->lp_ni == src_ni);
1153 } else {
1154 /* sending to a remote network */
1155 lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
1156 if (!lp) {
1157 if (src_ni)
1158 lnet_ni_decref_locked(src_ni, cpt);
1159 lnet_net_unlock(cpt);
1160
1161 LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
1162 libcfs_id2str(msg->msg_target),
1163 libcfs_nid2str(src_nid));
1164 return -EHOSTUNREACH;
1165 }
1166
1167 /*
1168 * rtr_nid is LNET_NID_ANY or the NID of a pre-determined router.
1169 * It's possible that rtr_nid isn't LNET_NID_ANY and yet lp isn't the
1170 * pre-determined router; this can happen if the routing table was
1171 * changed while we had released the lock.
1172 */
1173 if (rtr_nid != lp->lp_nid) {
1174 cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
1175 if (cpt2 != cpt) {
1176 if (src_ni)
1177 lnet_ni_decref_locked(src_ni, cpt);
1178 lnet_net_unlock(cpt);
1179
1180 rtr_nid = lp->lp_nid;
1181 cpt = cpt2;
1182 goto again;
1183 }
1184 }
1185
1186 CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
1187 libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
1188 lnet_msgtyp2str(msg->msg_type), msg->msg_len);
1189
1190 if (!src_ni) {
1191 src_ni = lp->lp_ni;
1192 src_nid = src_ni->ni_nid;
1193 } else {
1194 LASSERT(src_ni == lp->lp_ni);
1195 lnet_ni_decref_locked(src_ni, cpt);
1196 }
1197
1198 lnet_peer_addref_locked(lp);
1199
1200 LASSERT(src_nid != LNET_NID_ANY);
1201 lnet_msg_commit(msg, cpt);
1202
1203 if (!msg->msg_routing) {
1204 /* I'm the source and now I know which NI to send on */
1205 msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
1206 }
1207
1208 msg->msg_target_is_router = 1;
1209 msg->msg_target.nid = lp->lp_nid;
1210 msg->msg_target.pid = LNET_PID_LUSTRE;
1211 }
1212
1213 /* 'lp' is our best choice of peer */
1214
1215 LASSERT(!msg->msg_peertxcredit);
1216 LASSERT(!msg->msg_txcredit);
1217 LASSERT(!msg->msg_txpeer);
1218
1219 msg->msg_txpeer = lp; /* msg takes my ref on lp */
1220
1221 rc = lnet_post_send_locked(msg, 0);
1222 lnet_net_unlock(cpt);
1223
1224 if (rc < 0)
1225 return rc;
1226
1227 if (rc == LNET_CREDIT_OK)
1228 lnet_ni_send(src_ni, msg);
1229
1230 return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
1231 }
1232
1233 void
1234 lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
1235 {
1236 lnet_net_lock(cpt);
1237 the_lnet.ln_counters[cpt]->drop_count++;
1238 the_lnet.ln_counters[cpt]->drop_length += nob;
1239 lnet_net_unlock(cpt);
1240
1241 lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
1242 }
1243
1244 static void
1245 lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
1246 {
1247 lnet_hdr_t *hdr = &msg->msg_hdr;
1248
1249 if (msg->msg_wanted)
1250 lnet_setpayloadbuffer(msg);
1251
1252 lnet_build_msg_event(msg, LNET_EVENT_PUT);
1253
1254 /*
1255 * Must I ACK? If so I'll grab the ack_wmd out of the header and put
1256 * it back into the ACK during lnet_finalize()
1257 */
1258 msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
1259 !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE);
1260
1261 lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
1262 msg->msg_offset, msg->msg_wanted, hdr->payload_length);
1263 }
1264
1265 static int
1266 lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
1267 {
1268 lnet_hdr_t *hdr = &msg->msg_hdr;
1269 struct lnet_match_info info;
1270 bool ready_delay;
1271 int rc;
1272
1273 /* Convert put fields to host byte order */
1274 hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
1275 hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
1276 hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);
1277
1278 info.mi_id.nid = hdr->src_nid;
1279 info.mi_id.pid = hdr->src_pid;
1280 info.mi_opc = LNET_MD_OP_PUT;
1281 info.mi_portal = hdr->msg.put.ptl_index;
1282 info.mi_rlength = hdr->payload_length;
1283 info.mi_roffset = hdr->msg.put.offset;
1284 info.mi_mbits = hdr->msg.put.match_bits;
1285
1286 msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv;
1287 ready_delay = msg->msg_rx_ready_delay;
1288
1289 again:
1290 rc = lnet_ptl_match_md(&info, msg);
1291 switch (rc) {
1292 default:
1293 LBUG();
1294
1295 case LNET_MATCHMD_OK:
1296 lnet_recv_put(ni, msg);
1297 return 0;
1298
1299 case LNET_MATCHMD_NONE:
1300 /*
1301 * No eager_recv, or it has already been called; the message should
1302 * have been attached to the delayed list.
1303 */
1304 if (ready_delay)
1305 return 0;
1306
1307 rc = lnet_ni_eager_recv(ni, msg);
1308 if (!rc) {
1309 ready_delay = true;
1310 goto again;
1311 }
1312 /* fall through */
1313
1314 case LNET_MATCHMD_DROP:
1315 CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
1316 libcfs_id2str(info.mi_id), info.mi_portal,
1317 info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
1318
1319 return -ENOENT; /* -ve: OK but no match */
1320 }
1321 }
1322
1323 static int
1324 lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
1325 {
1326 struct lnet_match_info info;
1327 lnet_hdr_t *hdr = &msg->msg_hdr;
1328 lnet_handle_wire_t reply_wmd;
1329 int rc;
1330
1331 /* Convert get fields to host byte order */
1332 hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
1333 hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
1334 hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
1335 hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);
1336
1337 info.mi_id.nid = hdr->src_nid;
1338 info.mi_id.pid = hdr->src_pid;
1339 info.mi_opc = LNET_MD_OP_GET;
1340 info.mi_portal = hdr->msg.get.ptl_index;
1341 info.mi_rlength = hdr->msg.get.sink_length;
1342 info.mi_roffset = hdr->msg.get.src_offset;
1343 info.mi_mbits = hdr->msg.get.match_bits;
1344
1345 rc = lnet_ptl_match_md(&info, msg);
1346 if (rc == LNET_MATCHMD_DROP) {
1347 CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
1348 libcfs_id2str(info.mi_id), info.mi_portal,
1349 info.mi_mbits, info.mi_roffset, info.mi_rlength);
1350 return -ENOENT; /* -ve: OK but no match */
1351 }
1352
1353 LASSERT(rc == LNET_MATCHMD_OK);
1354
1355 lnet_build_msg_event(msg, LNET_EVENT_GET);
1356
1357 reply_wmd = hdr->msg.get.return_wmd;
1358
1359 lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
1360 msg->msg_offset, msg->msg_wanted);
1361
1362 msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
1363
1364 if (rdma_get) {
1365 /* The LND completes the REPLY from its recv procedure */
1366 lnet_ni_recv(ni, msg->msg_private, msg, 0,
1367 msg->msg_offset, msg->msg_len, msg->msg_len);
1368 return 0;
1369 }
1370
1371 lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
1372 msg->msg_receiving = 0;
1373
1374 rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
1375 if (rc < 0) {
1376 /* didn't get as far as lnet_ni_send() */
1377 CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
1378 libcfs_nid2str(ni->ni_nid),
1379 libcfs_id2str(info.mi_id), rc);
1380
1381 lnet_finalize(ni, msg, rc);
1382 }
1383
1384 return 0;
1385 }
1386
1387 static int
1388 lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
1389 {
1390 void *private = msg->msg_private;
1391 lnet_hdr_t *hdr = &msg->msg_hdr;
1392 lnet_process_id_t src = {0};
1393 lnet_libmd_t *md;
1394 int rlength;
1395 int mlength;
1396 int cpt;
1397
1398 cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
1399 lnet_res_lock(cpt);
1400
1401 src.nid = hdr->src_nid;
1402 src.pid = hdr->src_pid;
1403
1404 /* NB handles only looked up by creator (no flips) */
1405 md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
1406 if (!md || !md->md_threshold || md->md_me) {
1407 CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
1408 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1409 !md ? "invalid" : "inactive",
1410 hdr->msg.reply.dst_wmd.wh_interface_cookie,
1411 hdr->msg.reply.dst_wmd.wh_object_cookie);
1412 if (md && md->md_me)
1413 CERROR("REPLY MD also attached to portal %d\n",
1414 md->md_me->me_portal);
1415
1416 lnet_res_unlock(cpt);
1417 return -ENOENT; /* -ve: OK but no match */
1418 }
1419
1420 LASSERT(!md->md_offset);
1421
1422 rlength = hdr->payload_length;
1423 mlength = min_t(uint, rlength, md->md_length);
1424
1425 if (mlength < rlength &&
1426 !(md->md_options & LNET_MD_TRUNCATE)) {
1427 CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
1428 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1429 rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
1430 mlength);
1431 lnet_res_unlock(cpt);
1432 return -ENOENT; /* -ve: OK but no match */
1433 }
1434
1435 CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
1436 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1437 mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
1438
1439 lnet_msg_attach_md(msg, md, 0, mlength);
1440
1441 if (mlength)
1442 lnet_setpayloadbuffer(msg);
1443
1444 lnet_res_unlock(cpt);
1445
1446 lnet_build_msg_event(msg, LNET_EVENT_REPLY);
1447
1448 lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
1449 return 0;
1450 }
1451
1452 static int
1453 lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
1454 {
1455 lnet_hdr_t *hdr = &msg->msg_hdr;
1456 lnet_process_id_t src = {0};
1457 lnet_libmd_t *md;
1458 int cpt;
1459
1460 src.nid = hdr->src_nid;
1461 src.pid = hdr->src_pid;
1462
1463 /* Convert ack fields to host byte order */
1464 hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
1465 hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
1466
1467 cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
1468 lnet_res_lock(cpt);
1469
1470 /* NB handles only looked up by creator (no flips) */
1471 md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
1472 if (!md || !md->md_threshold || md->md_me) {
1473 /* Don't moan; this is expected */
1474 CDEBUG(D_NET,
1475 "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
1476 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1477 !md ? "invalid" : "inactive",
1478 hdr->msg.ack.dst_wmd.wh_interface_cookie,
1479 hdr->msg.ack.dst_wmd.wh_object_cookie);
1480 if (md && md->md_me)
1481 CERROR("Source MD also attached to portal %d\n",
1482 md->md_me->me_portal);
1483
1484 lnet_res_unlock(cpt);
1485 return -ENOENT; /* -ve! */
1486 }
1487
1488 CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
1489 libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
1490 hdr->msg.ack.dst_wmd.wh_object_cookie);
1491
1492 lnet_msg_attach_md(msg, md, 0, 0);
1493
1494 lnet_res_unlock(cpt);
1495
1496 lnet_build_msg_event(msg, LNET_EVENT_ACK);
1497
1498 lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
1499 return 0;
1500 }
1501
1502 /**
1503 * \retval LNET_CREDIT_OK If \a msg is forwarded
1504 * \retval LNET_CREDIT_WAIT If \a msg is blocked because no buffer is available.
1505 * \retval -ve error code
1506 */
1507 int
1508 lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
1509 {
1510 int rc = 0;
1511
1512 if (!the_lnet.ln_routing)
1513 return -ECANCELED;
1514
1515 if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
1516 lnet_msg2bufpool(msg)->rbp_credits <= 0) {
1517 if (!ni->ni_lnd->lnd_eager_recv) {
1518 msg->msg_rx_ready_delay = 1;
1519 } else {
1520 lnet_net_unlock(msg->msg_rx_cpt);
1521 rc = lnet_ni_eager_recv(ni, msg);
1522 lnet_net_lock(msg->msg_rx_cpt);
1523 }
1524 }
1525
1526 if (!rc)
1527 rc = lnet_post_routed_recv_locked(msg, 0);
1528 return rc;
1529 }
1530
1531 int
1532 lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg)
1533 {
1534 int rc;
1535
1536 switch (msg->msg_type) {
1537 case LNET_MSG_ACK:
1538 rc = lnet_parse_ack(ni, msg);
1539 break;
1540 case LNET_MSG_PUT:
1541 rc = lnet_parse_put(ni, msg);
1542 break;
1543 case LNET_MSG_GET:
1544 rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
1545 break;
1546 case LNET_MSG_REPLY:
1547 rc = lnet_parse_reply(ni, msg);
1548 break;
1549 default: /* prevent an unused label if !kernel */
1550 LASSERT(0);
1551 return -EPROTO;
1552 }
1553
1554 LASSERT(!rc || rc == -ENOENT);
1555 return rc;
1556 }
1557
1558 char *
1559 lnet_msgtyp2str(int type)
1560 {
1561 switch (type) {
1562 case LNET_MSG_ACK:
1563 return "ACK";
1564 case LNET_MSG_PUT:
1565 return "PUT";
1566 case LNET_MSG_GET:
1567 return "GET";
1568 case LNET_MSG_REPLY:
1569 return "REPLY";
1570 case LNET_MSG_HELLO:
1571 return "HELLO";
1572 default:
1573 return "<UNKNOWN>";
1574 }
1575 }
1576
1577 void
1578 lnet_print_hdr(lnet_hdr_t *hdr)
1579 {
1580 lnet_process_id_t src = {0};
1581 lnet_process_id_t dst = {0};
1582 char *type_str = lnet_msgtyp2str(hdr->type);
1583
1584 src.nid = hdr->src_nid;
1585 src.pid = hdr->src_pid;
1586
1587 dst.nid = hdr->dest_nid;
1588 dst.pid = hdr->dest_pid;
1589
1590 CWARN("P3 Header at %p of type %s\n", hdr, type_str);
1591 CWARN(" From %s\n", libcfs_id2str(src));
1592 CWARN(" To %s\n", libcfs_id2str(dst));
1593
1594 switch (hdr->type) {
1595 default:
1596 break;
1597
1598 case LNET_MSG_PUT:
1599 CWARN(" Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
1600 hdr->msg.put.ptl_index,
1601 hdr->msg.put.ack_wmd.wh_interface_cookie,
1602 hdr->msg.put.ack_wmd.wh_object_cookie,
1603 hdr->msg.put.match_bits);
1604 CWARN(" Length %d, offset %d, hdr data %#llx\n",
1605 hdr->payload_length, hdr->msg.put.offset,
1606 hdr->msg.put.hdr_data);
1607 break;
1608
1609 case LNET_MSG_GET:
1610 CWARN(" Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
1611 hdr->msg.get.ptl_index,
1612 hdr->msg.get.return_wmd.wh_interface_cookie,
1613 hdr->msg.get.return_wmd.wh_object_cookie,
1614 hdr->msg.get.match_bits);
1615 CWARN(" Length %d, src offset %d\n",
1616 hdr->msg.get.sink_length,
1617 hdr->msg.get.src_offset);
1618 break;
1619
1620 case LNET_MSG_ACK:
1621 CWARN(" dst md %#llx.%#llx, manipulated length %d\n",
1622 hdr->msg.ack.dst_wmd.wh_interface_cookie,
1623 hdr->msg.ack.dst_wmd.wh_object_cookie,
1624 hdr->msg.ack.mlength);
1625 break;
1626
1627 case LNET_MSG_REPLY:
1628 CWARN(" dst md %#llx.%#llx, length %d\n",
1629 hdr->msg.reply.dst_wmd.wh_interface_cookie,
1630 hdr->msg.reply.dst_wmd.wh_object_cookie,
1631 hdr->payload_length);
1632 }
1633 }
1634
1635 int
1636 lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
1637 void *private, int rdma_req)
1638 {
1639 int rc = 0;
1640 int cpt;
1641 int for_me;
1642 struct lnet_msg *msg;
1643 lnet_pid_t dest_pid;
1644 lnet_nid_t dest_nid;
1645 lnet_nid_t src_nid;
1646 __u32 payload_length;
1647 __u32 type;
1648
1649 LASSERT(!in_interrupt());
1650
1651 type = le32_to_cpu(hdr->type);
1652 src_nid = le64_to_cpu(hdr->src_nid);
1653 dest_nid = le64_to_cpu(hdr->dest_nid);
1654 dest_pid = le32_to_cpu(hdr->dest_pid);
1655 payload_length = le32_to_cpu(hdr->payload_length);
1656
1657 for_me = (ni->ni_nid == dest_nid);
1658 cpt = lnet_cpt_of_nid(from_nid);
1659
1660 switch (type) {
1661 case LNET_MSG_ACK:
1662 case LNET_MSG_GET:
1663 if (payload_length > 0) {
1664 CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
1665 libcfs_nid2str(from_nid),
1666 libcfs_nid2str(src_nid),
1667 lnet_msgtyp2str(type), payload_length);
1668 return -EPROTO;
1669 }
1670 break;
1671
1672 case LNET_MSG_PUT:
1673 case LNET_MSG_REPLY:
1674 if (payload_length >
1675 (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
1676 CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
1677 libcfs_nid2str(from_nid),
1678 libcfs_nid2str(src_nid),
1679 lnet_msgtyp2str(type),
1680 payload_length,
1681 for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
1682 return -EPROTO;
1683 }
1684 break;
1685
1686 default:
1687 CERROR("%s, src %s: Bad message type 0x%x\n",
1688 libcfs_nid2str(from_nid),
1689 libcfs_nid2str(src_nid), type);
1690 return -EPROTO;
1691 }
1692
1693 if (the_lnet.ln_routing &&
1694 ni->ni_last_alive != ktime_get_real_seconds()) {
1695 /* NB: so far this is the only place where NI status is set to "up" */
1696 lnet_ni_lock(ni);
1697 ni->ni_last_alive = ktime_get_real_seconds();
1698 if (ni->ni_status &&
1699 ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
1700 ni->ni_status->ns_status = LNET_NI_STATUS_UP;
1701 lnet_ni_unlock(ni);
1702 }
1703
1704 /*
1705 * Regard a bad destination NID as a protocol error. Senders should
1706 * know what they're doing; if they don't they're misconfigured, buggy
1707 * or malicious so we chop them off at the knees :)
1708 */
1709 if (!for_me) {
1710 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
1711 /* should have gone direct */
1712 CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
1713 libcfs_nid2str(from_nid),
1714 libcfs_nid2str(src_nid),
1715 libcfs_nid2str(dest_nid));
1716 return -EPROTO;
1717 }
1718
1719 if (lnet_islocalnid(dest_nid)) {
1720 /*
1721 * dest is another local NI; sender should have used
1722 * this node's NID on its own network
1723 */
1724 CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
1725 libcfs_nid2str(from_nid),
1726 libcfs_nid2str(src_nid),
1727 libcfs_nid2str(dest_nid));
1728 return -EPROTO;
1729 }
1730
1731 if (rdma_req && type == LNET_MSG_GET) {
1732 CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
1733 libcfs_nid2str(from_nid),
1734 libcfs_nid2str(src_nid),
1735 libcfs_nid2str(dest_nid));
1736 return -EPROTO;
1737 }
1738
1739 if (!the_lnet.ln_routing) {
1740 CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
1741 libcfs_nid2str(from_nid),
1742 libcfs_nid2str(src_nid),
1743 libcfs_nid2str(dest_nid));
1744 goto drop;
1745 }
1746 }
1747
1748 /*
1749 * Message looks OK; we're not going to return an error, so we MUST
1750 * call back lnd_recv() come what may...
1751 */
1752 if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
1753 fail_peer(src_nid, 0)) { /* shall we now? */
1754 CERROR("%s, src %s: Dropping %s to simulate failure\n",
1755 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
1756 lnet_msgtyp2str(type));
1757 goto drop;
1758 }
1759
1760 if (!list_empty(&the_lnet.ln_drop_rules) &&
1761 lnet_drop_rule_match(hdr)) {
1762 CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
1763 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
1764 libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
1765 goto drop;
1766 }
1767
1768 msg = lnet_msg_alloc();
1769 if (!msg) {
1770 CERROR("%s, src %s: Dropping %s (out of memory)\n",
1771 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
1772 lnet_msgtyp2str(type));
1773 goto drop;
1774 }
1775
1776 /* msg zeroed in lnet_msg_alloc;
1777 * i.e. flags all clear, pointers NULL etc
1778 */
1779 msg->msg_type = type;
1780 msg->msg_private = private;
1781 msg->msg_receiving = 1;
1782 msg->msg_rdma_get = rdma_req;
1783 msg->msg_wanted = payload_length;
1784 msg->msg_len = payload_length;
1785 msg->msg_offset = 0;
1786 msg->msg_hdr = *hdr;
1787 /* for building message event */
1788 msg->msg_from = from_nid;
1789 if (!for_me) {
1790 msg->msg_target.pid = dest_pid;
1791 msg->msg_target.nid = dest_nid;
1792 msg->msg_routing = 1;
1793
1794 } else {
1795 /* convert common msg->hdr fields to host byteorder */
1796 msg->msg_hdr.type = type;
1797 msg->msg_hdr.src_nid = src_nid;
1798 msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
1799 msg->msg_hdr.dest_nid = dest_nid;
1800 msg->msg_hdr.dest_pid = dest_pid;
1801 msg->msg_hdr.payload_length = payload_length;
1802 }
1803
1804 lnet_net_lock(cpt);
1805 rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
1806 if (rc) {
1807 lnet_net_unlock(cpt);
1808 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
1809 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
1810 lnet_msgtyp2str(type), rc);
1811 lnet_msg_free(msg);
1812 if (rc == -ESHUTDOWN)
1813 /* We are shutting down. Don't do anything more */
1814 return 0;
1815 goto drop;
1816 }
1817
1818 if (lnet_isrouter(msg->msg_rxpeer)) {
1819 lnet_peer_set_alive(msg->msg_rxpeer);
1820 if (avoid_asym_router_failure &&
1821 LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
1822 /* received a remote message from a router; update the
1823 * remote NI status on this router.
1824 * NB: multi-hop routed messages are ignored.
1825 */
1826 lnet_router_ni_update_locked(msg->msg_rxpeer,
1827 LNET_NIDNET(src_nid));
1828 }
1829 }
1830
1831 lnet_msg_commit(msg, cpt);
1832
1833 /* message delay simulation */
1834 if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
1835 lnet_delay_rule_match_locked(hdr, msg))) {
1836 lnet_net_unlock(cpt);
1837 return 0;
1838 }
1839
1840 if (!for_me) {
1841 rc = lnet_parse_forward_locked(ni, msg);
1842 lnet_net_unlock(cpt);
1843
1844 if (rc < 0)
1845 goto free_drop;
1846
1847 if (rc == LNET_CREDIT_OK) {
1848 lnet_ni_recv(ni, msg->msg_private, msg, 0,
1849 0, payload_length, payload_length);
1850 }
1851 return 0;
1852 }
1853
1854 lnet_net_unlock(cpt);
1855
1856 rc = lnet_parse_local(ni, msg);
1857 if (rc)
1858 goto free_drop;
1859 return 0;
1860
1861 free_drop:
1862 LASSERT(!msg->msg_md);
1863 lnet_finalize(ni, msg, rc);
1864
1865 drop:
1866 lnet_drop_message(ni, cpt, private, payload_length);
1867 return 0;
1868 }
1869 EXPORT_SYMBOL(lnet_parse);
1870
1871 void
1872 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
1873 {
1874 while (!list_empty(head)) {
1875 lnet_process_id_t id = {0};
1876 lnet_msg_t *msg;
1877
1878 msg = list_entry(head->next, lnet_msg_t, msg_list);
1879 list_del(&msg->msg_list);
1880
1881 id.nid = msg->msg_hdr.src_nid;
1882 id.pid = msg->msg_hdr.src_pid;
1883
1884 LASSERT(!msg->msg_md);
1885 LASSERT(msg->msg_rx_delayed);
1886 LASSERT(msg->msg_rxpeer);
1887 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
1888
1889 CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
1890 libcfs_id2str(id),
1891 msg->msg_hdr.msg.put.ptl_index,
1892 msg->msg_hdr.msg.put.match_bits,
1893 msg->msg_hdr.msg.put.offset,
1894 msg->msg_hdr.payload_length, reason);
1895
1896 /*
1897 * NB I can't drop msg's ref on msg_rxpeer until after I've
1898 * called lnet_drop_message(), so I just hang onto msg as well
1899 * until that's done
1900 */
1901 lnet_drop_message(msg->msg_rxpeer->lp_ni,
1902 msg->msg_rxpeer->lp_cpt,
1903 msg->msg_private, msg->msg_len);
1904 /*
1905 * NB: the message will not generate an event because it has no
1906 * attached MD, but we still pass an error code so lnet_msg_decommit()
1907 * can skip counter updates and other checks.
1908 */
1909 lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
1910 }
1911 }
1912
1913 void
1914 lnet_recv_delayed_msg_list(struct list_head *head)
1915 {
1916 while (!list_empty(head)) {
1917 lnet_msg_t *msg;
1918 lnet_process_id_t id;
1919
1920 msg = list_entry(head->next, lnet_msg_t, msg_list);
1921 list_del(&msg->msg_list);
1922
1923 /*
1924 * md won't disappear under me, since each msg
1925 * holds a ref on it
1926 */
1927 id.nid = msg->msg_hdr.src_nid;
1928 id.pid = msg->msg_hdr.src_pid;
1929
1930 LASSERT(msg->msg_rx_delayed);
1931 LASSERT(msg->msg_md);
1932 LASSERT(msg->msg_rxpeer);
1933 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
1934
1935 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
1936 libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
1937 msg->msg_hdr.msg.put.match_bits,
1938 msg->msg_hdr.msg.put.offset,
1939 msg->msg_hdr.payload_length);
1940
1941 lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
1942 }
1943 }
1944
1945 /**
1946 * Initiate an asynchronous PUT operation.
1947 *
1948 * There are several events associated with a PUT: completion of the send on
1949 * the initiator node (LNET_EVENT_SEND), and when the send completes
1950 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
1951 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
1952 * used at the target node to indicate the completion of incoming data
1953 * delivery.
1954 *
1955 * The local events will be logged in the EQ associated with the MD pointed to
1956 * by \a mdh handle. Using a MD without an associated EQ results in these
1957 * events being discarded. In this case, the caller must have another
1958 * mechanism (e.g., a higher level protocol) for determining when it is safe
1959 * to modify the memory region associated with the MD.
1960 *
1961 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
1962 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
1963 *
1964 * \param self Indicates the NID of a local interface through which to send
1965 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
1966 * \param mdh A handle for the MD that describes the memory to be sent. The MD
1967 * must be "free floating" (See LNetMDBind()).
1968 * \param ack Controls whether an acknowledgment is requested.
1969 * Acknowledgments are only sent when they are requested by the initiating
1970 * process and the target MD enables them.
1971 * \param target A process identifier for the target process.
1972 * \param portal The index in the \a target's portal table.
1973 * \param match_bits The match bits to use for MD selection at the target
1974 * process.
1975 * \param offset The offset into the target MD (only used when the target
1976 * MD has the LNET_MD_MANAGE_REMOTE option set).
1977 * \param hdr_data 64 bits of user data that can be included in the message
1978 * header. This data is written to an event queue entry at the target if an
1979 * EQ is present on the matching MD.
1980 *
1981 * \retval 0 Success, and only in this case events will be generated
1982 * and logged to EQ (if it exists).
1983 * \retval -EIO Simulated failure.
1984 * \retval -ENOMEM Memory allocation failure.
1985 * \retval -ENOENT Invalid MD object.
1986 *
1987 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
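 *
 * A minimal usage sketch follows. It is illustrative only: the EQ handler
 * my_eq_callback, the buffer/buflen variables, the peer identifier and the
 * MY_PORTAL/MY_MATCH_BITS constants are assumptions, not values defined in
 * this file.
 * \code
 *	lnet_handle_eq_t eqh;
 *	lnet_handle_md_t mdh;
 *	lnet_md_t md = { 0 };
 *	int rc;
 *
 *	rc = LNetEQAlloc(64, my_eq_callback, &eqh); // SEND/ACK events land here
 *	if (rc)
 *		return rc;
 *
 *	md.start     = buffer;		// memory region to send from
 *	md.length    = buflen;
 *	md.threshold = 2;		// one SEND + one ACK event
 *	md.eq_handle = eqh;
 *
 *	rc = LNetMDBind(md, LNET_UNLINK, &mdh); // "free floating" MD
 *	if (rc)
 *		return rc;
 *
 *	rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, peer,
 *		     MY_PORTAL, MY_MATCH_BITS, 0, 0);
 *	// on success, my_eq_callback sees LNET_EVENT_SEND and LNET_EVENT_ACK
 * \endcode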
1988 */
1989 int
1990 LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
1991 lnet_process_id_t target, unsigned int portal,
1992 __u64 match_bits, unsigned int offset,
1993 __u64 hdr_data)
1994 {
1995 struct lnet_msg *msg;
1996 struct lnet_libmd *md;
1997 int cpt;
1998 int rc;
1999
2000 LASSERT(the_lnet.ln_refcount > 0);
2001
2002 if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
2003 fail_peer(target.nid, 1)) { /* shall we now? */
2004 CERROR("Dropping PUT to %s: simulated failure\n",
2005 libcfs_id2str(target));
2006 return -EIO;
2007 }
2008
2009 msg = lnet_msg_alloc();
2010 if (!msg) {
2011 CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
2012 libcfs_id2str(target));
2013 return -ENOMEM;
2014 }
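	/* remember whether the caller is sending while under memory pressure */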
2015 msg->msg_vmflush = !!memory_pressure_get();
2016
2017 cpt = lnet_cpt_of_cookie(mdh.cookie);
2018 lnet_res_lock(cpt);
2019
2020 md = lnet_handle2md(&mdh);
2021 if (!md || !md->md_threshold || md->md_me) {
2022 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
2023 match_bits, portal, libcfs_id2str(target),
2024 !md ? -1 : md->md_threshold);
2025 if (md && md->md_me)
2026 CERROR("Source MD also attached to portal %d\n",
2027 md->md_me->me_portal);
2028 lnet_res_unlock(cpt);
2029
2030 lnet_msg_free(msg);
2031 return -ENOENT;
2032 }
2033
2034 CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
2035
2036 lnet_msg_attach_md(msg, md, 0, 0);
2037
2038 lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
2039
2040 msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
2041 msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
2042 msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
2043 msg->msg_hdr.msg.put.hdr_data = hdr_data;
2044
2045 /* NB handles only looked up by creator (no flips) */
2046 if (ack == LNET_ACK_REQ) {
2047 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
2048 the_lnet.ln_interface_cookie;
2049 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
2050 md->md_lh.lh_cookie;
2051 } else {
2052 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
2053 LNET_WIRE_HANDLE_COOKIE_NONE;
2054 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
2055 LNET_WIRE_HANDLE_COOKIE_NONE;
2056 }
2057
2058 lnet_res_unlock(cpt);
2059
2060 lnet_build_msg_event(msg, LNET_EVENT_SEND);
2061
2062 rc = lnet_send(self, msg, LNET_NID_ANY);
2063 if (rc) {
2064 CNETERR("Error sending PUT to %s: %d\n",
2065 libcfs_id2str(target), rc);
2066 lnet_finalize(NULL, msg, rc);
2067 }
2068
2069 /* completion will be signalled by an event */
2070 return 0;
2071 }
2072 EXPORT_SYMBOL(LNetPut);
2073
2074 lnet_msg_t *
2075 lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
2076 {
2077 /*
2078 * The LND can DMA directly into the GET MD (i.e. no REPLY msg). This
2079 * returns a msg for the LND to pass to lnet_finalize() when the sink
2080 * data has been received.
2081 *
2082 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
2083 * lnet_finalize() is called on it, so the LND must call this first.
2084 */
2085 struct lnet_msg *msg = lnet_msg_alloc();
2086 struct lnet_libmd *getmd = getmsg->msg_md;
2087 lnet_process_id_t peer_id = getmsg->msg_target;
2088 int cpt;
2089
2090 LASSERT(!getmsg->msg_target_is_router);
2091 LASSERT(!getmsg->msg_routing);
2092
2093 if (!msg) {
2094 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
2095 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
2096 goto drop;
2097 }
2098
2099 cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
2100 lnet_res_lock(cpt);
2101
2102 LASSERT(getmd->md_refcount > 0);
2103
2104 if (!getmd->md_threshold) {
2105 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
2106 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
2107 getmd);
2108 lnet_res_unlock(cpt);
2109 goto drop;
2110 }
2111
2112 LASSERT(!getmd->md_offset);
2113
2114 CDEBUG(D_NET, "%s: Reply from %s md %p\n",
2115 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
2116
2117 /* setup information for lnet_build_msg_event */
2118 msg->msg_from = peer_id.nid;
2119 msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
2120 msg->msg_hdr.src_nid = peer_id.nid;
2121 msg->msg_hdr.payload_length = getmd->md_length;
2122 msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
2123
2124 lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
2125 lnet_res_unlock(cpt);
2126
2127 cpt = lnet_cpt_of_nid(peer_id.nid);
2128
2129 lnet_net_lock(cpt);
2130 lnet_msg_commit(msg, cpt);
2131 lnet_net_unlock(cpt);
2132
2133 lnet_build_msg_event(msg, LNET_EVENT_REPLY);
2134
2135 return msg;
2136
2137 drop:
2138 cpt = lnet_cpt_of_nid(peer_id.nid);
2139
2140 lnet_net_lock(cpt);
2141 the_lnet.ln_counters[cpt]->drop_count++;
2142 the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
2143 lnet_net_unlock(cpt);
2144
2145 if (msg)
2146 lnet_msg_free(msg);
2147
2148 return NULL;
2149 }
2150 EXPORT_SYMBOL(lnet_create_reply_msg);
2151
2152 void
2153 lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
2154 {
2155 /*
2156 * Set the REPLY length, now that the RDMA that elides the REPLY message
2157 * has completed and the actual length is known.
2158 */
2159 LASSERT(reply);
2160 LASSERT(reply->msg_type == LNET_MSG_GET);
2161 LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
2162
2163 /*
2164 * NB I trusted my peer to RDMA. If she tells me she's written beyond
2165 * the end of my buffer, I might as well be dead.
2166 */
2167 LASSERT(len <= reply->msg_ev.mlength);
2168
2169 reply->msg_ev.mlength = len;
2170 }
2171 EXPORT_SYMBOL(lnet_set_reply_msg_len);
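
/*
 * A minimal sketch of how an LND might use the two helpers above when it
 * RDMAs the sink data straight into the GET MD and elides the REPLY
 * message.  The names 'reply', 'nob' and 'status' are illustrative
 * assumptions rather than code taken from any particular LND:
 *
 *	// must run before anything that could finalize 'getmsg'
 *	reply = lnet_create_reply_msg(ni, getmsg);
 *	if (!reply)
 *		goto failed;
 *
 *	... issue the RDMA that lands the sink data in the GET MD ...
 *
 *	// on completion, 'nob' bytes were actually written
 *	lnet_set_reply_msg_len(ni, reply, nob);
 *	lnet_finalize(ni, getmsg, status);	// completes the SEND event
 *	lnet_finalize(ni, reply, status);	// completes the REPLY event
 */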
2172
2173 /**
2174 * Initiate an asynchronous GET operation.
2175 *
2176 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
2177 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
2178 * the target node in the REPLY has been written to local MD.
2179 *
2180 * On the target node, an LNET_EVENT_GET is logged when the GET request
2181 * arrives and is accepted into a MD.
2182 *
2183 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
2184 * \param mdh A handle for the MD that describes the memory into which the
2185 * requested data will be received. The MD must be "free floating"
2186 * (See LNetMDBind()).
2187 *
2188 * \retval 0 Success, and only in this case events will be generated
2189 * and logged to EQ (if it exists) of the MD.
2190 * \retval -EIO Simulated failure.
2191 * \retval -ENOMEM Memory allocation failure.
2192 * \retval -ENOENT Invalid MD object.
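 *
 * A minimal usage sketch (illustrative only; the sink buffer, EQ handle and
 * MY_PORTAL/MY_MATCH_BITS values are assumptions, as in the LNetPut()
 * example above):
 * \code
 *	md.start     = sink_buffer;	// where the REPLY data will land
 *	md.length    = sink_len;
 *	md.threshold = 2;		// one SEND + one REPLY event
 *	md.eq_handle = eqh;		// EQ allocated with LNetEQAlloc()
 *
 *	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
 *	if (!rc)
 *		rc = LNetGet(LNET_NID_ANY, mdh, peer,
 *			     MY_PORTAL, MY_MATCH_BITS, 0);
 *	// on success, expect LNET_EVENT_SEND and LNET_EVENT_REPLY in the EQ
 * \endcode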
2193 */
2194 int
2195 LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
2196 lnet_process_id_t target, unsigned int portal,
2197 __u64 match_bits, unsigned int offset)
2198 {
2199 struct lnet_msg *msg;
2200 struct lnet_libmd *md;
2201 int cpt;
2202 int rc;
2203
2204 LASSERT(the_lnet.ln_refcount > 0);
2205
2206 if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
2207 fail_peer(target.nid, 1)) { /* shall we now? */
2208 CERROR("Dropping GET to %s: simulated failure\n",
2209 libcfs_id2str(target));
2210 return -EIO;
2211 }
2212
2213 msg = lnet_msg_alloc();
2214 if (!msg) {
2215 CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
2216 libcfs_id2str(target));
2217 return -ENOMEM;
2218 }
2219
2220 cpt = lnet_cpt_of_cookie(mdh.cookie);
2221 lnet_res_lock(cpt);
2222
2223 md = lnet_handle2md(&mdh);
2224 if (!md || !md->md_threshold || md->md_me) {
2225 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
2226 match_bits, portal, libcfs_id2str(target),
2227 !md ? -1 : md->md_threshold);
2228 if (md && md->md_me)
2229 CERROR("REPLY MD also attached to portal %d\n",
2230 md->md_me->me_portal);
2231
2232 lnet_res_unlock(cpt);
2233
2234 lnet_msg_free(msg);
2235 return -ENOENT;
2236 }
2237
2238 CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
2239
2240 lnet_msg_attach_md(msg, md, 0, 0);
2241
2242 lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
2243
2244 msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
2245 msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
2246 msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
2247 msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
2248
2249 /* NB handles only looked up by creator (no flips) */
2250 msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
2251 the_lnet.ln_interface_cookie;
2252 msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
2253 md->md_lh.lh_cookie;
2254
2255 lnet_res_unlock(cpt);
2256
2257 lnet_build_msg_event(msg, LNET_EVENT_SEND);
2258
2259 rc = lnet_send(self, msg, LNET_NID_ANY);
2260 if (rc < 0) {
2261 CNETERR("Error sending GET to %s: %d\n",
2262 libcfs_id2str(target), rc);
2263 lnet_finalize(NULL, msg, rc);
2264 }
2265
2266 /* completion will be signalled by an event */
2267 return 0;
2268 }
2269 EXPORT_SYMBOL(LNetGet);
2270
2271 /**
2272 * Calculate distance to node at \a dstnid.
2273 *
2274 * \param dstnid Target NID.
2275 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
2276 * is saved here.
2277 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
2278 * here.
2279 *
2280 * \retval 0 If \a dstnid belongs to a local interface, and reserved option
2281 * local_nid_dist_zero is set, which is the default.
2282 * \retval >0 Distance to the target NID, i.e. the number of hops plus one.
2283 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
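 *
 * A minimal usage sketch (illustrative only; the handle_local() and
 * handle_remote() helpers are hypothetical placeholders for whatever the
 * caller does with the result):
 * \code
 *	lnet_nid_t src;
 *	__u32 order;
 *	int dist = LNetDist(dstnid, &src, &order);
 *
 *	if (dist < 0)
 *		return dist;		// -EHOSTUNREACH: no route to dstnid
 *	else if (dist == 0)
 *		handle_local(src);	// dstnid is a local interface
 *	else
 *		handle_remote(src, dist); // dist == hops + 1, reached via 'src'
 * \endcode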
2284 */
2285 int
2286 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
2287 {
2288 struct list_head *e;
2289 struct lnet_ni *ni;
2290 lnet_remotenet_t *rnet;
2291 __u32 dstnet = LNET_NIDNET(dstnid);
2292 int hops;
2293 int cpt;
2294 __u32 order = 2;
2295 struct list_head *rn_list;
2296
2297 /*
2298 * if !local_nid_dist_zero, I never return a distance of 0
2299 * (when Lustre sees a distance of 0, it substitutes 0@lo), so I
2300 * keep order 0 free for 0@lo and order 1 free for a local NID
2301 * match
2302 */
2303 LASSERT(the_lnet.ln_refcount > 0);
2304
2305 cpt = lnet_net_lock_current();
2306
2307 list_for_each(e, &the_lnet.ln_nis) {
2308 ni = list_entry(e, lnet_ni_t, ni_list);
2309
2310 if (ni->ni_nid == dstnid) {
2311 if (srcnidp)
2312 *srcnidp = dstnid;
2313 if (orderp) {
2314 if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
2315 *orderp = 0;
2316 else
2317 *orderp = 1;
2318 }
2319 lnet_net_unlock(cpt);
2320
2321 return local_nid_dist_zero ? 0 : 1;
2322 }
2323
2324 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
2325 /*
2326 * Check if ni was originally created in
2327 * current net namespace.
2328 * If not, bump the order above 0xffff0000
2329 * so this ni is not preferred.
2330 */
2331 if (!net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
2332 order += 0xffff0000;
2333
2334 if (srcnidp)
2335 *srcnidp = ni->ni_nid;
2336 if (orderp)
2337 *orderp = order;
2338 lnet_net_unlock(cpt);
2339 return 1;
2340 }
2341
2342 order++;
2343 }
2344
2345 rn_list = lnet_net2rnethash(dstnet);
2346 list_for_each(e, rn_list) {
2347 rnet = list_entry(e, lnet_remotenet_t, lrn_list);
2348
2349 if (rnet->lrn_net == dstnet) {
2350 lnet_route_t *route;
2351 lnet_route_t *shortest = NULL;
2352 __u32 shortest_hops = LNET_UNDEFINED_HOPS;
2353 __u32 route_hops;
2354
2355 LASSERT(!list_empty(&rnet->lrn_routes));
2356
2357 list_for_each_entry(route, &rnet->lrn_routes,
2358 lr_list) {
2359 route_hops = route->lr_hops;
2360 if (route_hops == LNET_UNDEFINED_HOPS)
2361 route_hops = 1;
2362 if (!shortest ||
2363 route_hops < shortest_hops) {
2364 shortest = route;
2365 shortest_hops = route_hops;
2366 }
2367 }
2368
2369 LASSERT(shortest);
2370 hops = shortest_hops;
2371 if (srcnidp)
2372 *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
2373 if (orderp)
2374 *orderp = order;
2375 lnet_net_unlock(cpt);
2376 return hops + 1;
2377 }
2378 order++;
2379 }
2380
2381 lnet_net_unlock(cpt);
2382 return -EHOSTUNREACH;
2383 }
2384 EXPORT_SYMBOL(LNetDist);
2385