Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us

14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
70 if (cp->cp_xmit_rm) { in rds_send_path_reset()
71 rm = cp->cp_xmit_rm; in rds_send_path_reset()
72 cp->cp_xmit_rm = NULL; in rds_send_path_reset()
81 cp->cp_xmit_sg = 0; in rds_send_path_reset()
82 cp->cp_xmit_hdr_off = 0; in rds_send_path_reset()
83 cp->cp_xmit_data_off = 0; in rds_send_path_reset()
84 cp->cp_xmit_atomic_sent = 0; in rds_send_path_reset()
85 cp->cp_xmit_rdma_sent = 0; in rds_send_path_reset()
86 cp->cp_xmit_data_sent = 0; in rds_send_path_reset()
88 cp->cp_conn->c_map_queued = 0; in rds_send_path_reset()
90 cp->cp_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_path_reset()
91 cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_path_reset()
94 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_reset()
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_reset()
96 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); in rds_send_path_reset()
97 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags); in rds_send_path_reset()
99 list_splice_init(&cp->cp_retrans, &cp->cp_send_queue); in rds_send_path_reset()
100 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_reset()
106 return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0; in acquire_in_xmit()
111 clear_bit(RDS_IN_XMIT, &cp->cp_flags); in release_in_xmit()
116 * the system-wide hashed waitqueue buckets in the fast path only to in release_in_xmit()
119 if (waitqueue_active(&cp->cp_waitq)) in release_in_xmit()
120 wake_up_all(&cp->cp_waitq); in release_in_xmit()
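The acquire_in_xmit()/release_in_xmit() pair above serializes transmission on a path with a single flag bit, and the waitqueue_active() check keeps the release path from touching the system-wide hashed waitqueue buckets when nobody is sleeping. A minimal userspace sketch of the same pattern, assuming C11 atomics and made-up names (path_lock, try_acquire_xmit) rather than the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct path_lock {
        atomic_flag in_xmit;    /* stand-in for the RDS_IN_XMIT bit in cp_flags */
        int waiters;            /* stand-in for waitqueue_active(&cp->cp_waitq) */
};

/* test-and-set returning "was clear" mirrors acquire_in_xmit() above */
static bool try_acquire_xmit(struct path_lock *pl)
{
        return !atomic_flag_test_and_set(&pl->in_xmit);
}

static void release_xmit(struct path_lock *pl)
{
        atomic_flag_clear(&pl->in_xmit);
        /* only wake anyone if a waiter is actually registered */
        if (pl->waiters)
                printf("waking %d waiter(s)\n", pl->waiters);
}

int main(void)
{
        struct path_lock pl = { .in_xmit = ATOMIC_FLAG_INIT };

        if (try_acquire_xmit(&pl)) {
                /* a second caller loses the race and must back off */
                printf("re-acquire while held: %d\n", try_acquire_xmit(&pl));
                release_xmit(&pl);
        }
        return 0;
}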
124 * We're making the conscious trade-off here to only send one message
127 * - tx queueing is a simple fifo list
128 * - reassembly is optional and easily done by transports per conn
129 * - no per flow rx lookup at all, straight to the socket
130 * - less per-frag memory and wire overhead
132 * - queued acks can be delayed behind large messages
134 * - small message latency is higher behind queued large messages
135 * - large message latency isn't starved by intervening small sends
139 struct rds_connection *conn = cp->cp_conn; in rds_send_xmit()
157 * avoids blocking the caller and trading per-connection data between in rds_send_xmit()
162 ret = -ENOMEM; in rds_send_xmit()
166 if (rds_destroy_pending(cp->cp_conn)) { in rds_send_xmit()
168 ret = -ENETUNREACH; /* don't requeue send work */ in rds_send_xmit()
180 send_gen = READ_ONCE(cp->cp_send_gen) + 1; in rds_send_xmit()
181 WRITE_ONCE(cp->cp_send_gen, send_gen); in rds_send_xmit()
193 if (conn->c_trans->xmit_path_prepare) in rds_send_xmit()
194 conn->c_trans->xmit_path_prepare(cp); in rds_send_xmit()
202 rm = cp->cp_xmit_rm; in rds_send_xmit()
210 ret = -EAGAIN; in rds_send_xmit()
219 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { in rds_send_xmit()
225 rm->data.op_active = 1; in rds_send_xmit()
226 rm->m_inc.i_conn_path = cp; in rds_send_xmit()
227 rm->m_inc.i_conn = cp->cp_conn; in rds_send_xmit()
229 cp->cp_xmit_rm = rm; in rds_send_xmit()
252 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
254 if (!list_empty(&cp->cp_send_queue)) { in rds_send_xmit()
255 rm = list_entry(cp->cp_send_queue.next, in rds_send_xmit()
264 list_move_tail(&rm->m_conn_item, in rds_send_xmit()
265 &cp->cp_retrans); in rds_send_xmit()
268 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
280 if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) || in rds_send_xmit()
281 (rm->rdma.op_active && in rds_send_xmit()
282 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) { in rds_send_xmit()
283 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
284 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) in rds_send_xmit()
285 list_move(&rm->m_conn_item, &to_be_dropped); in rds_send_xmit()
286 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
291 len = ntohl(rm->m_inc.i_hdr.h_len); in rds_send_xmit()
292 if (cp->cp_unacked_packets == 0 || in rds_send_xmit()
293 cp->cp_unacked_bytes < len) { in rds_send_xmit()
294 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); in rds_send_xmit()
296 cp->cp_unacked_packets = in rds_send_xmit()
298 cp->cp_unacked_bytes = in rds_send_xmit()
302 cp->cp_unacked_bytes -= len; in rds_send_xmit()
303 cp->cp_unacked_packets--; in rds_send_xmit()
306 cp->cp_xmit_rm = rm; in rds_send_xmit()
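The cp_unacked_packets/cp_unacked_bytes bookkeeping above (around lines 292-303) decides when to piggyback an ACK request: once either the packet budget or the byte budget for unacked data is exhausted, the message gets RDS_MSG_ACK_REQUIRED and both budgets are refilled from the sysctl limits. A standalone sketch of that accounting, with hypothetical constants standing in for rds_sysctl_max_unacked_packets/bytes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for rds_sysctl_max_unacked_packets/bytes */
#define MAX_UNACKED_PACKETS     8
#define MAX_UNACKED_BYTES       (16 * 1024)

struct path_budget {
        unsigned int unacked_packets;
        unsigned int unacked_bytes;
};

/* returns true when this message should carry an ack request */
static bool ack_required(struct path_budget *b, uint32_t len)
{
        if (b->unacked_packets == 0 || b->unacked_bytes < len) {
                /* budget spent: ask for an ack and start a fresh budget */
                b->unacked_packets = MAX_UNACKED_PACKETS;
                b->unacked_bytes = MAX_UNACKED_BYTES;
                return true;
        }
        b->unacked_bytes -= len;
        b->unacked_packets--;
        return false;
}

int main(void)
{
        struct path_budget b = { MAX_UNACKED_PACKETS, MAX_UNACKED_BYTES };

        for (int i = 0; i < 12; i++)
                printf("msg %2d (4096 bytes): ack_required=%d\n",
                       i, ack_required(&b, 4096));
        return 0;
}

With these example numbers the byte budget drains first, so roughly every fifth 4 KiB message requests an ack.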
310 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { in rds_send_xmit()
311 rm->m_final_op = &rm->rdma; in rds_send_xmit()
315 set_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
316 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); in rds_send_xmit()
318 clear_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
319 wake_up_interruptible(&rm->m_flush_wait); in rds_send_xmit()
322 cp->cp_xmit_rdma_sent = 1; in rds_send_xmit()
326 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) { in rds_send_xmit()
327 rm->m_final_op = &rm->atomic; in rds_send_xmit()
331 set_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
332 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); in rds_send_xmit()
334 clear_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
335 wake_up_interruptible(&rm->m_flush_wait); in rds_send_xmit()
338 cp->cp_xmit_atomic_sent = 1; in rds_send_xmit()
345 * We permit 0-byte sends; rds-ping depends on this. in rds_send_xmit()
349 if (rm->data.op_nents == 0) { in rds_send_xmit()
353 ops_present = (rm->atomic.op_active || rm->rdma.op_active); in rds_send_xmit()
354 if (rm->atomic.op_active && !rm->atomic.op_silent) in rds_send_xmit()
356 if (rm->rdma.op_active && !rm->rdma.op_silent) in rds_send_xmit()
360 && !rm->m_rdma_cookie) in rds_send_xmit()
361 rm->data.op_active = 0; in rds_send_xmit()
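The block above (around lines 345-361) decides whether a header still has to go out when there is no data payload: 0-byte sends are allowed (rds-ping relies on them), but if the only attached ops are silent RDMA/atomic ops and no rdma cookie needs to be delivered, the data op is turned off entirely. A hedged sketch of that decision, with hypothetical types and names:

#include <stdbool.h>
#include <stdio.h>

struct op_state {
        bool active;
        bool silent;
};

/* returns whether a header/data send is still needed for this message */
static bool need_header_send(struct op_state atomic_op, struct op_state rdma_op,
                             bool has_rdma_cookie, unsigned int data_nents)
{
        if (data_nents != 0)
                return true;            /* real payload always goes out */

        bool ops_present = atomic_op.active || rdma_op.active;
        bool all_silent = !(atomic_op.active && !atomic_op.silent) &&
                          !(rdma_op.active && !rdma_op.silent);

        /* only exclusively-silent ops with no cookie skip the header */
        return !(ops_present && all_silent && !has_rdma_cookie);
}

int main(void)
{
        struct op_state none = { false, false };
        struct op_state silent_atomic = { true, true };
        struct op_state loud_rdma = { true, false };

        printf("0-byte ping:           %d\n", need_header_send(none, none, false, 0));
        printf("silent atomic only:    %d\n", need_header_send(silent_atomic, none, false, 0));
        printf("non-silent rdma, 0 sg: %d\n", need_header_send(none, loud_rdma, false, 0));
        return 0;
}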
364 if (rm->data.op_active && !cp->cp_xmit_data_sent) { in rds_send_xmit()
365 rm->m_final_op = &rm->data; in rds_send_xmit()
367 ret = conn->c_trans->xmit(conn, rm, in rds_send_xmit()
368 cp->cp_xmit_hdr_off, in rds_send_xmit()
369 cp->cp_xmit_sg, in rds_send_xmit()
370 cp->cp_xmit_data_off); in rds_send_xmit()
374 if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) { in rds_send_xmit()
376 sizeof(struct rds_header) - in rds_send_xmit()
377 cp->cp_xmit_hdr_off); in rds_send_xmit()
378 cp->cp_xmit_hdr_off += tmp; in rds_send_xmit()
379 ret -= tmp; in rds_send_xmit()
382 sg = &rm->data.op_sg[cp->cp_xmit_sg]; in rds_send_xmit()
384 tmp = min_t(int, ret, sg->length - in rds_send_xmit()
385 cp->cp_xmit_data_off); in rds_send_xmit()
386 cp->cp_xmit_data_off += tmp; in rds_send_xmit()
387 ret -= tmp; in rds_send_xmit()
388 if (cp->cp_xmit_data_off == sg->length) { in rds_send_xmit()
389 cp->cp_xmit_data_off = 0; in rds_send_xmit()
391 cp->cp_xmit_sg++; in rds_send_xmit()
392 BUG_ON(ret != 0 && cp->cp_xmit_sg == in rds_send_xmit()
393 rm->data.op_nents); in rds_send_xmit()
397 if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) && in rds_send_xmit()
398 (cp->cp_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
399 cp->cp_xmit_data_sent = 1; in rds_send_xmit()
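In the data transmission above, conn->c_trans->xmit() returns how many bytes the transport accepted, and the caller charges those bytes first against the RDS header and then against the scatterlist fragments, updating cp_xmit_hdr_off, cp_xmit_sg and cp_xmit_data_off so a later call resumes exactly where this one stopped. A self-contained sketch of that accounting, with made-up fragment sizes and HDR_LEN standing in for sizeof(struct rds_header):

#include <stdio.h>

#define HDR_LEN 48U     /* hypothetical stand-in for sizeof(struct rds_header) */

struct xmit_state {
        unsigned int hdr_off;   /* like cp_xmit_hdr_off */
        unsigned int sg;        /* like cp_xmit_sg: current fragment index */
        unsigned int data_off;  /* like cp_xmit_data_off: offset in fragment */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* charge 'sent' bytes against the header first, then the fragments */
static void consume(struct xmit_state *st, const unsigned int *frag_len,
                    unsigned int nfrags, unsigned int sent)
{
        if (st->hdr_off < HDR_LEN) {
                unsigned int tmp = min_u(sent, HDR_LEN - st->hdr_off);

                st->hdr_off += tmp;
                sent -= tmp;
        }
        while (sent && st->sg < nfrags) {
                unsigned int tmp = min_u(sent, frag_len[st->sg] - st->data_off);

                st->data_off += tmp;
                sent -= tmp;
                if (st->data_off == frag_len[st->sg]) {
                        st->data_off = 0;
                        st->sg++;       /* fragment finished, move on */
                }
        }
}

int main(void)
{
        unsigned int frags[2] = { 4096, 1024 };
        struct xmit_state st = { 0, 0, 0 };

        consume(&st, frags, 2, 2000);                           /* partial send */
        printf("hdr_off=%u sg=%u data_off=%u\n", st.hdr_off, st.sg, st.data_off);
        consume(&st, frags, 2, HDR_LEN + 4096 + 1024 - 2000);   /* the rest */
        printf("hdr_off=%u sg=%u data_off=%u\n", st.hdr_off, st.sg, st.data_off);
        return 0;
}

Once the header is fully written and the fragment index reaches the fragment count, the message is complete, which is what the cp_xmit_data_sent check above records.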
407 if (!rm->data.op_active || cp->cp_xmit_data_sent) { in rds_send_xmit()
408 cp->cp_xmit_rm = NULL; in rds_send_xmit()
409 cp->cp_xmit_sg = 0; in rds_send_xmit()
410 cp->cp_xmit_hdr_off = 0; in rds_send_xmit()
411 cp->cp_xmit_data_off = 0; in rds_send_xmit()
412 cp->cp_xmit_rdma_sent = 0; in rds_send_xmit()
413 cp->cp_xmit_atomic_sent = 0; in rds_send_xmit()
414 cp->cp_xmit_data_sent = 0; in rds_send_xmit()
421 if (conn->c_trans->xmit_path_complete) in rds_send_xmit()
422 conn->c_trans->xmit_path_complete(cp); in rds_send_xmit()
441 * call us when more room is available, such as from the tx in rds_send_xmit()
452 raced = send_gen != READ_ONCE(cp->cp_send_gen); in rds_send_xmit()
454 if ((test_bit(0, &conn->c_map_queued) || in rds_send_xmit()
455 !list_empty(&cp->cp_send_queue)) && !raced) { in rds_send_xmit()
459 if (rds_destroy_pending(cp->cp_conn)) in rds_send_xmit()
460 ret = -ENETUNREACH; in rds_send_xmit()
462 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_xmit()
475 u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); in rds_send_sndbuf_remove()
477 assert_spin_locked(&rs->rs_lock); in rds_send_sndbuf_remove()
479 BUG_ON(rs->rs_snd_bytes < len); in rds_send_sndbuf_remove()
480 rs->rs_snd_bytes -= len; in rds_send_sndbuf_remove()
482 if (rs->rs_snd_bytes == 0) in rds_send_sndbuf_remove()
491 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack; in rds_send_is_acked()
496 * handling code - except that we call here as soon as we get
507 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_rdma_send_complete()
509 ro = &rm->rdma; in rds_rdma_send_complete()
510 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && in rds_rdma_send_complete()
511 ro->op_active && ro->op_notify && ro->op_notifier) { in rds_rdma_send_complete()
512 notifier = ro->op_notifier; in rds_rdma_send_complete()
513 rs = rm->m_rs; in rds_rdma_send_complete()
516 notifier->n_status = status; in rds_rdma_send_complete()
517 spin_lock(&rs->rs_lock); in rds_rdma_send_complete()
518 list_add_tail(&notifier->n_list, &rs->rs_notify_queue); in rds_rdma_send_complete()
519 spin_unlock(&rs->rs_lock); in rds_rdma_send_complete()
521 ro->op_notifier = NULL; in rds_rdma_send_complete()
524 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_rdma_send_complete()
534 * Just like above, except looks at atomic op
543 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_atomic_send_complete()
545 ao = &rm->atomic; in rds_atomic_send_complete()
546 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) in rds_atomic_send_complete()
547 && ao->op_active && ao->op_notify && ao->op_notifier) { in rds_atomic_send_complete()
548 notifier = ao->op_notifier; in rds_atomic_send_complete()
549 rs = rm->m_rs; in rds_atomic_send_complete()
552 notifier->n_status = status; in rds_atomic_send_complete()
553 spin_lock(&rs->rs_lock); in rds_atomic_send_complete()
554 list_add_tail(&notifier->n_list, &rs->rs_notify_queue); in rds_atomic_send_complete()
555 spin_unlock(&rs->rs_lock); in rds_atomic_send_complete()
557 ao->op_notifier = NULL; in rds_atomic_send_complete()
560 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_atomic_send_complete()
571 * don't do any locking - we have all the ingredients (message,
580 ro = &rm->rdma; in __rds_send_complete()
581 if (ro->op_active && ro->op_notify && ro->op_notifier) { in __rds_send_complete()
582 ro->op_notifier->n_status = status; in __rds_send_complete()
583 list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue); in __rds_send_complete()
584 ro->op_notifier = NULL; in __rds_send_complete()
587 ao = &rm->atomic; in __rds_send_complete()
588 if (ao->op_active && ao->op_notify && ao->op_notifier) { in __rds_send_complete()
589 ao->op_notifier->n_status = status; in __rds_send_complete()
590 list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue); in __rds_send_complete()
591 ao->op_notifier = NULL; in __rds_send_complete()
594 /* No need to wake the app - caller does this */ in __rds_send_complete()
614 rm = list_entry(messages->next, struct rds_message, in rds_send_remove_from_sock()
616 list_del_init(&rm->m_conn_item); in rds_send_remove_from_sock()
620 * else beat us to removing it from the sock. If we race in rds_send_remove_from_sock()
624 * The message spinlock makes sure nobody clears rm->m_rs in rds_send_remove_from_sock()
628 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_send_remove_from_sock()
629 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) in rds_send_remove_from_sock()
632 if (rs != rm->m_rs) { in rds_send_remove_from_sock()
637 rs = rm->m_rs; in rds_send_remove_from_sock()
643 spin_lock(&rs->rs_lock); in rds_send_remove_from_sock()
645 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { in rds_send_remove_from_sock()
646 struct rm_rdma_op *ro = &rm->rdma; in rds_send_remove_from_sock()
649 list_del_init(&rm->m_sock_item); in rds_send_remove_from_sock()
652 if (ro->op_active && ro->op_notifier && in rds_send_remove_from_sock()
653 (ro->op_notify || (ro->op_recverr && status))) { in rds_send_remove_from_sock()
654 notifier = ro->op_notifier; in rds_send_remove_from_sock()
655 list_add_tail(&notifier->n_list, in rds_send_remove_from_sock()
656 &rs->rs_notify_queue); in rds_send_remove_from_sock()
657 if (!notifier->n_status) in rds_send_remove_from_sock()
658 notifier->n_status = status; in rds_send_remove_from_sock()
659 rm->rdma.op_notifier = NULL; in rds_send_remove_from_sock()
663 spin_unlock(&rs->rs_lock); in rds_send_remove_from_sock()
666 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_send_remove_from_sock()
683 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
693 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_drop_acked()
695 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_drop_acked()
699 list_move(&rm->m_conn_item, &list); in rds_send_path_drop_acked()
700 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags); in rds_send_path_drop_acked()
707 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_drop_acked()
717 WARN_ON(conn->c_trans->t_mp_capable); in rds_send_drop_acked()
718 rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked); in rds_send_drop_acked()
731 spin_lock_irqsave(&rs->rs_lock, flags); in rds_send_drop_to()
733 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) { in rds_send_drop_to()
735 (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) || in rds_send_drop_to()
736 dest->sin6_port != rm->m_inc.i_hdr.h_dport)) in rds_send_drop_to()
739 list_move(&rm->m_sock_item, &list); in rds_send_drop_to()
741 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); in rds_send_drop_to()
747 spin_unlock_irqrestore(&rs->rs_lock, flags); in rds_send_drop_to()
755 conn = rm->m_inc.i_conn; in rds_send_drop_to()
756 if (conn->c_trans->t_mp_capable) in rds_send_drop_to()
757 cp = rm->m_inc.i_conn_path; in rds_send_drop_to()
759 cp = &conn->c_path[0]; in rds_send_drop_to()
761 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_drop_to()
763 * Maybe someone else beat us to removing rm from the conn. in rds_send_drop_to()
767 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { in rds_send_drop_to()
768 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
771 list_del_init(&rm->m_conn_item); in rds_send_drop_to()
772 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
778 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_send_drop_to()
780 spin_lock(&rs->rs_lock); in rds_send_drop_to()
782 spin_unlock(&rs->rs_lock); in rds_send_drop_to()
784 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_send_drop_to()
793 list_del_init(&rm->m_sock_item); in rds_send_drop_to()
798 * taking m_rs_lock is the only thing that keeps us in rds_send_drop_to()
801 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_send_drop_to()
803 spin_lock(&rs->rs_lock); in rds_send_drop_to()
805 spin_unlock(&rs->rs_lock); in rds_send_drop_to()
807 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_send_drop_to()
815 * possible that another thread can race with us and remove the
829 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); in rds_send_queue_rm()
833 spin_lock_irqsave(&rs->rs_lock, flags); in rds_send_queue_rm()
837 * and userspace gets -EAGAIN. But poll() indicates there's send in rds_send_queue_rm()
843 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) { in rds_send_queue_rm()
844 rs->rs_snd_bytes += len; in rds_send_queue_rm()
849 * throughput hits a certain threshold. in rds_send_queue_rm()
851 if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2) in rds_send_queue_rm()
852 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); in rds_send_queue_rm()
854 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue); in rds_send_queue_rm()
855 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags); in rds_send_queue_rm()
858 rm->m_rs = rs; in rds_send_queue_rm()
862 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0); in rds_send_queue_rm()
863 rm->m_inc.i_conn = conn; in rds_send_queue_rm()
864 rm->m_inc.i_conn_path = cp; in rds_send_queue_rm()
867 spin_lock(&cp->cp_lock); in rds_send_queue_rm()
868 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++); in rds_send_queue_rm()
869 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_queue_rm()
870 set_bit(RDS_MSG_ON_CONN, &rm->m_flags); in rds_send_queue_rm()
871 spin_unlock(&cp->cp_lock); in rds_send_queue_rm()
874 rm, len, rs, rs->rs_snd_bytes, in rds_send_queue_rm()
875 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence)); in rds_send_queue_rm()
880 spin_unlock_irqrestore(&rs->rs_lock, flags); in rds_send_queue_rm()
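rds_send_queue_rm() above admits a message only while the socket's accounted send bytes stay below the send buffer, and requests an ack once usage crosses half of it so the peer's acks keep draining rs_snd_bytes; the comment about throughput hitting a certain threshold refers to that half-buffer point. A rough userspace sketch of the admission check, assuming a fixed buffer size instead of rds_sk_sndbuf():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SK_SNDBUF (64U * 1024)  /* hypothetical fixed send buffer */

struct sock_acct {
        uint32_t snd_bytes;     /* like rs_snd_bytes */
};

/* returns true if the message was queued; *ack_required mirrors
 * RDS_MSG_ACK_REQUIRED being set past the half-buffer threshold */
static bool queue_rm(struct sock_acct *s, uint32_t len, bool *ack_required)
{
        *ack_required = false;
        if (s->snd_bytes >= SK_SNDBUF)
                return false;           /* caller waits or returns -EAGAIN */

        s->snd_bytes += len;
        if (s->snd_bytes >= SK_SNDBUF / 2)
                *ack_required = true;
        return true;
}

int main(void)
{
        struct sock_acct s = { 0 };
        bool ack;

        for (int i = 0; i < 6; i++) {
                bool ok = queue_rm(&s, 16U * 1024, &ack);
                printf("msg %d: queued=%d ack_required=%d snd_bytes=%u\n",
                       i, ok, ack, s.snd_bytes);
        }
        return 0;
}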
900 return -EINVAL; in rds_rm_size()
904 return -EINVAL; in rds_rm_size()
906 if (cmsg->cmsg_level != SOL_RDS) in rds_rm_size()
909 switch (cmsg->cmsg_type) { in rds_rm_size()
911 if (vct->indx >= vct->len) { in rds_rm_size()
912 vct->len += vct->incr; in rds_rm_size()
914 krealloc(vct->vec, in rds_rm_size()
915 vct->len * in rds_rm_size()
919 vct->len -= vct->incr; in rds_rm_size()
920 return -ENOMEM; in rds_rm_size()
922 vct->vec = tmp_iov; in rds_rm_size()
924 iov = &vct->vec[vct->indx]; in rds_rm_size()
926 vct->indx++; in rds_rm_size()
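The cmsg branch above grows the iovec table by vct->incr entries with krealloc(), rolling vct->len back on allocation failure so the bookkeeping never describes memory it does not own. The same grow-in-place pattern in plain C with realloc() and hypothetical names; here the new length is committed only after realloc() succeeds, which has the same effect as the roll-back above:

#include <stdio.h>
#include <stdlib.h>

struct iov_vec {
        void   *base;
        size_t  len;
};

struct vec_tbl {
        struct iov_vec *vec;
        size_t indx;    /* next free slot */
        size_t len;     /* allocated slots */
        size_t incr;    /* growth step */
};

/* append one entry, growing the table by tbl->incr slots when full */
static int vec_tbl_push(struct vec_tbl *tbl, struct iov_vec entry)
{
        if (tbl->indx >= tbl->len) {
                size_t new_len = tbl->len + tbl->incr;
                struct iov_vec *tmp = realloc(tbl->vec, new_len * sizeof(*tmp));

                if (!tmp)
                        return -1;      /* old vec and old len stay valid */
                tbl->vec = tmp;
                tbl->len = new_len;
        }
        tbl->vec[tbl->indx++] = entry;
        return 0;
}

int main(void)
{
        struct vec_tbl tbl = { .vec = NULL, .indx = 0, .len = 0, .incr = 4 };
        struct iov_vec e = { .base = NULL, .len = 128 };

        for (int i = 0; i < 10; i++)
                if (vec_tbl_push(&tbl, e))
                        return 1;
        printf("stored %zu entries in %zu slots\n", tbl.indx, tbl.len);
        free(tbl.vec);
        return 0;
}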
954 return -EINVAL; in rds_rm_size()
959 if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie) in rds_rm_size()
960 return -EINVAL; in rds_rm_size()
964 /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */ in rds_rm_size()
966 return -EINVAL; in rds_rm_size()
976 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) || in rds_cmsg_zcopy()
977 !rm->data.op_mmp_znotifier) in rds_cmsg_zcopy()
978 return -EINVAL; in rds_cmsg_zcopy()
980 rm->data.op_mmp_znotifier->z_cookie = *cookie; in rds_cmsg_zcopy()
993 return -EINVAL; in rds_cmsg_send()
995 if (cmsg->cmsg_level != SOL_RDS) in rds_cmsg_send()
999 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr. in rds_cmsg_send()
1001 switch (cmsg->cmsg_type) { in rds_cmsg_send()
1003 if (ind >= vct->indx) in rds_cmsg_send()
1004 return -ENOMEM; in rds_cmsg_send()
1005 ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]); in rds_cmsg_send()
1017 else if (ret == -ENODEV) in rds_cmsg_send()
1021 ret = -EAGAIN; in rds_cmsg_send()
1035 return -EINVAL; in rds_cmsg_send()
1050 if (conn->c_npaths == 0) in rds_send_mprds_hash()
1053 hash = RDS_MPATH_HASH(rs, conn->c_npaths); in rds_send_mprds_hash()
1054 if (conn->c_npaths == 0 && hash != 0) { in rds_send_mprds_hash()
1058 * until it is up to be sure that the non-zero c_path can be in rds_send_mprds_hash()
1060 * c_path in case the connection ends up being non-MP capable. in rds_send_mprds_hash()
1062 if (conn->c_npaths == 0) { in rds_send_mprds_hash()
1068 if (wait_event_interruptible(conn->c_hs_waitq, in rds_send_mprds_hash()
1069 conn->c_npaths != 0)) in rds_send_mprds_hash()
1072 if (conn->c_npaths == 1) in rds_send_mprds_hash()
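rds_send_mprds_hash() above picks a path index for an MP-capable connection: the socket is hashed over the advertised number of paths, and while c_npaths is still unknown the sender kicks off a handshake ping and may wait for it, falling back to path 0 if the peer turns out not to be multipath capable. A simplified sketch of that selection, with a trivial hash in place of RDS_MPATH_HASH and a boolean standing in for the handshake wait:

#include <stdbool.h>
#include <stdio.h>

#define MPATH_WORKERS 8         /* hypothetical number of worker paths */

/* trivial stand-in for RDS_MPATH_HASH(rs, n): hash a flow id over n paths */
static unsigned int mpath_hash(unsigned int flow_id, unsigned int npaths)
{
        return flow_id % npaths;
}

/* npaths == 0 means the handshake has not told us the peer's path count yet */
static unsigned int pick_path(unsigned int flow_id, unsigned int npaths,
                              bool handshake_completed)
{
        unsigned int hash;

        hash = mpath_hash(flow_id, npaths ? npaths : MPATH_WORKERS);
        if (npaths == 0 && hash != 0) {
                /* the real code sends a ping and may wait for the handshake */
                if (!handshake_completed)
                        hash = 0;       /* be safe and use the base path */
        }
        if (npaths == 1)
                hash = 0;               /* peer is not MP capable */
        return hash;
}

int main(void)
{
        printf("known 4 paths, flow 10 -> path %u\n", pick_path(10, 4, true));
        printf("unknown paths, flow 10 -> path %u\n", pick_path(10, 0, false));
        printf("single path,   flow 10 -> path %u\n", pick_path(10, 1, true));
        return 0;
}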
1085 return -EINVAL; in rds_rdma_bytes()
1087 if (cmsg->cmsg_level != SOL_RDS) in rds_rdma_bytes()
1090 if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { in rds_rdma_bytes()
1091 if (cmsg->cmsg_len < in rds_rdma_bytes()
1093 return -EINVAL; in rds_rdma_bytes()
1095 *rdma_bytes += args->remote_vec.bytes; in rds_rdma_bytes()
1103 struct sock *sk = sock->sk; in rds_sendmsg()
1105 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); in rds_sendmsg()
1106 DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); in rds_sendmsg()
1112 int nonblock = msg->msg_flags & MSG_DONTWAIT; in rds_sendmsg()
1118 bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && in rds_sendmsg()
1132 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) { in rds_sendmsg()
1133 ret = -EOPNOTSUPP; in rds_sendmsg()
1137 namelen = msg->msg_namelen; in rds_sendmsg()
1140 ret = -EINVAL; in rds_sendmsg()
1143 switch (usin->sin_family) { in rds_sendmsg()
1145 if (usin->sin_addr.s_addr == htonl(INADDR_ANY) || in rds_sendmsg()
1146 usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || in rds_sendmsg()
1147 ipv4_is_multicast(usin->sin_addr.s_addr)) { in rds_sendmsg()
1148 ret = -EINVAL; in rds_sendmsg()
1151 ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr); in rds_sendmsg()
1152 dport = usin->sin_port; in rds_sendmsg()
1160 ret = -EINVAL; in rds_sendmsg()
1163 addr_type = ipv6_addr_type(&sin6->sin6_addr); in rds_sendmsg()
1168 ret = -EINVAL; in rds_sendmsg()
1175 addr4 = sin6->sin6_addr.s6_addr32[3]; in rds_sendmsg()
1179 ret = -EINVAL; in rds_sendmsg()
1184 if (sin6->sin6_scope_id == 0) { in rds_sendmsg()
1185 ret = -EINVAL; in rds_sendmsg()
1188 scope_id = sin6->sin6_scope_id; in rds_sendmsg()
1191 daddr = sin6->sin6_addr; in rds_sendmsg()
1192 dport = sin6->sin6_port; in rds_sendmsg()
1198 ret = -EINVAL; in rds_sendmsg()
1202 /* We only care about consistency with ->connect() */ in rds_sendmsg()
1204 daddr = rs->rs_conn_addr; in rds_sendmsg()
1205 dport = rs->rs_conn_port; in rds_sendmsg()
1206 scope_id = rs->rs_bound_scope_id; in rds_sendmsg()
1211 if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) { in rds_sendmsg()
1213 ret = -ENOTCONN; in rds_sendmsg()
1221 ipv6_addr_v4mapped(&rs->rs_bound_addr)) { in rds_sendmsg()
1223 ret = -EOPNOTSUPP; in rds_sendmsg()
1228 * communicating between link local and non-link local address. in rds_sendmsg()
1230 if (scope_id != rs->rs_bound_scope_id) { in rds_sendmsg()
1232 scope_id = rs->rs_bound_scope_id; in rds_sendmsg()
1233 } else if (rs->rs_bound_scope_id) { in rds_sendmsg()
1235 ret = -EINVAL; in rds_sendmsg()
1248 ret = -EMSGSIZE; in rds_sendmsg()
1253 ret = -EMSGSIZE; in rds_sendmsg()
1258 if (rs->rs_transport->t_type != RDS_TRANS_TCP) { in rds_sendmsg()
1259 ret = -EOPNOTSUPP; in rds_sendmsg()
1262 num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX); in rds_sendmsg()
1271 ret = -ENOMEM; in rds_sendmsg()
1277 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); in rds_sendmsg()
1278 if (IS_ERR(rm->data.op_sg)) { in rds_sendmsg()
1279 ret = PTR_ERR(rm->data.op_sg); in rds_sendmsg()
1282 ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy); in rds_sendmsg()
1286 rm->data.op_active = 1; in rds_sendmsg()
1288 rm->m_daddr = daddr; in rds_sendmsg()
1292 if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) && in rds_sendmsg()
1293 rs->rs_tos == rs->rs_conn->c_tos) { in rds_sendmsg()
1294 conn = rs->rs_conn; in rds_sendmsg()
1296 conn = rds_conn_create_outgoing(sock_net(sock->sk), in rds_sendmsg()
1297 &rs->rs_bound_addr, &daddr, in rds_sendmsg()
1298 rs->rs_transport, rs->rs_tos, in rds_sendmsg()
1299 sock->sk->sk_allocation, in rds_sendmsg()
1305 rs->rs_conn = conn; in rds_sendmsg()
1308 if (conn->c_trans->t_mp_capable) in rds_sendmsg()
1309 cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)]; in rds_sendmsg()
1311 cpath = &conn->c_path[0]; in rds_sendmsg()
1313 rm->m_conn_path = cpath; in rds_sendmsg()
1319 if (ret == -EAGAIN) in rds_sendmsg()
1324 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { in rds_sendmsg()
1326 &rm->rdma, conn->c_trans->xmit_rdma); in rds_sendmsg()
1327 ret = -EOPNOTSUPP; in rds_sendmsg()
1331 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { in rds_sendmsg()
1333 &rm->atomic, conn->c_trans->xmit_atomic); in rds_sendmsg()
1334 ret = -EOPNOTSUPP; in rds_sendmsg()
1339 ret = -EAGAIN; in rds_sendmsg()
1346 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); in rds_sendmsg()
1348 rs->rs_seen_congestion = 1; in rds_sendmsg()
1351 while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port, in rds_sendmsg()
1356 ret = -EAGAIN; in rds_sendmsg()
1362 rs->rs_bound_port, in rds_sendmsg()
1372 ret = -ETIMEDOUT; in rds_sendmsg()
1378 * to retry sends in the rds thread if the transport asks us to. in rds_sendmsg()
1383 if (ret == -ENOMEM || ret == -EAGAIN) { in rds_sendmsg()
1386 if (rds_destroy_pending(cpath->cp_conn)) in rds_sendmsg()
1387 ret = -ENETUNREACH; in rds_sendmsg()
1389 queue_delayed_work(rds_wq, &cpath->cp_send_w, 1); in rds_sendmsg()
1411 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1); in rds_sendmsg()
1436 ret = -ENOMEM; in rds_send_probe()
1440 rm->m_daddr = cp->cp_conn->c_faddr; in rds_send_probe()
1441 rm->data.op_active = 1; in rds_send_probe()
1445 ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL); in rds_send_probe()
1449 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_probe()
1450 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_probe()
1451 set_bit(RDS_MSG_ON_CONN, &rm->m_flags); in rds_send_probe()
1453 rm->m_inc.i_conn = cp->cp_conn; in rds_send_probe()
1454 rm->m_inc.i_conn_path = cp; in rds_send_probe()
1456 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, in rds_send_probe()
1457 cp->cp_next_tx_seq); in rds_send_probe()
1458 rm->m_inc.i_hdr.h_flags |= h_flags; in rds_send_probe()
1459 cp->cp_next_tx_seq++; in rds_send_probe()
1462 cp->cp_conn->c_trans->t_mp_capable) { in rds_send_probe()
1464 u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num); in rds_send_probe()
1466 rds_message_add_extension(&rm->m_inc.i_hdr, in rds_send_probe()
1469 rds_message_add_extension(&rm->m_inc.i_hdr, in rds_send_probe()
1474 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_probe()
1481 if (!rds_destroy_pending(cp->cp_conn)) in rds_send_probe()
1482 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_probe()
1504 struct rds_conn_path *cp = &conn->c_path[cp_index]; in rds_send_ping()
1506 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_ping()
1507 if (conn->c_ping_triggered) { in rds_send_ping()
1508 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()
1511 conn->c_ping_triggered = 1; in rds_send_ping()
1512 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()