Lines Matching refs:sk
54 #define __iucv_sock_wait(sk, condition, timeo, ret) \ argument
59 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
69 release_sock(sk); \
71 lock_sock(sk); \
72 ret = sock_error(sk); \
76 finish_wait(sk_sleep(sk), &__wait); \
79 #define iucv_sock_wait(sk, condition, timeo) \ argument
83 __iucv_sock_wait(sk, condition, timeo, __ret); \
89 static void iucv_sock_kill(struct sock *sk);
90 static void iucv_sock_close(struct sock *sk);
165 static int iucv_sock_in_state(struct sock *sk, int state, int state2) in iucv_sock_in_state() argument
167 return (sk->sk_state == state || sk->sk_state == state2); in iucv_sock_in_state()
178 static inline int iucv_below_msglim(struct sock *sk) in iucv_below_msglim() argument
180 struct iucv_sock *iucv = iucv_sk(sk); in iucv_below_msglim()
182 if (sk->sk_state != IUCV_CONNECTED) in iucv_below_msglim()
194 static void iucv_sock_wake_msglim(struct sock *sk) in iucv_sock_wake_msglim() argument
199 wq = rcu_dereference(sk->sk_wq); in iucv_sock_wake_msglim()
202 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in iucv_sock_wake_msglim()
289 struct sock *sk; in __iucv_get_sock_by_name() local
291 sk_for_each(sk, &iucv_sk_list.head) in __iucv_get_sock_by_name()
292 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) in __iucv_get_sock_by_name()
293 return sk; in __iucv_get_sock_by_name()
298 static void iucv_sock_destruct(struct sock *sk) in iucv_sock_destruct() argument
300 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_destruct()
301 skb_queue_purge(&sk->sk_error_queue); in iucv_sock_destruct()
303 sk_mem_reclaim(sk); in iucv_sock_destruct()
305 if (!sock_flag(sk, SOCK_DEAD)) { in iucv_sock_destruct()
306 pr_err("Attempt to release alive iucv socket %p\n", sk); in iucv_sock_destruct()
310 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in iucv_sock_destruct()
311 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); in iucv_sock_destruct()
312 WARN_ON(sk->sk_wmem_queued); in iucv_sock_destruct()
313 WARN_ON(sk->sk_forward_alloc); in iucv_sock_destruct()
319 struct sock *sk; in iucv_sock_cleanup_listen() local
322 while ((sk = iucv_accept_dequeue(parent, NULL))) { in iucv_sock_cleanup_listen()
323 iucv_sock_close(sk); in iucv_sock_cleanup_listen()
324 iucv_sock_kill(sk); in iucv_sock_cleanup_listen()
330 static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_link() argument
333 sk_add_node(sk, &l->head); in iucv_sock_link()
337 static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_unlink() argument
340 sk_del_node_init(sk); in iucv_sock_unlink()
345 static void iucv_sock_kill(struct sock *sk) in iucv_sock_kill() argument
347 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) in iucv_sock_kill()
350 iucv_sock_unlink(&iucv_sk_list, sk); in iucv_sock_kill()
351 sock_set_flag(sk, SOCK_DEAD); in iucv_sock_kill()
352 sock_put(sk); in iucv_sock_kill()
356 static void iucv_sever_path(struct sock *sk, int with_user_data) in iucv_sever_path() argument
359 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sever_path()
376 static int iucv_send_ctrl(struct sock *sk, u8 flags) in iucv_send_ctrl() argument
378 struct iucv_sock *iucv = iucv_sk(sk); in iucv_send_ctrl()
386 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_send_ctrl()
388 shutdown = sk->sk_shutdown; in iucv_send_ctrl()
389 sk->sk_shutdown &= RCV_SHUTDOWN; in iucv_send_ctrl()
391 skb = sock_alloc_send_skb(sk, blen, 1, &err); in iucv_send_ctrl()
394 err = afiucv_hs_send(NULL, sk, skb, flags); in iucv_send_ctrl()
397 sk->sk_shutdown = shutdown; in iucv_send_ctrl()
402 static void iucv_sock_close(struct sock *sk) in iucv_sock_close() argument
404 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_close()
408 lock_sock(sk); in iucv_sock_close()
410 switch (sk->sk_state) { in iucv_sock_close()
412 iucv_sock_cleanup_listen(sk); in iucv_sock_close()
417 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in iucv_sock_close()
418 sk->sk_state = IUCV_DISCONN; in iucv_sock_close()
419 sk->sk_state_change(sk); in iucv_sock_close()
424 sk->sk_state = IUCV_CLOSING; in iucv_sock_close()
425 sk->sk_state_change(sk); in iucv_sock_close()
428 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) in iucv_sock_close()
429 timeo = sk->sk_lingertime; in iucv_sock_close()
432 iucv_sock_wait(sk, in iucv_sock_close()
433 iucv_sock_in_state(sk, IUCV_CLOSED, 0), in iucv_sock_close()
439 sk->sk_state = IUCV_CLOSED; in iucv_sock_close()
440 sk->sk_state_change(sk); in iucv_sock_close()
442 sk->sk_err = ECONNRESET; in iucv_sock_close()
443 sk->sk_state_change(sk); in iucv_sock_close()
450 iucv_sever_path(sk, 1); in iucv_sock_close()
456 sk->sk_bound_dev_if = 0; in iucv_sock_close()
460 sock_set_flag(sk, SOCK_ZAPPED); in iucv_sock_close()
462 release_sock(sk); in iucv_sock_close()
465 static void iucv_sock_init(struct sock *sk, struct sock *parent) in iucv_sock_init() argument
468 sk->sk_type = parent->sk_type; in iucv_sock_init()
469 security_sk_clone(parent, sk); in iucv_sock_init()
475 struct sock *sk; in iucv_sock_alloc() local
478 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern); in iucv_sock_alloc()
479 if (!sk) in iucv_sock_alloc()
481 iucv = iucv_sk(sk); in iucv_sock_alloc()
483 sock_init_data(sock, sk); in iucv_sock_alloc()
504 sk->sk_destruct = iucv_sock_destruct; in iucv_sock_alloc()
505 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; in iucv_sock_alloc()
507 sock_reset_flag(sk, SOCK_ZAPPED); in iucv_sock_alloc()
509 sk->sk_protocol = proto; in iucv_sock_alloc()
510 sk->sk_state = IUCV_OPEN; in iucv_sock_alloc()
512 iucv_sock_link(&iucv_sk_list, sk); in iucv_sock_alloc()
513 return sk; in iucv_sock_alloc()
516 static void iucv_accept_enqueue(struct sock *parent, struct sock *sk) in iucv_accept_enqueue() argument
521 sock_hold(sk); in iucv_accept_enqueue()
523 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); in iucv_accept_enqueue()
525 iucv_sk(sk)->parent = parent; in iucv_accept_enqueue()
529 static void iucv_accept_unlink(struct sock *sk) in iucv_accept_unlink() argument
532 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); in iucv_accept_unlink()
535 list_del_init(&iucv_sk(sk)->accept_q); in iucv_accept_unlink()
537 sk_acceptq_removed(iucv_sk(sk)->parent); in iucv_accept_unlink()
538 iucv_sk(sk)->parent = NULL; in iucv_accept_unlink()
539 sock_put(sk); in iucv_accept_unlink()
546 struct sock *sk; in iucv_accept_dequeue() local
549 sk = (struct sock *) isk; in iucv_accept_dequeue()
550 lock_sock(sk); in iucv_accept_dequeue()
552 if (sk->sk_state == IUCV_CLOSED) { in iucv_accept_dequeue()
553 iucv_accept_unlink(sk); in iucv_accept_dequeue()
554 release_sock(sk); in iucv_accept_dequeue()
558 if (sk->sk_state == IUCV_CONNECTED || in iucv_accept_dequeue()
559 sk->sk_state == IUCV_DISCONN || in iucv_accept_dequeue()
561 iucv_accept_unlink(sk); in iucv_accept_dequeue()
563 sock_graft(sk, newsock); in iucv_accept_dequeue()
565 release_sock(sk); in iucv_accept_dequeue()
566 return sk; in iucv_accept_dequeue()
569 release_sock(sk); in iucv_accept_dequeue()
592 struct sock *sk = sock->sk; in iucv_sock_bind() local
602 lock_sock(sk); in iucv_sock_bind()
603 if (sk->sk_state != IUCV_OPEN) { in iucv_sock_bind()
610 iucv = iucv_sk(sk); in iucv_sock_bind()
635 sk->sk_bound_dev_if = dev->ifindex; in iucv_sock_bind()
638 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
652 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
654 sk->sk_allocation |= GFP_DMA; in iucv_sock_bind()
665 release_sock(sk); in iucv_sock_bind()
670 static int iucv_sock_autobind(struct sock *sk) in iucv_sock_autobind() argument
672 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_autobind()
680 sk->sk_allocation |= GFP_DMA; in iucv_sock_autobind()
695 struct sock *sk = sock->sk; in afiucv_path_connect() local
696 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_path_connect()
713 sk); in afiucv_path_connect()
742 struct sock *sk = sock->sk; in iucv_sock_connect() local
743 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_connect()
749 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) in iucv_sock_connect()
752 if (sk->sk_state == IUCV_OPEN && in iucv_sock_connect()
756 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) in iucv_sock_connect()
759 if (sk->sk_state == IUCV_OPEN) { in iucv_sock_connect()
760 err = iucv_sock_autobind(sk); in iucv_sock_connect()
765 lock_sock(sk); in iucv_sock_connect()
772 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); in iucv_sock_connect()
778 if (sk->sk_state != IUCV_CONNECTED) in iucv_sock_connect()
779 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, in iucv_sock_connect()
781 sock_sndtimeo(sk, flags & O_NONBLOCK)); in iucv_sock_connect()
783 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) in iucv_sock_connect()
787 iucv_sever_path(sk, 0); in iucv_sock_connect()
790 release_sock(sk); in iucv_sock_connect()
797 struct sock *sk = sock->sk; in iucv_sock_listen() local
800 lock_sock(sk); in iucv_sock_listen()
803 if (sk->sk_state != IUCV_BOUND) in iucv_sock_listen()
809 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
810 sk->sk_ack_backlog = 0; in iucv_sock_listen()
811 sk->sk_state = IUCV_LISTEN; in iucv_sock_listen()
815 release_sock(sk); in iucv_sock_listen()
824 struct sock *sk = sock->sk, *nsk; in iucv_sock_accept() local
828 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
830 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
835 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); in iucv_sock_accept()
838 add_wait_queue_exclusive(sk_sleep(sk), &wait); in iucv_sock_accept()
839 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { in iucv_sock_accept()
846 release_sock(sk); in iucv_sock_accept()
848 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
850 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
862 remove_wait_queue(sk_sleep(sk), &wait); in iucv_sock_accept()
870 release_sock(sk); in iucv_sock_accept()
878 struct sock *sk = sock->sk; in iucv_sock_getname() local
879 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getname()
924 struct sock *sk = sock->sk; in iucv_sock_sendmsg() local
925 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_sendmsg()
938 err = sock_error(sk); in iucv_sock_sendmsg()
946 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) in iucv_sock_sendmsg()
949 lock_sock(sk); in iucv_sock_sendmsg()
951 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_sock_sendmsg()
957 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1020 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear, in iucv_sock_sendmsg()
1034 timeo = sock_sndtimeo(sk, noblock); in iucv_sock_sendmsg()
1035 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); in iucv_sock_sendmsg()
1040 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1051 err = afiucv_hs_send(&txmsg, sk, skb, 0); in iucv_sock_sendmsg()
1117 release_sock(sk); in iucv_sock_sendmsg()
1123 release_sock(sk); in iucv_sock_sendmsg()
1159 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, in iucv_process_message() argument
1208 if (sk_filter(sk, skb)) { in iucv_process_message()
1209 atomic_inc(&sk->sk_drops); /* skb rejected by filter */ in iucv_process_message()
1213 if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */ in iucv_process_message()
1214 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in iucv_process_message()
1221 static void iucv_process_message_q(struct sock *sk) in iucv_process_message_q() argument
1223 struct iucv_sock *iucv = iucv_sk(sk); in iucv_process_message_q()
1231 iucv_process_message(sk, skb, p->path, &p->msg); in iucv_process_message_q()
1243 struct sock *sk = sock->sk; in iucv_sock_recvmsg() local
1244 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_recvmsg()
1250 if ((sk->sk_state == IUCV_DISCONN) && in iucv_sock_recvmsg()
1252 skb_queue_empty(&sk->sk_receive_queue) && in iucv_sock_recvmsg()
1261 skb = skb_recv_datagram(sk, flags, noblock, &err); in iucv_sock_recvmsg()
1263 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_recvmsg()
1272 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; in iucv_sock_recvmsg()
1277 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1282 if (sk->sk_type == SOCK_SEQPACKET) { in iucv_sock_recvmsg()
1297 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1305 if (sk->sk_type == SOCK_STREAM) { in iucv_sock_recvmsg()
1308 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1318 iucv_sock_close(sk); in iucv_sock_recvmsg()
1328 if (__sock_queue_rcv_skb(sk, rskb)) { in iucv_sock_recvmsg()
1338 iucv_process_message_q(sk); in iucv_sock_recvmsg()
1341 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); in iucv_sock_recvmsg()
1343 sk->sk_state = IUCV_DISCONN; in iucv_sock_recvmsg()
1344 sk->sk_state_change(sk); in iucv_sock_recvmsg()
1353 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) in iucv_sock_recvmsg()
1362 struct sock *sk; in iucv_accept_poll() local
1365 sk = (struct sock *) isk; in iucv_accept_poll()
1367 if (sk->sk_state == IUCV_CONNECTED) in iucv_accept_poll()
1377 struct sock *sk = sock->sk; in iucv_sock_poll() local
1382 if (sk->sk_state == IUCV_LISTEN) in iucv_sock_poll()
1383 return iucv_accept_poll(sk); in iucv_sock_poll()
1385 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in iucv_sock_poll()
1387 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); in iucv_sock_poll()
1389 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_poll()
1392 if (sk->sk_shutdown == SHUTDOWN_MASK) in iucv_sock_poll()
1395 if (!skb_queue_empty(&sk->sk_receive_queue) || in iucv_sock_poll()
1396 (sk->sk_shutdown & RCV_SHUTDOWN)) in iucv_sock_poll()
1399 if (sk->sk_state == IUCV_CLOSED) in iucv_sock_poll()
1402 if (sk->sk_state == IUCV_DISCONN) in iucv_sock_poll()
1405 if (sock_writeable(sk) && iucv_below_msglim(sk)) in iucv_sock_poll()
1408 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in iucv_sock_poll()
1415 struct sock *sk = sock->sk; in iucv_sock_shutdown() local
1416 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_shutdown()
1425 lock_sock(sk); in iucv_sock_shutdown()
1426 switch (sk->sk_state) { in iucv_sock_shutdown()
1438 sk->sk_state == IUCV_CONNECTED) { in iucv_sock_shutdown()
1458 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); in iucv_sock_shutdown()
1461 sk->sk_shutdown |= how; in iucv_sock_shutdown()
1470 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_shutdown()
1474 sk->sk_state_change(sk); in iucv_sock_shutdown()
1477 release_sock(sk); in iucv_sock_shutdown()
1483 struct sock *sk = sock->sk; in iucv_sock_release() local
1486 if (!sk) in iucv_sock_release()
1489 iucv_sock_close(sk); in iucv_sock_release()
1491 sock_orphan(sk); in iucv_sock_release()
1492 iucv_sock_kill(sk); in iucv_sock_release()
1500 struct sock *sk = sock->sk; in iucv_sock_setsockopt() local
1501 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_setsockopt()
1516 lock_sock(sk); in iucv_sock_setsockopt()
1525 switch (sk->sk_state) { in iucv_sock_setsockopt()
1542 release_sock(sk); in iucv_sock_setsockopt()
1550 struct sock *sk = sock->sk; in iucv_sock_getsockopt() local
1551 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getsockopt()
1571 lock_sock(sk); in iucv_sock_getsockopt()
1574 release_sock(sk); in iucv_sock_getsockopt()
1577 if (sk->sk_state == IUCV_OPEN) in iucv_sock_getsockopt()
1603 struct sock *sk, *nsk; in iucv_callback_connreq() local
1612 sk = NULL; in iucv_callback_connreq()
1613 sk_for_each(sk, &iucv_sk_list.head) in iucv_callback_connreq()
1614 if (sk->sk_state == IUCV_LISTEN && in iucv_callback_connreq()
1615 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { in iucv_callback_connreq()
1620 iucv = iucv_sk(sk); in iucv_callback_connreq()
1628 bh_lock_sock(sk); in iucv_callback_connreq()
1634 if (sk->sk_state != IUCV_LISTEN) { in iucv_callback_connreq()
1641 if (sk_acceptq_is_full(sk)) { in iucv_callback_connreq()
1648 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); in iucv_callback_connreq()
1656 iucv_sock_init(nsk, sk); in iucv_callback_connreq()
1683 iucv_accept_enqueue(sk, nsk); in iucv_callback_connreq()
1687 sk->sk_data_ready(sk); in iucv_callback_connreq()
1690 bh_unlock_sock(sk); in iucv_callback_connreq()
1696 struct sock *sk = path->private; in iucv_callback_connack() local
1698 sk->sk_state = IUCV_CONNECTED; in iucv_callback_connack()
1699 sk->sk_state_change(sk); in iucv_callback_connack()
1704 struct sock *sk = path->private; in iucv_callback_rx() local
1705 struct iucv_sock *iucv = iucv_sk(sk); in iucv_callback_rx()
1710 if (sk->sk_shutdown & RCV_SHUTDOWN) { in iucv_callback_rx()
1721 len = atomic_read(&sk->sk_rmem_alloc); in iucv_callback_rx()
1723 if (len > sk->sk_rcvbuf) in iucv_callback_rx()
1730 iucv_process_message(sk, skb, path, msg); in iucv_callback_rx()
1749 struct sock *sk = path->private; in iucv_callback_txdone() local
1751 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; in iucv_callback_txdone()
1755 bh_lock_sock(sk); in iucv_callback_txdone()
1771 iucv_sock_wake_msglim(sk); in iucv_callback_txdone()
1774 if (sk->sk_state == IUCV_CLOSING) { in iucv_callback_txdone()
1775 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in iucv_callback_txdone()
1776 sk->sk_state = IUCV_CLOSED; in iucv_callback_txdone()
1777 sk->sk_state_change(sk); in iucv_callback_txdone()
1780 bh_unlock_sock(sk); in iucv_callback_txdone()
1786 struct sock *sk = path->private; in iucv_callback_connrej() local
1788 if (sk->sk_state == IUCV_CLOSED) in iucv_callback_connrej()
1791 bh_lock_sock(sk); in iucv_callback_connrej()
1792 iucv_sever_path(sk, 1); in iucv_callback_connrej()
1793 sk->sk_state = IUCV_DISCONN; in iucv_callback_connrej()
1795 sk->sk_state_change(sk); in iucv_callback_connrej()
1796 bh_unlock_sock(sk); in iucv_callback_connrej()
1804 struct sock *sk = path->private; in iucv_callback_shutdown() local
1806 bh_lock_sock(sk); in iucv_callback_shutdown()
1807 if (sk->sk_state != IUCV_CLOSED) { in iucv_callback_shutdown()
1808 sk->sk_shutdown |= SEND_SHUTDOWN; in iucv_callback_shutdown()
1809 sk->sk_state_change(sk); in iucv_callback_shutdown()
1811 bh_unlock_sock(sk); in iucv_callback_shutdown()
1838 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_syn() argument
1845 iucv = iucv_sk(sk); in afiucv_hs_callback_syn()
1854 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); in afiucv_hs_callback_syn()
1855 bh_lock_sock(sk); in afiucv_hs_callback_syn()
1856 if ((sk->sk_state != IUCV_LISTEN) || in afiucv_hs_callback_syn()
1857 sk_acceptq_is_full(sk) || in afiucv_hs_callback_syn()
1864 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1869 iucv_sock_init(nsk, sk); in afiucv_hs_callback_syn()
1880 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; in afiucv_hs_callback_syn()
1889 iucv_accept_enqueue(sk, nsk); in afiucv_hs_callback_syn()
1891 sk->sk_data_ready(sk); in afiucv_hs_callback_syn()
1894 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1903 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synack() argument
1905 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synack()
1909 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synack()
1911 bh_lock_sock(sk); in afiucv_hs_callback_synack()
1913 sk->sk_state = IUCV_CONNECTED; in afiucv_hs_callback_synack()
1914 sk->sk_state_change(sk); in afiucv_hs_callback_synack()
1915 bh_unlock_sock(sk); in afiucv_hs_callback_synack()
1924 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synfin() argument
1926 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synfin()
1930 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synfin()
1932 bh_lock_sock(sk); in afiucv_hs_callback_synfin()
1933 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_synfin()
1934 sk->sk_state_change(sk); in afiucv_hs_callback_synfin()
1935 bh_unlock_sock(sk); in afiucv_hs_callback_synfin()
1944 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_fin() argument
1946 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_fin()
1951 bh_lock_sock(sk); in afiucv_hs_callback_fin()
1952 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_fin()
1953 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_fin()
1954 sk->sk_state_change(sk); in afiucv_hs_callback_fin()
1956 bh_unlock_sock(sk); in afiucv_hs_callback_fin()
1965 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_win() argument
1967 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_win()
1972 if (sk->sk_state != IUCV_CONNECTED) in afiucv_hs_callback_win()
1976 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_win()
1983 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_rx() argument
1985 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_rx()
1992 if (sk->sk_state != IUCV_CONNECTED) { in afiucv_hs_callback_rx()
1997 if (sk->sk_shutdown & RCV_SHUTDOWN) { in afiucv_hs_callback_rx()
2007 if (sk_filter(sk, skb)) { in afiucv_hs_callback_rx()
2008 atomic_inc(&sk->sk_drops); /* skb rejected by filter */ in afiucv_hs_callback_rx()
2015 if (__sock_queue_rcv_skb(sk, skb)) in afiucv_hs_callback_rx()
2019 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in afiucv_hs_callback_rx()
2032 struct sock *sk; in afiucv_hs_rcv() local
2050 sk = NULL; in afiucv_hs_rcv()
2052 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_hs_rcv()
2054 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2056 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2058 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && in afiucv_hs_rcv()
2059 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2061 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2065 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2067 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2069 (!memcmp(&iucv_sk(sk)->dst_name, in afiucv_hs_rcv()
2071 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2073 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2080 sk = NULL; in afiucv_hs_rcv()
2094 err = afiucv_hs_callback_syn(sk, skb); in afiucv_hs_rcv()
2098 err = afiucv_hs_callback_synack(sk, skb); in afiucv_hs_rcv()
2102 err = afiucv_hs_callback_synfin(sk, skb); in afiucv_hs_rcv()
2106 err = afiucv_hs_callback_fin(sk, skb); in afiucv_hs_rcv()
2109 err = afiucv_hs_callback_win(sk, skb); in afiucv_hs_rcv()
2121 err = afiucv_hs_callback_rx(sk, skb); in afiucv_hs_rcv()
2137 struct sock *isk = skb->sk; in afiucv_hs_callback_txnotify()
2138 struct sock *sk = NULL; in afiucv_hs_callback_txnotify() local
2146 sk_for_each(sk, &iucv_sk_list.head) in afiucv_hs_callback_txnotify()
2147 if (sk == isk) { in afiucv_hs_callback_txnotify()
2148 iucv = iucv_sk(sk); in afiucv_hs_callback_txnotify()
2153 if (!iucv || sock_flag(sk, SOCK_ZAPPED)) in afiucv_hs_callback_txnotify()
2164 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2173 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2183 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_txnotify()
2184 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_txnotify()
2185 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2194 if (sk->sk_state == IUCV_CLOSING) { in afiucv_hs_callback_txnotify()
2195 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in afiucv_hs_callback_txnotify()
2196 sk->sk_state = IUCV_CLOSED; in afiucv_hs_callback_txnotify()
2197 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2210 struct sock *sk; in afiucv_netdev_event() local
2216 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_netdev_event()
2217 iucv = iucv_sk(sk); in afiucv_netdev_event()
2219 (sk->sk_state == IUCV_CONNECTED)) { in afiucv_netdev_event()
2221 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in afiucv_netdev_event()
2222 sk->sk_state = IUCV_DISCONN; in afiucv_netdev_event()
2223 sk->sk_state_change(sk); in afiucv_netdev_event()
2262 struct sock *sk; in iucv_sock_create() local
2279 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern); in iucv_sock_create()
2280 if (!sk) in iucv_sock_create()
2283 iucv_sock_init(sk, NULL); in iucv_sock_create()
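
The fragments at source lines 54-83 above belong to the __iucv_sock_wait()/iucv_sock_wait() helper macros. As a reading aid, here is a minimal sketch of the wait pattern those fragments imply (prepare_to_wait, drop and retake the socket lock around the sleep, re-check sock_error, finish_wait). The concrete condition, timeout handling and return values below are assumptions for illustration, not a verbatim copy of af_iucv.c.

/*
 * Illustrative sketch only: the generic "wait until <condition>" pattern
 * that the __iucv_sock_wait() fragments above follow.  The condition used
 * here (wait for IUCV_CONNECTED) is an example, not the macro itself.
 */
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/iucv/af_iucv.h>

static int example_wait_for_connect(struct sock *sk, long timeo)
{
	DEFINE_WAIT(__wait);
	int ret = 0;

	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);
	while (sk->sk_state != IUCV_CONNECTED) {	/* example condition */
		if (!timeo) {				/* timed out */
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {		/* interrupted by a signal */
			ret = sock_intr_errno(timeo);
			break;
		}
		/* sleep without holding the socket lock, then retake it */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		ret = sock_error(sk);			/* bail out on a pending socket error */
		if (ret)
			break;
	}
	finish_wait(sk_sleep(sk), &__wait);
	return ret;
}

In the real helpers the condition is a caller-supplied expression, which is why the listing shows them as #define macros taking a condition argument rather than as functions.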
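
Likewise, source lines 1377-1408 above belong to iucv_sock_poll(). A hedged sketch of how such a poll mask can be assembled from the checks visible in those fragments follows; the exact ordering and the use of the iucv_below_msglim() helper (referenced at line 178 above and assumed visible in the same translation unit) are assumptions based on the listing, not an authoritative copy.

/*
 * Illustrative sketch only: building an EPOLL mask from the socket state
 * checks shown in the iucv_sock_poll() fragments above.
 */
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/iucv/af_iucv.h>

static __poll_t example_iucv_poll_mask(struct sock *sk)
{
	__poll_t mask = 0;

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;
	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;
	/* writable only while below the peer's message limit */
	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}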