Lines matching references to "sk" in the AF_IUCV socket implementation (net/iucv/af_iucv.c). The leading number on each entry is the line number within that file; the trailing "in ...()" names the enclosing function, and "argument"/"local" marks how "sk" is used there.

53 #define __iucv_sock_wait(sk, condition, timeo, ret)			\  argument
58 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
68 release_sock(sk); \
70 lock_sock(sk); \
71 ret = sock_error(sk); \
75 finish_wait(sk_sleep(sk), &__wait); \
78 #define iucv_sock_wait(sk, condition, timeo) \ argument
82 __iucv_sock_wait(sk, condition, timeo, __ret); \
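The fragments at lines 53-82 are enough to reconstruct the wait-macro pair. It follows the standard kernel socket-wait idiom (prepare_to_wait/finish_wait, dropping the socket lock while asleep); the lines not shown in the listing below are filled in from that idiom as an assumption, not copied from the source:

    #define __iucv_sock_wait(sk, condition, timeo, ret)                 \
    do {                                                                \
        DEFINE_WAIT(__wait);                                            \
        long __timeo = timeo;                                           \
        ret = 0;                                                        \
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
        while (!(condition)) {                                          \
            if (!__timeo) {                                             \
                ret = -EAGAIN;                                          \
                break;                                                  \
            }                                                           \
            if (signal_pending(current)) {                              \
                ret = sock_intr_errno(__timeo);                         \
                break;                                                  \
            }                                                           \
            release_sock(sk);          /* drop lock while sleeping */   \
            __timeo = schedule_timeout(__timeo);                        \
            lock_sock(sk);                                              \
            ret = sock_error(sk);      /* bail out on socket error */   \
            if (ret)                                                    \
                break;                                                  \
        }                                                               \
        finish_wait(sk_sleep(sk), &__wait);                             \
    } while (0)

    #define iucv_sock_wait(sk, condition, timeo)                        \
    ({                                                                  \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
            __iucv_sock_wait(sk, condition, timeo, __ret);              \
        __ret;                                                          \
    })

The two-level split lets callers write iucv_sock_wait(sk, cond, timeo) as an expression while the inner macro carries the loop state.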
86 static void iucv_sock_kill(struct sock *sk);
87 static void iucv_sock_close(struct sock *sk);
152 struct sock *sk; in afiucv_pm_freeze() local
159 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_pm_freeze()
160 iucv = iucv_sk(sk); in afiucv_pm_freeze()
161 switch (sk->sk_state) { in afiucv_pm_freeze()
165 iucv_sever_path(sk, 0); in afiucv_pm_freeze()
189 struct sock *sk; in afiucv_pm_restore_thaw() local
195 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_pm_restore_thaw()
196 switch (sk->sk_state) { in afiucv_pm_restore_thaw()
198 sk->sk_err = EPIPE; in afiucv_pm_restore_thaw()
199 sk->sk_state = IUCV_DISCONN; in afiucv_pm_restore_thaw()
200 sk->sk_state_change(sk); in afiucv_pm_restore_thaw()
272 static int iucv_sock_in_state(struct sock *sk, int state, int state2) in iucv_sock_in_state() argument
274 return (sk->sk_state == state || sk->sk_state == state2); in iucv_sock_in_state()
285 static inline int iucv_below_msglim(struct sock *sk) in iucv_below_msglim() argument
287 struct iucv_sock *iucv = iucv_sk(sk); in iucv_below_msglim()
289 if (sk->sk_state != IUCV_CONNECTED) in iucv_below_msglim()
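Only the state check at line 289 survives in the listing; the rest of iucv_below_msglim() is sketched below by assumption from the iucv_sock fields (transport, send_skb_q, msg_sent, msglimit_peer, pendings). The idea is flow control: a sender may only have a bounded number of messages in flight per transport.

    static inline int iucv_below_msglim(struct sock *sk)
    {
        struct iucv_sock *iucv = iucv_sk(sk);

        if (sk->sk_state != IUCV_CONNECTED)
            return 1;   /* not connected: let the caller fail elsewhere */
        if (iucv->transport == AF_IUCV_TRANS_IUCV)
            /* classic VM IUCV: limited by the path's message limit */
            return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
        else
            /* HiperSockets transport: limited by the peer's window */
            return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
                    (atomic_read(&iucv->pendings) <= 0));
    }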
301 static void iucv_sock_wake_msglim(struct sock *sk) in iucv_sock_wake_msglim() argument
306 wq = rcu_dereference(sk->sk_wq); in iucv_sock_wake_msglim()
309 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in iucv_sock_wake_msglim()
383 struct sock *sk; in __iucv_get_sock_by_name() local
385 sk_for_each(sk, &iucv_sk_list.head) in __iucv_get_sock_by_name()
386 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) in __iucv_get_sock_by_name()
387 return sk; in __iucv_get_sock_by_name()
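Lines 383-387 show essentially the whole lookup helper: a linear scan of the global socket list comparing the fixed 8-byte application name. Reconstructed (only the NULL fall-through is added):

    static struct sock *__iucv_get_sock_by_name(char *nm)
    {
        struct sock *sk;

        /* walk every AF_IUCV socket; names are blank-padded 8-byte
         * fields, so memcmp over 8 bytes is the whole comparison */
        sk_for_each(sk, &iucv_sk_list.head)
            if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                return sk;

        return NULL;
    }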
392 static void iucv_sock_destruct(struct sock *sk) in iucv_sock_destruct() argument
394 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_destruct()
395 skb_queue_purge(&sk->sk_error_queue); in iucv_sock_destruct()
397 sk_mem_reclaim(sk); in iucv_sock_destruct()
399 if (!sock_flag(sk, SOCK_DEAD)) { in iucv_sock_destruct()
400 pr_err("Attempt to release alive iucv socket %p\n", sk); in iucv_sock_destruct()
404 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in iucv_sock_destruct()
405 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); in iucv_sock_destruct()
406 WARN_ON(sk->sk_wmem_queued); in iucv_sock_destruct()
407 WARN_ON(sk->sk_forward_alloc); in iucv_sock_destruct()
413 struct sock *sk; in iucv_sock_cleanup_listen() local
416 while ((sk = iucv_accept_dequeue(parent, NULL))) { in iucv_sock_cleanup_listen()
417 iucv_sock_close(sk); in iucv_sock_cleanup_listen()
418 iucv_sock_kill(sk); in iucv_sock_cleanup_listen()
425 static void iucv_sock_kill(struct sock *sk) in iucv_sock_kill() argument
427 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) in iucv_sock_kill()
430 iucv_sock_unlink(&iucv_sk_list, sk); in iucv_sock_kill()
431 sock_set_flag(sk, SOCK_DEAD); in iucv_sock_kill()
432 sock_put(sk); in iucv_sock_kill()
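Lines 427-432 give iucv_sock_kill() in full. It only destroys sockets that are both zapped (unhashed) and orphaned from any struct socket:

    static void iucv_sock_kill(struct sock *sk)
    {
        /* refuse to kill a socket that is still hashed or still has
         * a userspace socket attached */
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
            return;

        iucv_sock_unlink(&iucv_sk_list, sk);  /* drop from global list */
        sock_set_flag(sk, SOCK_DEAD);         /* mark for destruction */
        sock_put(sk);                         /* drop the final reference */
    }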
436 static void iucv_sever_path(struct sock *sk, int with_user_data) in iucv_sever_path() argument
439 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sever_path()
456 static int iucv_send_ctrl(struct sock *sk, u8 flags) in iucv_send_ctrl() argument
464 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_send_ctrl()
466 shutdown = sk->sk_shutdown; in iucv_send_ctrl()
467 sk->sk_shutdown &= RCV_SHUTDOWN; in iucv_send_ctrl()
469 skb = sock_alloc_send_skb(sk, blen, 1, &err); in iucv_send_ctrl()
472 err = afiucv_hs_send(NULL, sk, skb, flags); in iucv_send_ctrl()
475 sk->sk_shutdown = shutdown; in iucv_send_ctrl()
480 static void iucv_sock_close(struct sock *sk) in iucv_sock_close() argument
482 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_close()
486 lock_sock(sk); in iucv_sock_close()
488 switch (sk->sk_state) { in iucv_sock_close()
490 iucv_sock_cleanup_listen(sk); in iucv_sock_close()
495 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in iucv_sock_close()
496 sk->sk_state = IUCV_DISCONN; in iucv_sock_close()
497 sk->sk_state_change(sk); in iucv_sock_close()
500 sk->sk_state = IUCV_CLOSING; in iucv_sock_close()
501 sk->sk_state_change(sk); in iucv_sock_close()
504 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) in iucv_sock_close()
505 timeo = sk->sk_lingertime; in iucv_sock_close()
508 iucv_sock_wait(sk, in iucv_sock_close()
509 iucv_sock_in_state(sk, IUCV_CLOSED, 0), in iucv_sock_close()
514 sk->sk_state = IUCV_CLOSED; in iucv_sock_close()
515 sk->sk_state_change(sk); in iucv_sock_close()
517 sk->sk_err = ECONNRESET; in iucv_sock_close()
518 sk->sk_state_change(sk); in iucv_sock_close()
524 iucv_sever_path(sk, 1); in iucv_sock_close()
530 sk->sk_bound_dev_if = 0; in iucv_sock_close()
534 sock_set_flag(sk, SOCK_ZAPPED); in iucv_sock_close()
536 release_sock(sk); in iucv_sock_close()
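The close path (lines 480-536) is a state machine: a listener cleans up its pending children; a connected socket sends FIN, moves to IUCV_CLOSING, optionally lingers until the send queue drains, then lands in IUCV_CLOSED, severs the IUCV path, and is zapped. A sketch of the linger step, with the send-queue guard filled in by assumption (IUCV_DISCONN_TIMEOUT is the driver's default when SO_LINGER is not set):

    /* inside iucv_sock_close(), after sending FIN: */
    sk->sk_state = IUCV_CLOSING;
    sk->sk_state_change(sk);

    if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
            timeo = sk->sk_lingertime;      /* user-set linger time */
        else
            timeo = IUCV_DISCONN_TIMEOUT;   /* driver default */
        /* wait (unlocked, via the macro above) for the TX-done
         * callback to move us to IUCV_CLOSED */
        iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CLOSED, 0), timeo);
    }

    sk->sk_state = IUCV_CLOSED;
    sk->sk_state_change(sk);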
539 static void iucv_sock_init(struct sock *sk, struct sock *parent) in iucv_sock_init() argument
542 sk->sk_type = parent->sk_type; in iucv_sock_init()
543 security_sk_clone(parent, sk); in iucv_sock_init()
549 struct sock *sk; in iucv_sock_alloc() local
552 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern); in iucv_sock_alloc()
553 if (!sk) in iucv_sock_alloc()
555 iucv = iucv_sk(sk); in iucv_sock_alloc()
557 sock_init_data(sock, sk); in iucv_sock_alloc()
578 sk->sk_destruct = iucv_sock_destruct; in iucv_sock_alloc()
579 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; in iucv_sock_alloc()
580 sk->sk_allocation = GFP_DMA; in iucv_sock_alloc()
582 sock_reset_flag(sk, SOCK_ZAPPED); in iucv_sock_alloc()
584 sk->sk_protocol = proto; in iucv_sock_alloc()
585 sk->sk_state = IUCV_OPEN; in iucv_sock_alloc()
587 iucv_sock_link(&iucv_sk_list, sk); in iucv_sock_alloc()
588 return sk; in iucv_sock_alloc()
595 struct sock *sk; in iucv_sock_create() local
614 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern); in iucv_sock_create()
615 if (!sk) in iucv_sock_create()
618 iucv_sock_init(sk, NULL); in iucv_sock_create()
623 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_link() argument
626 sk_add_node(sk, &l->head); in iucv_sock_link()
630 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_unlink() argument
633 sk_del_node_init(sk); in iucv_sock_unlink()
637 void iucv_accept_enqueue(struct sock *parent, struct sock *sk) in iucv_accept_enqueue() argument
642 sock_hold(sk); in iucv_accept_enqueue()
644 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); in iucv_accept_enqueue()
646 iucv_sk(sk)->parent = parent; in iucv_accept_enqueue()
650 void iucv_accept_unlink(struct sock *sk) in iucv_accept_unlink() argument
653 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); in iucv_accept_unlink()
656 list_del_init(&iucv_sk(sk)->accept_q); in iucv_accept_unlink()
658 sk_acceptq_removed(iucv_sk(sk)->parent); in iucv_accept_unlink()
659 iucv_sk(sk)->parent = NULL; in iucv_accept_unlink()
660 sock_put(sk); in iucv_accept_unlink()
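Lines 637-660 show the accept-queue pair: a child socket is held while it sits on the parent's accept_q and released when unlinked. Reconstructed below; the spinlock name accept_q_lock is an assumption from the iucv_sock layout:

    void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
    {
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);                     /* queue keeps a reference */
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);          /* bump parent's backlog count */
    }

    void iucv_accept_unlink(struct sock *sk)
    {
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        sk_acceptq_removed(iucv_sk(sk)->parent);
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);                      /* drop the queue's reference */
    }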
666 struct sock *sk; in iucv_accept_dequeue() local
669 sk = (struct sock *) isk; in iucv_accept_dequeue()
670 lock_sock(sk); in iucv_accept_dequeue()
672 if (sk->sk_state == IUCV_CLOSED) { in iucv_accept_dequeue()
673 iucv_accept_unlink(sk); in iucv_accept_dequeue()
674 release_sock(sk); in iucv_accept_dequeue()
678 if (sk->sk_state == IUCV_CONNECTED || in iucv_accept_dequeue()
679 sk->sk_state == IUCV_DISCONN || in iucv_accept_dequeue()
681 iucv_accept_unlink(sk); in iucv_accept_dequeue()
683 sock_graft(sk, newsock); in iucv_accept_dequeue()
685 release_sock(sk); in iucv_accept_dequeue()
686 return sk; in iucv_accept_dequeue()
689 release_sock(sk); in iucv_accept_dequeue()
711 struct sock *sk = sock->sk; in iucv_sock_bind() local
722 lock_sock(sk); in iucv_sock_bind()
723 if (sk->sk_state != IUCV_OPEN) { in iucv_sock_bind()
730 iucv = iucv_sk(sk); in iucv_sock_bind()
755 sk->sk_bound_dev_if = dev->ifindex; in iucv_sock_bind()
758 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
772 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
784 release_sock(sk); in iucv_sock_bind()
789 static int iucv_sock_autobind(struct sock *sk) in iucv_sock_autobind() argument
791 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_autobind()
812 struct sock *sk = sock->sk; in afiucv_path_connect() local
813 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_path_connect()
830 sk); in afiucv_path_connect()
859 struct sock *sk = sock->sk; in iucv_sock_connect() local
860 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_connect()
866 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) in iucv_sock_connect()
869 if (sk->sk_state == IUCV_OPEN && in iucv_sock_connect()
873 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) in iucv_sock_connect()
876 if (sk->sk_state == IUCV_OPEN) { in iucv_sock_connect()
877 err = iucv_sock_autobind(sk); in iucv_sock_connect()
882 lock_sock(sk); in iucv_sock_connect()
889 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); in iucv_sock_connect()
895 if (sk->sk_state != IUCV_CONNECTED) in iucv_sock_connect()
896 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, in iucv_sock_connect()
898 sock_sndtimeo(sk, flags & O_NONBLOCK)); in iucv_sock_connect()
900 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) in iucv_sock_connect()
904 iucv_sever_path(sk, 0); in iucv_sock_connect()
907 release_sock(sk); in iucv_sock_connect()
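For context, the connect path above (lines 859-907) is what a plain user-space connect() against an AF_IUCV socket drives: autobind if still IUCV_OPEN, send SYN, then block until IUCV_CONNECTED. A minimal client sketch, assuming AF_IUCV is address family 32 and the sockaddr_iucv layout from the kernel headers; the user id and application name below are placeholders:

    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_IUCV
    #define AF_IUCV 32
    #endif

    /* layout as in the kernel's af_iucv header; repeated here only
     * for illustration */
    struct sockaddr_iucv {
        sa_family_t    siucv_family;
        unsigned short siucv_port;       /* reserved */
        unsigned int   siucv_addr;       /* reserved */
        char           siucv_nodeid[8];  /* reserved */
        char           siucv_user_id[8]; /* z/VM guest user id */
        char           siucv_name[8];    /* application name */
    };

    int iucv_client_connect(void)
    {
        struct sockaddr_iucv addr;
        int fd = socket(AF_IUCV, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;

        memset(&addr, 0, sizeof(addr));
        addr.siucv_family = AF_IUCV;
        /* blank-padded 8-character fields; placeholder names */
        memcpy(addr.siucv_user_id, "SERVER  ", 8);
        memcpy(addr.siucv_name,    "APPSRV  ", 8);

        /* drives iucv_sock_connect(): autobind, SYN, wait */
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }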
914 struct sock *sk = sock->sk; in iucv_sock_listen() local
917 lock_sock(sk); in iucv_sock_listen()
920 if (sk->sk_state != IUCV_BOUND) in iucv_sock_listen()
926 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
927 sk->sk_ack_backlog = 0; in iucv_sock_listen()
928 sk->sk_state = IUCV_LISTEN; in iucv_sock_listen()
932 release_sock(sk); in iucv_sock_listen()
941 struct sock *sk = sock->sk, *nsk; in iucv_sock_accept() local
945 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
947 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
952 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); in iucv_sock_accept()
955 add_wait_queue_exclusive(sk_sleep(sk), &wait); in iucv_sock_accept()
956 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { in iucv_sock_accept()
963 release_sock(sk); in iucv_sock_accept()
965 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
967 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
979 remove_wait_queue(sk_sleep(sk), &wait); in iucv_sock_accept()
987 release_sock(sk); in iucv_sock_accept()
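Lines 941-987 show the classic exclusive-waiter accept loop; the body below fills in only the error branches that the listing omits, following the standard pattern the visible lines imply:

    /* inside iucv_sock_accept(), sketch of the dequeue loop */
    timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
    add_wait_queue_exclusive(sk_sleep(sk), &wait);
    while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (!timeo) {
            err = -EAGAIN;                /* non-blocking, nothing queued */
            break;
        }
        release_sock(sk);                 /* sleep without the lock */
        timeo = schedule_timeout(timeo);
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
            err = -EBADFD;                /* listener closed under us */
            break;
        }
        if (signal_pending(current)) {
            err = sock_intr_errno(timeo);
            break;
        }
    }
    set_current_state(TASK_RUNNING);
    remove_wait_queue(sk_sleep(sk), &wait);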
995 struct sock *sk = sock->sk; in iucv_sock_getname() local
996 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getname()
1042 struct sock *sk = sock->sk; in iucv_sock_sendmsg() local
1043 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_sendmsg()
1056 err = sock_error(sk); in iucv_sock_sendmsg()
1064 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) in iucv_sock_sendmsg()
1067 lock_sock(sk); in iucv_sock_sendmsg()
1069 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_sock_sendmsg()
1075 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1138 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear, in iucv_sock_sendmsg()
1152 timeo = sock_sndtimeo(sk, noblock); in iucv_sock_sendmsg()
1153 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); in iucv_sock_sendmsg()
1158 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1169 err = afiucv_hs_send(&txmsg, sk, skb, 0); in iucv_sock_sendmsg()
1235 release_sock(sk); in iucv_sock_sendmsg()
1241 release_sock(sk); in iucv_sock_sendmsg()
1277 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, in iucv_process_message() argument
1326 if (sk_filter(sk, skb)) { in iucv_process_message()
1327 atomic_inc(&sk->sk_drops); /* skb rejected by filter */ in iucv_process_message()
1331 if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */ in iucv_process_message()
1332 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in iucv_process_message()
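Lines 1326-1332 show the delivery tail used for inbound messages: filtered skbs are dropped and counted, and when the receive buffer is full the skb parks on a private backlog queue instead of being lost (recvmsg drains it later, lines 1446-1456). Reconstructed:

    /* end of iucv_process_message(), sketch */
    if (sk_filter(sk, skb)) {
        atomic_inc(&sk->sk_drops);   /* skb rejected by filter */
        kfree_skb(skb);
        return;
    }
    if (__sock_queue_rcv_skb(sk, skb))   /* receive queue full */
        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);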
1339 static void iucv_process_message_q(struct sock *sk) in iucv_process_message_q() argument
1341 struct iucv_sock *iucv = iucv_sk(sk); in iucv_process_message_q()
1349 iucv_process_message(sk, skb, p->path, &p->msg); in iucv_process_message_q()
1361 struct sock *sk = sock->sk; in iucv_sock_recvmsg() local
1362 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_recvmsg()
1368 if ((sk->sk_state == IUCV_DISCONN) && in iucv_sock_recvmsg()
1370 skb_queue_empty(&sk->sk_receive_queue) && in iucv_sock_recvmsg()
1379 skb = skb_recv_datagram(sk, flags, noblock, &err); in iucv_sock_recvmsg()
1381 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_recvmsg()
1390 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; in iucv_sock_recvmsg()
1395 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1400 if (sk->sk_type == SOCK_SEQPACKET) { in iucv_sock_recvmsg()
1415 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1423 if (sk->sk_type == SOCK_STREAM) { in iucv_sock_recvmsg()
1426 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1436 iucv_sock_close(sk); in iucv_sock_recvmsg()
1446 if (__sock_queue_rcv_skb(sk, rskb)) { in iucv_sock_recvmsg()
1456 iucv_process_message_q(sk); in iucv_sock_recvmsg()
1459 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); in iucv_sock_recvmsg()
1461 sk->sk_state = IUCV_DISCONN; in iucv_sock_recvmsg()
1462 sk->sk_state_change(sk); in iucv_sock_recvmsg()
1471 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) in iucv_sock_recvmsg()
1480 struct sock *sk; in iucv_accept_poll() local
1483 sk = (struct sock *) isk; in iucv_accept_poll()
1485 if (sk->sk_state == IUCV_CONNECTED) in iucv_accept_poll()
1495 struct sock *sk = sock->sk; in iucv_sock_poll() local
1498 sock_poll_wait(file, sk_sleep(sk), wait); in iucv_sock_poll()
1500 if (sk->sk_state == IUCV_LISTEN) in iucv_sock_poll()
1501 return iucv_accept_poll(sk); in iucv_sock_poll()
1503 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in iucv_sock_poll()
1505 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); in iucv_sock_poll()
1507 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_poll()
1510 if (sk->sk_shutdown == SHUTDOWN_MASK) in iucv_sock_poll()
1513 if (!skb_queue_empty(&sk->sk_receive_queue) || in iucv_sock_poll()
1514 (sk->sk_shutdown & RCV_SHUTDOWN)) in iucv_sock_poll()
1517 if (sk->sk_state == IUCV_CLOSED) in iucv_sock_poll()
1520 if (sk->sk_state == IUCV_DISCONN) in iucv_sock_poll()
1523 if (sock_writeable(sk) && iucv_below_msglim(sk)) in iucv_sock_poll()
1526 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in iucv_sock_poll()
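Lines 1495-1526 give nearly the entire poll mask computation. Reconstructed below, keeping the POLL* names the listing itself uses (this code predates the EPOLL* rename):

    /* iucv_sock_poll(), sketch of the mask computation */
    mask = 0;
    if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
        mask |= POLLERR |
                (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

    if (sk->sk_shutdown & RCV_SHUTDOWN)
        mask |= POLLRDHUP;
    if (sk->sk_shutdown == SHUTDOWN_MASK)
        mask |= POLLHUP;

    if (!skb_queue_empty(&sk->sk_receive_queue) ||
        (sk->sk_shutdown & RCV_SHUTDOWN))
        mask |= POLLIN | POLLRDNORM;

    if (sk->sk_state == IUCV_CLOSED)
        mask |= POLLHUP;
    if (sk->sk_state == IUCV_DISCONN)
        mask |= POLLIN;

    /* writable only while below the IUCV message limit */
    if (sock_writeable(sk) && iucv_below_msglim(sk))
        mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
    else
        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

    return mask;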
1533 struct sock *sk = sock->sk; in iucv_sock_shutdown() local
1534 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_shutdown()
1543 lock_sock(sk); in iucv_sock_shutdown()
1544 switch (sk->sk_state) { in iucv_sock_shutdown()
1575 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); in iucv_sock_shutdown()
1578 sk->sk_shutdown |= how; in iucv_sock_shutdown()
1587 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_shutdown()
1591 sk->sk_state_change(sk); in iucv_sock_shutdown()
1594 release_sock(sk); in iucv_sock_shutdown()
1600 struct sock *sk = sock->sk; in iucv_sock_release() local
1603 if (!sk) in iucv_sock_release()
1606 iucv_sock_close(sk); in iucv_sock_release()
1608 sock_orphan(sk); in iucv_sock_release()
1609 iucv_sock_kill(sk); in iucv_sock_release()
1617 struct sock *sk = sock->sk; in iucv_sock_setsockopt() local
1618 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_setsockopt()
1633 lock_sock(sk); in iucv_sock_setsockopt()
1642 switch (sk->sk_state) { in iucv_sock_setsockopt()
1659 release_sock(sk); in iucv_sock_setsockopt()
1667 struct sock *sk = sock->sk; in iucv_sock_getsockopt() local
1668 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getsockopt()
1688 lock_sock(sk); in iucv_sock_getsockopt()
1691 release_sock(sk); in iucv_sock_getsockopt()
1694 if (sk->sk_state == IUCV_OPEN) in iucv_sock_getsockopt()
1720 struct sock *sk, *nsk; in iucv_callback_connreq() local
1729 sk = NULL; in iucv_callback_connreq()
1730 sk_for_each(sk, &iucv_sk_list.head) in iucv_callback_connreq()
1731 if (sk->sk_state == IUCV_LISTEN && in iucv_callback_connreq()
1732 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { in iucv_callback_connreq()
1737 iucv = iucv_sk(sk); in iucv_callback_connreq()
1745 bh_lock_sock(sk); in iucv_callback_connreq()
1751 if (sk->sk_state != IUCV_LISTEN) { in iucv_callback_connreq()
1758 if (sk_acceptq_is_full(sk)) { in iucv_callback_connreq()
1765 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); in iucv_callback_connreq()
1773 iucv_sock_init(nsk, sk); in iucv_callback_connreq()
1798 iucv_accept_enqueue(sk, nsk); in iucv_callback_connreq()
1802 sk->sk_data_ready(sk); in iucv_callback_connreq()
1805 bh_unlock_sock(sk); in iucv_callback_connreq()
1811 struct sock *sk = path->private; in iucv_callback_connack() local
1813 sk->sk_state = IUCV_CONNECTED; in iucv_callback_connack()
1814 sk->sk_state_change(sk); in iucv_callback_connack()
1819 struct sock *sk = path->private; in iucv_callback_rx() local
1820 struct iucv_sock *iucv = iucv_sk(sk); in iucv_callback_rx()
1825 if (sk->sk_shutdown & RCV_SHUTDOWN) { in iucv_callback_rx()
1836 len = atomic_read(&sk->sk_rmem_alloc); in iucv_callback_rx()
1838 if (len > sk->sk_rcvbuf) in iucv_callback_rx()
1845 iucv_process_message(sk, skb, path, msg); in iucv_callback_rx()
1864 struct sock *sk = path->private; in iucv_callback_txdone() local
1866 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; in iucv_callback_txdone()
1870 bh_lock_sock(sk); in iucv_callback_txdone()
1889 iucv_sock_wake_msglim(sk); in iucv_callback_txdone()
1893 if (sk->sk_state == IUCV_CLOSING) { in iucv_callback_txdone()
1894 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in iucv_callback_txdone()
1895 sk->sk_state = IUCV_CLOSED; in iucv_callback_txdone()
1896 sk->sk_state_change(sk); in iucv_callback_txdone()
1899 bh_unlock_sock(sk); in iucv_callback_txdone()
1905 struct sock *sk = path->private; in iucv_callback_connrej() local
1907 if (sk->sk_state == IUCV_CLOSED) in iucv_callback_connrej()
1910 bh_lock_sock(sk); in iucv_callback_connrej()
1911 iucv_sever_path(sk, 1); in iucv_callback_connrej()
1912 sk->sk_state = IUCV_DISCONN; in iucv_callback_connrej()
1914 sk->sk_state_change(sk); in iucv_callback_connrej()
1915 bh_unlock_sock(sk); in iucv_callback_connrej()
1923 struct sock *sk = path->private; in iucv_callback_shutdown() local
1925 bh_lock_sock(sk); in iucv_callback_shutdown()
1926 if (sk->sk_state != IUCV_CLOSED) { in iucv_callback_shutdown()
1927 sk->sk_shutdown |= SEND_SHUTDOWN; in iucv_callback_shutdown()
1928 sk->sk_state_change(sk); in iucv_callback_shutdown()
1930 bh_unlock_sock(sk); in iucv_callback_shutdown()
1958 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_syn() argument
1965 iucv = iucv_sk(sk); in afiucv_hs_callback_syn()
1975 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); in afiucv_hs_callback_syn()
1976 bh_lock_sock(sk); in afiucv_hs_callback_syn()
1977 if ((sk->sk_state != IUCV_LISTEN) || in afiucv_hs_callback_syn()
1978 sk_acceptq_is_full(sk) || in afiucv_hs_callback_syn()
1985 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1990 iucv_sock_init(nsk, sk); in afiucv_hs_callback_syn()
2001 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; in afiucv_hs_callback_syn()
2010 iucv_accept_enqueue(sk, nsk); in afiucv_hs_callback_syn()
2012 sk->sk_data_ready(sk); in afiucv_hs_callback_syn()
2015 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
2024 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synack() argument
2026 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synack()
2032 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synack()
2034 bh_lock_sock(sk); in afiucv_hs_callback_synack()
2036 sk->sk_state = IUCV_CONNECTED; in afiucv_hs_callback_synack()
2037 sk->sk_state_change(sk); in afiucv_hs_callback_synack()
2038 bh_unlock_sock(sk); in afiucv_hs_callback_synack()
2047 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synfin() argument
2049 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synfin()
2053 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synfin()
2055 bh_lock_sock(sk); in afiucv_hs_callback_synfin()
2056 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_synfin()
2057 sk->sk_state_change(sk); in afiucv_hs_callback_synfin()
2058 bh_unlock_sock(sk); in afiucv_hs_callback_synfin()
2067 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_fin() argument
2069 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_fin()
2074 bh_lock_sock(sk); in afiucv_hs_callback_fin()
2075 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_fin()
2076 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_fin()
2077 sk->sk_state_change(sk); in afiucv_hs_callback_fin()
2079 bh_unlock_sock(sk); in afiucv_hs_callback_fin()
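Lines 2067-2079 cover the FIN handler for the HiperSockets transport almost completely: the peer closed, so a connected socket flips to IUCV_DISCONN under the BH lock and readers are woken via sk_state_change. Sketch, with the skb disposal filled in by assumption:

    static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
    {
        struct iucv_sock *iucv = iucv_sk(sk);

        /* other end of the connection closed */
        if (iucv) {
            bh_lock_sock(sk);
            if (sk->sk_state == IUCV_CONNECTED) {
                sk->sk_state = IUCV_DISCONN;
                sk->sk_state_change(sk);
            }
            bh_unlock_sock(sk);
        }
        kfree_skb(skb);          /* control skb carries no payload */
        return NET_RX_SUCCESS;
    }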
2088 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_win() argument
2090 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_win()
2097 if (sk->sk_state != IUCV_CONNECTED) in afiucv_hs_callback_win()
2101 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_win()
2108 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_rx() argument
2110 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_rx()
2117 if (sk->sk_state != IUCV_CONNECTED) { in afiucv_hs_callback_rx()
2122 if (sk->sk_shutdown & RCV_SHUTDOWN) { in afiucv_hs_callback_rx()
2132 if (sk_filter(sk, skb)) { in afiucv_hs_callback_rx()
2133 atomic_inc(&sk->sk_drops); /* skb rejected by filter */ in afiucv_hs_callback_rx()
2140 if (__sock_queue_rcv_skb(sk, skb)) in afiucv_hs_callback_rx()
2144 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in afiucv_hs_callback_rx()
2157 struct sock *sk; in afiucv_hs_rcv() local
2185 sk = NULL; in afiucv_hs_rcv()
2187 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_hs_rcv()
2189 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2191 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2193 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && in afiucv_hs_rcv()
2194 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2196 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2200 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2202 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2204 (!memcmp(&iucv_sk(sk)->dst_name, in afiucv_hs_rcv()
2206 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2208 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2215 sk = NULL; in afiucv_hs_rcv()
2229 err = afiucv_hs_callback_syn(sk, skb); in afiucv_hs_rcv()
2233 err = afiucv_hs_callback_synack(sk, skb); in afiucv_hs_rcv()
2237 err = afiucv_hs_callback_synfin(sk, skb); in afiucv_hs_rcv()
2241 err = afiucv_hs_callback_fin(sk, skb); in afiucv_hs_rcv()
2244 err = afiucv_hs_callback_win(sk, skb); in afiucv_hs_rcv()
2256 err = afiucv_hs_callback_rx(sk, skb); in afiucv_hs_rcv()
2272 struct sock *isk = skb->sk; in afiucv_hs_callback_txnotify()
2273 struct sock *sk = NULL; in afiucv_hs_callback_txnotify() local
2281 sk_for_each(sk, &iucv_sk_list.head) in afiucv_hs_callback_txnotify()
2282 if (sk == isk) { in afiucv_hs_callback_txnotify()
2283 iucv = iucv_sk(sk); in afiucv_hs_callback_txnotify()
2288 if (!iucv || sock_flag(sk, SOCK_ZAPPED)) in afiucv_hs_callback_txnotify()
2303 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2312 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2322 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_txnotify()
2323 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_txnotify()
2324 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2336 if (sk->sk_state == IUCV_CLOSING) { in afiucv_hs_callback_txnotify()
2337 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in afiucv_hs_callback_txnotify()
2338 sk->sk_state = IUCV_CLOSED; in afiucv_hs_callback_txnotify()
2339 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2352 struct sock *sk; in afiucv_netdev_event() local
2358 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_netdev_event()
2359 iucv = iucv_sk(sk); in afiucv_netdev_event()
2361 (sk->sk_state == IUCV_CONNECTED)) { in afiucv_netdev_event()
2363 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in afiucv_netdev_event()
2364 sk->sk_state = IUCV_DISCONN; in afiucv_netdev_event()
2365 sk->sk_state_change(sk); in afiucv_netdev_event()
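Lines 2352-2365 are the tail of the netdevice notifier: when the HiperSockets device underneath a connected socket goes away, the driver sends FIN while it still can and marks the socket disconnected. Sketch of the handler; the event cases and the hs_dev comparison are filled in by assumption:

    static int afiucv_netdev_event(struct notifier_block *this,
                                   unsigned long event, void *ptr)
    {
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
        struct sock *sk;
        struct iucv_sock *iucv;

        switch (event) {
        case NETDEV_REBOOT:
        case NETDEV_GOING_DOWN:
            sk_for_each(sk, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
                if ((iucv->hs_dev == event_dev) &&
                    (sk->sk_state == IUCV_CONNECTED)) {
                    if (event == NETDEV_GOING_DOWN)
                        iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                    sk->sk_state = IUCV_DISCONN;
                    sk->sk_state_change(sk);
                }
            }
            break;
        default:
            break;
        }
        return NOTIFY_DONE;
    }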