/drivers/isdn/mISDN/ |
D | socket.c |
    31   #define _pms(sk) ((struct mISDN_sock *)sk)  argument
    55   mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk)  in mISDN_sock_link()  argument
    58   sk_add_node(sk, &l->head);  in mISDN_sock_link()
    62   static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk)  in mISDN_sock_unlink()  argument
    65   sk_del_node_init(sk);  in mISDN_sock_unlink()
    78   if (msk->sk.sk_state == MISDN_CLOSED)  in mISDN_send()
    81   err = sock_queue_rcv_skb(&msk->sk, skb);  in mISDN_send()
    97   msk->sk.sk_state = MISDN_CLOSED;  in mISDN_ctrl()
    104  mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)  in mISDN_sock_cmsg()  argument
    108  if (_pms(sk)->cmask & MISDN_TIME_STAMP) {  in mISDN_sock_cmsg()
    [all …]
|
D | dsp_dtmf.c |
    123  s32 sk, sk1, sk2;  in dsp_dtmf_goertzel_decode()  local
    160  sk = (*hfccoeff++) >> 4;  in dsp_dtmf_goertzel_decode()
    161  if (sk > 32767 || sk < -32767 || sk2 > 32767  in dsp_dtmf_goertzel_decode()
    167  (sk * sk) -  in dsp_dtmf_goertzel_decode()
    168  (((cos2pik[k] * sk) >> 15) * sk2) +  in dsp_dtmf_goertzel_decode()
    185  sk = 0;  in dsp_dtmf_goertzel_decode()
    191  sk = ((cos2pik_ * sk1) >> 15) - sk2 + (*buf++);  in dsp_dtmf_goertzel_decode()
    193  sk1 = sk;  in dsp_dtmf_goertzel_decode()
    195  sk >>= 8;  in dsp_dtmf_goertzel_decode()
    197  if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767)  in dsp_dtmf_goertzel_decode()
    [all …]
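The dsp_dtmf.c hits above all come from the fixed-point Goertzel filter used for DTMF detection: sk is the current filter state, sk1/sk2 the two previous states, and cos2pik[] holds 2*cos(2*pi*f/fs) in Q15. Below is a minimal, self-contained userspace sketch of that recurrence and the squared-magnitude term; the names (goertzel_energy, the 102-sample block, the test tone in main) are illustrative stand-ins, not taken from the kernel file, which additionally rescales the state, checks for overflow and decodes all 16 DTMF bins.

    /* Fixed-point Goertzel sketch, assuming Q15 coefficients as in cos2pik[]. */
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    /* Relative energy of frequency `freq` in `buf[0..n-1]` sampled at `rate`. */
    static int64_t goertzel_energy(const int16_t *buf, int n, double freq, double rate)
    {
        /* 2*cos(2*pi*f/fs), scaled to Q15 like the cos2pik[] table entries. */
        int32_t coeff = (int32_t)lrint(32768.0 * 2.0 * cos(2.0 * M_PI * freq / rate));
        int32_t sk = 0, sk1 = 0, sk2 = 0;
        int i;

        for (i = 0; i < n; i++) {
            /* Same shape as line 191: sk = ((coeff * sk1) >> 15) - sk2 + sample */
            sk = (int32_t)(((int64_t)coeff * sk1) >> 15) - sk2 + buf[i];
            sk2 = sk1;
            sk1 = sk;
        }
        /* |X(k)|^2 up to scaling, mirroring the (sk*sk) - (((coeff*sk)>>15)*sk2) + ... term. */
        return (int64_t)sk1 * sk1 + (int64_t)sk2 * sk2
               - ((((int64_t)coeff * sk1) >> 15) * sk2);
    }

    int main(void)
    {
        int16_t buf[102];
        int i;

        /* 697 Hz test tone at 8 kHz, the lowest DTMF row frequency. */
        for (i = 0; i < 102; i++)
            buf[i] = (int16_t)(1000.0 * sin(2.0 * M_PI * 697.0 * i / 8000.0));
        printf("energy(697 Hz)  = %lld\n", (long long)goertzel_energy(buf, 102, 697.0, 8000.0));
        printf("energy(1209 Hz) = %lld\n", (long long)goertzel_energy(buf, 102, 1209.0, 8000.0));
        return 0;
    }

Build with `cc goertzel.c -lm`; the 697 Hz bin comes out orders of magnitude above the 1209 Hz bin for the test tone.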
|
D | stack.c |
    67   struct sock *sk;  in send_socklist()  local
    71   sk_for_each(sk, &sl->head) {  in send_socklist()
    72   if (sk->sk_state != MISDN_BOUND)  in send_socklist()
    80   if (!sock_queue_rcv_skb(sk, cskb))  in send_socklist()
    453  sk_add_node(&msk->sk, &dev->D.st->l1sock.head);  in connect_layer1()
    594  sk_del_node_init(&msk->sk);  in delete_channel()
|
/drivers/net/ppp/ |
D | pppoe.c |
    92   static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
    289  struct sock *sk;  in pppoe_flush_dev()  local
    299  sk = sk_pppox(po);  in pppoe_flush_dev()
    309  sock_hold(sk);  in pppoe_flush_dev()
    311  lock_sock(sk);  in pppoe_flush_dev()
    314  sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {  in pppoe_flush_dev()
    315  pppox_unbind_sock(sk);  in pppoe_flush_dev()
    316  sk->sk_state_change(sk);  in pppoe_flush_dev()
    321  release_sock(sk);  in pppoe_flush_dev()
    322  sock_put(sk);  in pppoe_flush_dev()
    [all …]
|
D | pppopns.c |
    72   struct sock *sk = (struct sock *)sk_raw->sk_user_data;  in pppopns_recv_core()  local
    73   struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;  in pppopns_recv_core()
    126  skb_set_owner_r(skb, sk);  in pppopns_recv_core()
    127  skb_queue_walk(&sk->sk_receive_queue, skb1) {  in pppopns_recv_core()
    134  skb_insert(skb1, skb, &sk->sk_receive_queue);  in pppopns_recv_core()
    141  skb_queue_tail(&sk->sk_receive_queue, skb);  in pppopns_recv_core()
    148  skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {  in pppopns_recv_core()
    150  if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&  in pppopns_recv_core()
    154  skb_unlink(skb, &sk->sk_receive_queue);  in pppopns_recv_core()
    157  ppp_input(&pppox_sk(sk)->chan, skb);  in pppopns_recv_core()
    [all …]
|
D | pppolac.c |
    73   struct sock *sk = (struct sock *)sk_udp->sk_user_data;  in pppolac_recv_core()  local
    74   struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;  in pppolac_recv_core()
    145  skb_set_owner_r(skb, sk);  in pppolac_recv_core()
    146  skb_queue_walk(&sk->sk_receive_queue, skb1) {  in pppolac_recv_core()
    153  skb_insert(skb1, skb, &sk->sk_receive_queue);  in pppolac_recv_core()
    160  skb_queue_tail(&sk->sk_receive_queue, skb);  in pppolac_recv_core()
    167  skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {  in pppolac_recv_core()
    169  if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&  in pppolac_recv_core()
    173  skb_unlink(skb, &sk->sk_receive_queue);  in pppolac_recv_core()
    176  ppp_input(&pppox_sk(sk)->chan, skb);  in pppolac_recv_core()
    [all …]
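The pppopns.c and pppolac.c hits show the same receive path: an out-of-order tunnel frame is charged to the socket (skb_set_owner_r), the receive queue is walked, and the frame is inserted before the first queued frame carrying a larger sequence number, or appended at the tail. A rough userspace sketch of just that ordering step, with a plain singly linked list standing in for sk_buff_head and all names invented for illustration (the real drivers also handle sequence-number wrap and flush in-order frames to ppp_input()):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for an sk_buff carrying a tunnel sequence number. */
    struct frame {
        unsigned int seq;
        struct frame *next;
    };

    /*
     * Keep the queue ordered by sequence number: walk until the first queued
     * frame with a larger sequence and insert before it, or append at the
     * tail (mirrors the skb_queue_walk + skb_insert / skb_queue_tail pattern).
     */
    static void queue_in_order(struct frame **head, struct frame *f)
    {
        struct frame **pos = head;

        while (*pos && (*pos)->seq <= f->seq)
            pos = &(*pos)->next;
        f->next = *pos;
        *pos = f;
    }

    int main(void)
    {
        unsigned int arrival[] = { 1, 4, 2, 3, 6, 5 };
        struct frame *head = NULL, *f;
        size_t i;

        for (i = 0; i < sizeof(arrival) / sizeof(arrival[0]); i++) {
            f = malloc(sizeof(*f));
            f->seq = arrival[i];
            queue_in_order(&head, f);
        }
        for (f = head; f; f = f->next)   /* prints 1 2 3 4 5 6 */
            printf("%u ", f->seq);
        printf("\n");
        while (head) {
            f = head;
            head = head->next;
            free(f);
        }
        return 0;
    }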
|
D | pptp.c |
    172  struct sock *sk = (struct sock *) chan->private;  in pptp_xmit()  local
    173  struct pppox_sock *po = pppox_sk(sk);  in pptp_xmit()
    192  rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,  in pptp_xmit()
    210  if (skb->sk)  in pptp_xmit()
    211  skb_set_owner_w(new_skb, skb->sk);  in pptp_xmit()
    267  if (ip_dont_fragment(sk, &rt->dst))  in pptp_xmit()
    295  static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)  in pptp_rcv_core()  argument
    297  struct pppox_sock *po = pppox_sk(sk);  in pptp_rcv_core()
    303  if (!(sk->sk_state & PPPOX_CONNECTED)) {  in pptp_rcv_core()
    304  if (sock_queue_rcv_skb(sk, skb))  in pptp_rcv_core()
    [all …]
|
D | pppox.c |
    57   void pppox_unbind_sock(struct sock *sk)  in pppox_unbind_sock()  argument
    61   if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) {  in pppox_unbind_sock()
    62   ppp_unregister_channel(&pppox_sk(sk)->chan);  in pppox_unbind_sock()
    63   sk->sk_state = PPPOX_DEAD;  in pppox_unbind_sock()
    73   struct sock *sk = sock->sk;  in pppox_ioctl()  local
    74   struct pppox_sock *po = pppox_sk(sk);  in pppox_ioctl()
    77   lock_sock(sk);  in pppox_ioctl()
    83   if (!(sk->sk_state & PPPOX_CONNECTED))  in pppox_ioctl()
    92   sk->sk_state |= PPPOX_BOUND;  in pppox_ioctl()
    96   rc = pppox_protos[sk->sk_protocol]->ioctl ?  in pppox_ioctl()
    [all …]
|
/drivers/scsi/ |
D | iscsi_tcp.c |
    114  static inline int iscsi_sw_sk_state_check(struct sock *sk)  in iscsi_sw_sk_state_check()  argument
    116  struct iscsi_conn *conn = sk->sk_user_data;  in iscsi_sw_sk_state_check()
    118  if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&  in iscsi_sw_sk_state_check()
    120  !atomic_read(&sk->sk_rmem_alloc)) {  in iscsi_sw_sk_state_check()
    128  static void iscsi_sw_tcp_data_ready(struct sock *sk)  in iscsi_sw_tcp_data_ready()  argument
    134  read_lock(&sk->sk_callback_lock);  in iscsi_sw_tcp_data_ready()
    135  conn = sk->sk_user_data;  in iscsi_sw_tcp_data_ready()
    137  read_unlock(&sk->sk_callback_lock);  in iscsi_sw_tcp_data_ready()
    150  tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);  in iscsi_sw_tcp_data_ready()
    152  iscsi_sw_sk_state_check(sk);  in iscsi_sw_tcp_data_ready()
    [all …]
|
D | u14-34f.c |
    1553  static void sort(unsigned long sk[], unsigned int da[], unsigned int n,  in sort()  argument
    1563  if (sk[j] > sk[k]) k = j;  in sort()
    1566  if (sk[j] < sk[k]) k = j;  in sort()
    1570  x = sk[k]; sk[k] = sk[i]; sk[i] = x;  in sort()
|
D | eata.c |
    2069  static void sort(unsigned long sk[], unsigned int da[], unsigned int n,  in sort()  argument
    2080  if (sk[j] > sk[k])  in sort()
    2083  if (sk[j] < sk[k])  in sort()
    2088  x = sk[k];  in sort()
    2089  sk[k] = sk[i];  in sort()
    2090  sk[i] = x;  in sort()
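Both eata.c and u14-34f.c carry the same small helper: a selection sort over an array of keys sk[] that swaps the matching entries of a parallel data array da[] along with them, ascending or descending depending on a reverse flag. The standalone sketch below reconstructs that behaviour; the rev parameter and the parallel swap of da[] are inferred from the visible fragments, and the surrounding names are illustrative rather than copied from either driver.

    #include <stdio.h>

    static void sort_keys(unsigned long sk[], unsigned int da[], unsigned int n,
                          int rev)
    {
        unsigned int i, j, k, y;
        unsigned long x;

        for (i = 0; i + 1 < n; i++) {
            k = i;
            for (j = i + 1; j < n; j++) {
                if (rev) {
                    if (sk[j] > sk[k])      /* descending */
                        k = j;
                } else {
                    if (sk[j] < sk[k])      /* ascending */
                        k = j;
                }
            }
            /* Swap the key and the matching entry of the parallel array. */
            x = sk[k]; sk[k] = sk[i]; sk[i] = x;
            y = da[k]; da[k] = da[i]; da[i] = y;
        }
    }

    int main(void)
    {
        unsigned long keys[] = { 42, 7, 19, 3 };
        unsigned int data[] = { 0, 1, 2, 3 };
        unsigned int i;

        sort_keys(keys, data, 4, 0);
        for (i = 0; i < 4; i++)             /* prints 3/3 7/1 19/2 42/0 */
            printf("%lu/%u ", keys[i], data[i]);
        printf("\n");
        return 0;
    }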
|
/drivers/target/iscsi/ |
D | iscsi_target_nego.c |
    411  static void iscsi_target_sk_data_ready(struct sock *sk)  in iscsi_target_sk_data_ready()  argument
    413  struct iscsi_conn *conn = sk->sk_user_data;  in iscsi_target_sk_data_ready()
    418  write_lock_bh(&sk->sk_callback_lock);  in iscsi_target_sk_data_ready()
    419  if (!sk->sk_user_data) {  in iscsi_target_sk_data_ready()
    420  write_unlock_bh(&sk->sk_callback_lock);  in iscsi_target_sk_data_ready()
    424  write_unlock_bh(&sk->sk_callback_lock);  in iscsi_target_sk_data_ready()
    429  write_unlock_bh(&sk->sk_callback_lock);  in iscsi_target_sk_data_ready()
    434  write_unlock_bh(&sk->sk_callback_lock);  in iscsi_target_sk_data_ready()
    444  write_unlock_bh(&sk->sk_callback_lock);  in iscsi_target_sk_data_ready()
    451  struct sock *sk;  in iscsi_target_set_sock_callbacks()  local
    [all …]
|
/drivers/staging/lustre/lnet/klnds/socklnd/ |
D | socklnd_lib-linux.c |
    67   int caps = conn->ksnc_sock->sk->sk_route_caps;  in ksocknal_lib_zc_capable()
    134  struct sock *sk = sock->sk;  in ksocknal_lib_send_kiov()  local
    147  if (sk->sk_prot->sendpage != NULL) {  in ksocknal_lib_send_kiov()
    148  rc = sk->sk_prot->sendpage(sk, page,  in ksocknal_lib_send_kiov()
    151  rc = cfs_tcp_sendpage(sk, page, offset, fragsize,  in ksocknal_lib_send_kiov()
    467  sock->sk->sk_allocation = GFP_NOFS;  in ksocknal_lib_setup_sock()
    558  struct sock *sk;  in ksocknal_lib_push_conn()  local
    568  sk = conn->ksnc_sock->sk;  in ksocknal_lib_push_conn()
    569  tp = tcp_sk(sk);  in ksocknal_lib_push_conn()
    571  lock_sock (sk);  in ksocknal_lib_push_conn()
    [all …]
|
D | socklnd_lib-linux.h |
    79   #define SOCKNAL_WSPACE(sk) sk_stream_wspace(sk)  argument
    80   #define SOCKNAL_MIN_WSPACE(sk) sk_stream_min_wspace(sk)  argument
|
D | socklnd_cb.c |
    215   bufnob = conn->ksnc_sock->sk->sk_wmem_queued;  in ksocknal_transmit()
    633   c->ksnc_sock->sk->sk_wmem_queued;  in ksocknal_find_conn_locked()
    729   bufnob = conn->ksnc_sock->sk->sk_wmem_queued;  in ksocknal_queue_tx_locked()
    2261  error = conn->ksnc_sock->sk->sk_err;  in ksocknal_find_timed_out_conn()
    2312  conn->ksnc_sock->sk->sk_wmem_queued != 0) &&  in ksocknal_find_timed_out_conn()
    2508  resid, conn->ksnc_sock->sk->sk_wmem_queued);  in ksocknal_check_peer_timeouts()
|
/drivers/net/ |
D | macvtap.c |
    36   struct sock sk;  member
    127  sock_hold(&q->sk);  in macvtap_set_queue()
    189  sock_put(&q->sk);  in macvtap_put_queue()
    196  sock_put(&q->sk);  in macvtap_put_queue()
    272  sock_put(&qlist[j]->sk);  in macvtap_del_queues()
    291  if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)  in macvtap_handle_frame()
    309  skb_queue_tail(&q->sk.sk_receive_queue, skb);  in macvtap_handle_frame()
    318  skb_queue_tail(&q->sk.sk_receive_queue, segs);  in macvtap_handle_frame()
    331  skb_queue_tail(&q->sk.sk_receive_queue, skb);  in macvtap_handle_frame()
    335  wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);  in macvtap_handle_frame()
    [all …]
|
D | tun.c |
    135  struct sock sk;  member
    434  skb_queue_purge(&tfile->sk.sk_receive_queue);  in tun_queue_purge()
    435  skb_queue_purge(&tfile->sk.sk_error_queue);  in tun_queue_purge()
    457  sock_put(&tfile->sk);  in __tun_detach()
    468  sock_put(&tfile->sk);  in __tun_detach()
    482  sk_release_kernel(&tfile->sk);  in __tun_detach()
    502  tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;  in tun_detach_all()
    503  tfile->socket.sk->sk_data_ready(tfile->socket.sk);  in tun_detach_all()
    508  tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;  in tun_detach_all()
    509  tfile->socket.sk->sk_data_ready(tfile->socket.sk);  in tun_detach_all()
    [all …]
|
D | vxlan.c |
    279  if (inet_sk(vs->sock->sk)->inet_sport == port &&  in vxlan_find_sock()
    280  inet_sk(vs->sock->sk)->sk.sk_family == family)  in vxlan_find_sock()
    639  struct sock *sk = vs->sock->sk;  in vxlan_notify_add_rx_port()  local
    640  struct net *net = sock_net(sk);  in vxlan_notify_add_rx_port()
    641  sa_family_t sa_family = sk->sk_family;  in vxlan_notify_add_rx_port()
    642  __be16 port = inet_sk(sk)->inet_sport;  in vxlan_notify_add_rx_port()
    664  struct sock *sk = vs->sock->sk;  in vxlan_notify_del_rx_port()  local
    665  struct net *net = sock_net(sk);  in vxlan_notify_del_rx_port()
    666  sa_family_t sa_family = sk->sk_family;  in vxlan_notify_del_rx_port()
    667  __be16 port = inet_sk(sk)->inet_sport;  in vxlan_notify_del_rx_port()
    [all …]
|
/drivers/isdn/i4l/ |
D | isdn_audio.c |
    450  int sk,  in isdn_audio_goertzel()  local
    467  sk = sk1 = sk2 = 0;  in isdn_audio_goertzel()
    469  sk = sample[n] + ((cos2pik[k] * sk1) >> 15) - sk2;  in isdn_audio_goertzel()
    471  sk1 = sk;  in isdn_audio_goertzel()
    474  sk >>= 1;  in isdn_audio_goertzel()
    479  if (sk < -32768 || sk > 32767)  in isdn_audio_goertzel()
    481  "isdn_audio: dtmf goertzel overflow, sk=%d\n", sk);  in isdn_audio_goertzel()
    486  ((sk * sk) >> AMP_BITS) -  in isdn_audio_goertzel()
    487  ((((cos2pik[k] * sk) >> 15) * sk2) >> AMP_BITS) +  in isdn_audio_goertzel()
|
/drivers/staging/lustre/include/linux/lnet/linux/ |
D | lnet.h |
    53   #define cfs_tcp_sendpage(sk, page, offset, size, flags) \  argument
    54   tcp_sendpage(sk, page, offset, size, flags)
|
/drivers/staging/lustre/lustre/libcfs/linux/ |
D | linux-tcpip.c |
    490  *txbufsize = sock->sk->sk_sndbuf;  in libcfs_sock_getbuf()
    494  *rxbufsize = sock->sk->sk_rcvbuf;  in libcfs_sock_getbuf()
    548  add_wait_queue(sk_sleep(sock->sk), &wait);  in libcfs_sock_accept()
    557  remove_wait_queue(sk_sleep(sock->sk), &wait);  in libcfs_sock_accept()
    576  wake_up_all(sk_sleep(sock->sk));  in libcfs_sock_abort_accept()
|
/drivers/block/drbd/ |
D | drbd_receiver.c |
    558  sock->sk->sk_sndbuf = snd;  in drbd_setbufsize()
    559  sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;  in drbd_setbufsize()
    562  sock->sk->sk_rcvbuf = rcv;  in drbd_setbufsize()
    563  sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;  in drbd_setbufsize()
    608  sock->sk->sk_rcvtimeo =  in drbd_try_connect()
    609  sock->sk->sk_sndtimeo = connect_int * HZ;  in drbd_try_connect()
    659  void (*original_sk_state_change)(struct sock *sk);
    663  static void drbd_incoming_connection(struct sock *sk)  in drbd_incoming_connection()  argument
    665  struct accept_wait_data *ad = sk->sk_user_data;  in drbd_incoming_connection()
    666  void (*state_change)(struct sock *sk);  in drbd_incoming_connection()
    [all …]
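The drbd_receiver.c hits around lines 659-666 show the usual callback-override idiom: the driver saves the socket's original sk_state_change pointer, installs its own handler, and that handler recovers its context via sk_user_data before chaining to the saved callback. Below is a rough userspace analogue of that pattern built on a pthread rwlock; struct endpoint, incoming_connection() and every other name here are stand-ins for illustration, not the kernel API.

    #include <pthread.h>
    #include <stdio.h>

    struct endpoint {                       /* stand-in for struct sock */
        pthread_rwlock_t callback_lock;     /* stand-in for sk_callback_lock */
        void *user_data;                    /* stand-in for sk_user_data */
        void (*state_change)(struct endpoint *ep);
    };

    struct accept_wait_data {
        void (*original_state_change)(struct endpoint *ep);
        int woken;
    };

    static void default_state_change(struct endpoint *ep)
    {
        printf("default state change\n");
    }

    static void incoming_connection(struct endpoint *ep)
    {
        struct accept_wait_data *ad;
        void (*chain)(struct endpoint *ep);

        pthread_rwlock_rdlock(&ep->callback_lock);
        ad = ep->user_data;
        ad->woken = 1;                      /* note the event ... */
        chain = ad->original_state_change;
        pthread_rwlock_unlock(&ep->callback_lock);
        chain(ep);                          /* ... then chain to the saved callback */
    }

    int main(void)
    {
        struct endpoint ep = { .state_change = default_state_change };
        struct accept_wait_data ad = { 0 };

        pthread_rwlock_init(&ep.callback_lock, NULL);
        /* Install the override, remembering the original. */
        pthread_rwlock_wrlock(&ep.callback_lock);
        ad.original_state_change = ep.state_change;
        ep.user_data = &ad;
        ep.state_change = incoming_connection;
        pthread_rwlock_unlock(&ep.callback_lock);

        ep.state_change(&ep);               /* simulate the socket firing the callback */
        printf("woken=%d\n", ad.woken);
        return 0;
    }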
|
D | drbd_worker.c |
    626  struct sock *sk = connection->data.socket->sk;  in make_resync_request()  local
    627  int queued = sk->sk_wmem_queued;  in make_resync_request()
    628  int sndbuf = sk->sk_sndbuf;  in make_resync_request()
    631  if (sk->sk_socket)  in make_resync_request()
    632  set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);  in make_resync_request()
|
/drivers/vhost/ |
D | net.c |
    234  sock_flag(sock->sk, SOCK_ZEROCOPY);  in vhost_sock_zcopy()
    462  static int peek_head_len(struct sock *sk)  in peek_head_len()  argument
    468  spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);  in peek_head_len()
    469  head = skb_peek(&sk->sk_receive_queue);  in peek_head_len()
    476  spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);  in peek_head_len()
    592  while ((sock_len = peek_head_len(sock->sk))) {  in handle_rx()
    852  if (sock->sk->sk_type != SOCK_RAW) {  in get_raw_socket()
|
/drivers/infiniband/hw/usnic/ |
D | usnic_transport.c |
    176  *proto = sock->sk->sk_protocol;  in usnic_transport_sock_get_addr()
|