
Searched refs:sk (Results 1 – 25 of 100) sorted by relevance


/drivers/isdn/mISDN/
socket.c
22 #define _pms(sk) ((struct mISDN_sock *)sk) argument
46 mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) in mISDN_sock_link() argument
49 sk_add_node(sk, &l->head); in mISDN_sock_link()
53 static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) in mISDN_sock_unlink() argument
56 sk_del_node_init(sk); in mISDN_sock_unlink()
69 if (msk->sk.sk_state == MISDN_CLOSED) in mISDN_send()
72 err = sock_queue_rcv_skb(&msk->sk, skb); in mISDN_send()
88 msk->sk.sk_state = MISDN_CLOSED; in mISDN_ctrl()
95 mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) in mISDN_sock_cmsg() argument
99 if (_pms(sk)->cmask & MISDN_TIME_STAMP) { in mISDN_sock_cmsg()
[all …]
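The socket.c hits show mISDN's private socket list: sockets are chained with sk_add_node()/sk_del_node_init() under a per-list lock, and received frames are delivered with sock_queue_rcv_skb(). A minimal sketch of that linking pattern, assuming a kernel-module context ("my_sock_list" is an illustrative name, not the mISDN type):

#include <net/sock.h>

struct my_sock_list {                   /* mirrors struct mISDN_sock_list */
        struct hlist_head head;
        rwlock_t lock;
};

static void my_sock_link(struct my_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);      /* also takes a reference on sk */
        write_unlock_bh(&l->lock);
}

static void my_sock_unlink(struct my_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);           /* unhashes and drops the reference */
        write_unlock_bh(&l->lock);
}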
dsp_dtmf.c
123 s32 sk, sk1, sk2; in dsp_dtmf_goertzel_decode() local
160 sk = (*hfccoeff++) >> 4; in dsp_dtmf_goertzel_decode()
161 if (sk > 32767 || sk < -32767 || sk2 > 32767 in dsp_dtmf_goertzel_decode()
167 (sk * sk) - in dsp_dtmf_goertzel_decode()
168 (((cos2pik[k] * sk) >> 15) * sk2) + in dsp_dtmf_goertzel_decode()
185 sk = 0; in dsp_dtmf_goertzel_decode()
191 sk = ((cos2pik_ * sk1) >> 15) - sk2 + (*buf++); in dsp_dtmf_goertzel_decode()
193 sk1 = sk; in dsp_dtmf_goertzel_decode()
195 sk >>= 8; in dsp_dtmf_goertzel_decode()
197 if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767) in dsp_dtmf_goertzel_decode()
[all …]
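These dsp_dtmf.c hits are a fixed-point Goertzel filter: the recurrence is sk = ((coef * sk1) >> 15) - sk2 + input, where coef = cos2pik[k] = 2*cos(2*pi*f/fs) scaled to Q15, and the tone power is sk*sk + sk2*sk2 - ((coef*sk)>>15)*sk2. A hedged userspace sketch of the same arithmetic (the kernel stays in s32 and range-checks/downshifts instead of widening to 64 bits):

#include <math.h>
#include <stdint.h>

/* Q15 Goertzel power of frequency f (Hz) over n samples at rate fs. */
static int64_t goertzel_power(const int16_t *buf, int n, double f, double fs)
{
        int32_t coef = (int32_t)(2.0 * cos(2.0 * M_PI * f / fs) * 32768.0);
        int32_t sk = 0, sk1 = 0, sk2 = 0;
        int i;

        for (i = 0; i < n; i++) {
                sk = (int32_t)(((int64_t)coef * sk1) >> 15) - sk2 + buf[i];
                sk2 = sk1;
                sk1 = sk;
        }
        /* squared magnitude, as computed in dsp_dtmf_goertzel_decode() */
        return (int64_t)sk * sk + (int64_t)sk2 * sk2
                - ((((int64_t)coef * sk) >> 15) * sk2);
}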
/drivers/net/ethernet/chelsio/inline_crypto/chtls/
chtls_cm.c
94 struct sock *sk) in chtls_find_netdev() argument
104 switch (sk->sk_family) { in chtls_find_netdev()
106 if (likely(!inet_sk(sk)->inet_rcv_saddr)) in chtls_find_netdev()
108 ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false); in chtls_find_netdev()
112 addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); in chtls_find_netdev()
118 &sk->sk_v6_rcv_saddr, temp, 1)) { in chtls_find_netdev()
141 static void assign_rxopt(struct sock *sk, unsigned int opt) in assign_rxopt() argument
147 csk = rcu_dereference_sk_user_data(sk); in assign_rxopt()
148 tp = tcp_sk(sk); in assign_rxopt()
169 static void chtls_purge_receive_queue(struct sock *sk) in chtls_purge_receive_queue() argument
[all …]
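chtls_find_netdev() resolves the net_device behind a socket by dispatching on sk->sk_family and looking up the bound address in the matching stack. A trimmed, hedged sketch of the IPv4 arm (the chtls code passes &init_net; the IPv6 arm and error handling are omitted):

#include <linux/inetdevice.h>
#include <net/inet_sock.h>

static struct net_device *find_bound_dev(struct sock *sk)
{
        switch (sk->sk_family) {
        case AF_INET:
                if (!inet_sk(sk)->inet_rcv_saddr)
                        return NULL;    /* wildcard bind: no single device */
                return __ip_dev_find(sock_net(sk),
                                     inet_sk(sk)->inet_rcv_saddr, false);
        default:
                return NULL;
        }
}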
chtls_io.c
43 static int nos_ivs(struct sock *sk, unsigned int size) in nos_ivs() argument
45 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in nos_ivs()
50 static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb) in set_ivs_imm() argument
52 int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE; in set_ivs_imm()
64 static int max_ivs_size(struct sock *sk, int size) in max_ivs_size() argument
66 return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE; in max_ivs_size()
69 static int ivs_size(struct sock *sk, const struct sk_buff *skb) in ivs_size() argument
71 return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) * in ivs_size()
89 static struct sk_buff *create_flowc_wr_skb(struct sock *sk, in create_flowc_wr_skb() argument
93 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in create_flowc_wr_skb()
[all …]
chtls_hw.c
41 static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word, in __set_tcb_field() argument
50 csk = rcu_dereference_sk_user_data(sk); in __set_tcb_field()
61 static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val) in chtls_set_tcb_field() argument
78 csk = rcu_dereference_sk_user_data(sk); in chtls_set_tcb_field()
80 __set_tcb_field(sk, skb, word, mask, val, 0, 1); in chtls_set_tcb_field()
91 void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word, in chtls_set_tcb_field_rpl_skb() argument
105 __set_tcb_field(sk, skb, word, mask, val, cookie, 0); in chtls_set_tcb_field_rpl_skb()
106 send_or_defer(sk, tcp_sk(sk), skb, through_l2t); in chtls_set_tcb_field_rpl_skb()
112 int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val) in chtls_set_tcb_tflag() argument
114 return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos, in chtls_set_tcb_tflag()
[all …]
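chtls_set_tcb_tflag() shows the mask/value idiom used for all TCB updates: 1ULL << bit_pos selects the field, and the new value is shifted into the same position. An illustrative helper (not the chtls API) for the same bit arithmetic:

#include <linux/types.h>

static u64 tcb_update_one_bit(u64 word, unsigned int bit_pos, int val)
{
        u64 mask = 1ULL << bit_pos;

        return (word & ~mask) | ((u64)!!val << bit_pos);
}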
chtls.h
161 struct sock *sk; /* The listening socket */ member
246 struct sock *sk; member
279 struct sock *sk; member
414 void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
456 #define TCP_PAGE(sk) (sk->sk_frag.page) argument
457 #define TCP_OFF(sk) (sk->sk_frag.offset) argument
481 static inline int csk_flag(const struct sock *sk, enum csk_flags flag) in csk_flag() argument
483 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in csk_flag()
509 struct sock *sk, in process_cpl_msg() argument
516 bh_lock_sock(sk); in process_cpl_msg()
[all …]
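process_cpl_msg() in chtls.h wraps its handler in the standard bottom-half socket discipline: take bh_lock_sock(), run the handler only if no process context owns the socket, otherwise queue the skb on the backlog to be replayed at release_sock() time. A hedged sketch of that shape ("my_handler" is illustrative):

#include <net/sock.h>

static void process_msg(struct sock *sk, struct sk_buff *skb,
                        void (*my_handler)(struct sock *, struct sk_buff *))
{
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                my_handler(sk, skb);
        else
                __sk_add_backlog(sk, skb); /* replayed under release_sock() */
        bh_unlock_sock(sk);
}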
chtls_main.c
63 ret = chtls_listen_start(clisten->cdev, clisten->sk); in listen_notify_handler()
67 chtls_listen_stop(clisten->cdev, clisten->sk); in listen_notify_handler()
78 static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb) in listen_backlog_rcv() argument
81 return tcp_v4_do_rcv(sk, skb); in listen_backlog_rcv()
82 BLOG_SKB_CB(skb)->backlog_rcv(sk, skb); in listen_backlog_rcv()
86 static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk) in chtls_start_listen() argument
90 if (sk->sk_protocol != IPPROTO_TCP) in chtls_start_listen()
93 if (sk->sk_family == PF_INET && in chtls_start_listen()
94 LOOPBACK(inet_sk(sk)->inet_rcv_saddr)) in chtls_start_listen()
97 sk->sk_backlog_rcv = listen_backlog_rcv; in chtls_start_listen()
[all …]
chtls_cm.h
89 #define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head) argument
98 #define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count) argument
110 static inline unsigned int sk_in_state(const struct sock *sk, in sk_in_state() argument
113 return states & (1 << sk->sk_state); in sk_in_state()
143 static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable) in sk_wakeup_sleepers() argument
148 wq = rcu_dereference(sk->sk_wq); in sk_wakeup_sleepers()
151 wake_up_interruptible(sk_sleep(sk)); in sk_wakeup_sleepers()
153 wake_up_all(sk_sleep(sk)); in sk_wakeup_sleepers()
172 static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb) in chtls_free_skb() argument
175 __skb_unlink(skb, &sk->sk_receive_queue); in chtls_free_skb()
[all …]
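sk_in_state() works because TCP states are small integers, so a set of states fits in one bitmask; the TCPF_* constants in net/tcp_states.h are exactly (1 << TCP_*), matching the "states & (1 << sk->sk_state)" test above. Illustrative use (fragment):

if (sk_in_state(sk, TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
        handle_peer_close(sk);  /* illustrative callee */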
/drivers/net/ppp/
pppoe.c
87 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
282 struct sock *sk; in pppoe_flush_dev() local
292 sk = sk_pppox(po); in pppoe_flush_dev()
302 sock_hold(sk); in pppoe_flush_dev()
304 lock_sock(sk); in pppoe_flush_dev()
307 sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { in pppoe_flush_dev()
308 pppox_unbind_sock(sk); in pppoe_flush_dev()
309 sk->sk_state_change(sk); in pppoe_flush_dev()
314 release_sock(sk); in pppoe_flush_dev()
315 sock_put(sk); in pppoe_flush_dev()
[all …]
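pppoe_flush_dev() shows the canonical pin-then-lock sequence for tearing down a socket found on a list: sock_hold() keeps the sock alive after the list lock is dropped, lock_sock() serializes against the owner, and sk_state_change() wakes any sleepers. A hedged sketch of that sequence:

#include <linux/if_pppox.h>
#include <net/sock.h>

static void detach_sock(struct sock *sk)
{
        sock_hold(sk);          /* keep sk alive across the locked section */
        lock_sock(sk);
        if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
                pppox_unbind_sock(sk);
                sk->sk_state_change(sk);  /* wake sleepers in poll()/read() */
        }
        release_sock(sk);
        sock_put(sk);
}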
pptp.c
135 struct sock *sk = &po->sk; in pptp_route_output() local
138 net = sock_net(sk); in pptp_route_output()
139 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0, in pptp_route_output()
143 0, 0, sock_net_uid(net, sk)); in pptp_route_output()
144 security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); in pptp_route_output()
146 return ip_route_output_flow(net, fl4, sk); in pptp_route_output()
151 struct sock *sk = (struct sock *) chan->private; in pptp_xmit() local
152 struct pppox_sock *po = pppox_sk(sk); in pptp_xmit()
153 struct net *net = sock_net(sk); in pptp_xmit()
186 if (skb->sk) in pptp_xmit()
[all …]
pppox.c
53 void pppox_unbind_sock(struct sock *sk) in pppox_unbind_sock() argument
57 if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED)) { in pppox_unbind_sock()
58 ppp_unregister_channel(&pppox_sk(sk)->chan); in pppox_unbind_sock()
59 sk->sk_state = PPPOX_DEAD; in pppox_unbind_sock()
69 struct sock *sk = sock->sk; in pppox_ioctl() local
70 struct pppox_sock *po = pppox_sk(sk); in pppox_ioctl()
73 lock_sock(sk); in pppox_ioctl()
79 if (!(sk->sk_state & PPPOX_CONNECTED)) in pppox_ioctl()
88 sk->sk_state |= PPPOX_BOUND; in pppox_ioctl()
92 rc = pppox_protos[sk->sk_protocol]->ioctl ? in pppox_ioctl()
[all …]
/drivers/scsi/
iscsi_tcp.c
115 static inline int iscsi_sw_sk_state_check(struct sock *sk) in iscsi_sw_sk_state_check() argument
117 struct iscsi_conn *conn = sk->sk_user_data; in iscsi_sw_sk_state_check()
119 if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && in iscsi_sw_sk_state_check()
121 !atomic_read(&sk->sk_rmem_alloc)) { in iscsi_sw_sk_state_check()
133 struct sock *sk = tcp_sw_conn->sock->sk; in iscsi_sw_tcp_recv_data() local
145 tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv); in iscsi_sw_tcp_recv_data()
151 iscsi_sw_sk_state_check(sk); in iscsi_sw_tcp_recv_data()
160 struct sock *sk = tcp_sw_conn->sock->sk; in iscsi_sw_tcp_recv_data_work() local
162 lock_sock(sk); in iscsi_sw_tcp_recv_data_work()
164 release_sock(sk); in iscsi_sw_tcp_recv_data_work()
[all …]
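iscsi_sw_tcp_recv_data() uses the tcp_read_sock() pattern: a read actor is invoked for each contiguous chunk of in-order receive-queue data, with a context pointer carried in the read descriptor. A hedged sketch ("my_actor"/ctx are illustrative names):

#include <net/tcp.h>

static int my_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
                    unsigned int offset, size_t len)
{
        void *ctx = rd_desc->arg.data;  /* context handed in by the caller */

        /* parse up to len bytes of skb payload starting at offset ... */
        return len;     /* bytes consumed; returning 0 stops the walk */
}

static void my_recv(struct sock *sk, void *ctx)
{
        read_descriptor_t rd_desc;

        rd_desc.arg.data = ctx;
        rd_desc.count = 1;      /* nonzero: the actor decides how much to take */
        tcp_read_sock(sk, &rd_desc, my_actor);
}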
/drivers/target/iscsi/
iscsi_target_nego.c
382 static void iscsi_target_sk_data_ready(struct sock *sk) in iscsi_target_sk_data_ready() argument
384 struct iscsit_conn *conn = sk->sk_user_data; in iscsi_target_sk_data_ready()
389 write_lock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
390 if (!sk->sk_user_data) { in iscsi_target_sk_data_ready()
391 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
395 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
400 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
405 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
409 conn->orig_data_ready(sk); in iscsi_target_sk_data_ready()
418 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
[all …]
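iscsi_target_sk_data_ready() is an instance of the socket callback-hijack pattern: a context pointer is stashed in sk_user_data and sk_data_ready is swapped under sk_callback_lock, with the original saved so it can be chained and later restored (the nvmet/tcp hits further down show the restore half). A hedged sketch with illustrative "my_*" names; iscsi takes the write lock in its upcall because it also mutates login state, while the read lock suffices merely to chain:

#include <net/sock.h>

struct my_ctx {
        void (*orig_data_ready)(struct sock *sk);
};

static void my_data_ready(struct sock *sk)
{
        struct my_ctx *ctx;

        read_lock_bh(&sk->sk_callback_lock);
        ctx = sk->sk_user_data;
        if (ctx)
                ctx->orig_data_ready(sk);  /* chain to the saved upcall */
        read_unlock_bh(&sk->sk_callback_lock);
}

static void my_install(struct sock *sk, struct my_ctx *ctx)
{
        write_lock_bh(&sk->sk_callback_lock);
        ctx->orig_data_ready = sk->sk_data_ready;
        sk->sk_user_data = ctx;
        sk->sk_data_ready = my_data_ready;
        write_unlock_bh(&sk->sk_callback_lock);
}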
/drivers/net/vxlan/
vxlan_multicast.c
23 struct sock *sk; in vxlan_igmp_join() local
32 sk = sock4->sock->sk; in vxlan_igmp_join()
33 lock_sock(sk); in vxlan_igmp_join()
34 ret = ip_mc_join_group(sk, &mreq); in vxlan_igmp_join()
35 release_sock(sk); in vxlan_igmp_join()
40 sk = sock6->sock->sk; in vxlan_igmp_join()
41 lock_sock(sk); in vxlan_igmp_join()
42 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, in vxlan_igmp_join()
44 release_sock(sk); in vxlan_igmp_join()
57 struct sock *sk; in vxlan_igmp_leave() local
[all …]
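vxlan_igmp_join() is a minimal in-kernel multicast join: ip_mc_join_group() must run with the tunnel socket locked (and, as for vxlan's ndo_open path, with RTNL held). A hedged sketch of the IPv4 side:

#include <linux/igmp.h>
#include <net/sock.h>

/* Caller holds RTNL. */
static int join_group(struct sock *sk, __be32 group, int ifindex)
{
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr   = group,
                .imr_ifindex            = ifindex,
        };
        int ret;

        lock_sock(sk);
        ret = ip_mc_join_group(sk, &mreq);
        release_sock(sk);
        return ret;
}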
/drivers/infiniband/sw/siw/
siw_cm.c
41 static void siw_sk_assign_cm_upcalls(struct sock *sk) in siw_sk_assign_cm_upcalls() argument
43 write_lock_bh(&sk->sk_callback_lock); in siw_sk_assign_cm_upcalls()
44 sk->sk_state_change = siw_cm_llp_state_change; in siw_sk_assign_cm_upcalls()
45 sk->sk_data_ready = siw_cm_llp_data_ready; in siw_sk_assign_cm_upcalls()
46 sk->sk_write_space = siw_cm_llp_write_space; in siw_sk_assign_cm_upcalls()
47 sk->sk_error_report = siw_cm_llp_error_report; in siw_sk_assign_cm_upcalls()
48 write_unlock_bh(&sk->sk_callback_lock); in siw_sk_assign_cm_upcalls()
51 static void siw_sk_save_upcalls(struct sock *sk) in siw_sk_save_upcalls() argument
53 struct siw_cep *cep = sk_to_cep(sk); in siw_sk_save_upcalls()
55 write_lock_bh(&sk->sk_callback_lock); in siw_sk_save_upcalls()
[all …]
siw_cm.h
62 void (*sk_state_change)(struct sock *sk);
63 void (*sk_data_ready)(struct sock *sk);
64 void (*sk_write_space)(struct sock *sk);
65 void (*sk_error_report)(struct sock *sk);
130 #define sk_to_qp(sk) (((struct siw_cep *)((sk)->sk_user_data))->qp) argument
131 #define sk_to_cep(sk) ((struct siw_cep *)((sk)->sk_user_data)) argument
/drivers/input/serio/
ioc3kbd.c
127 struct serio *sk, *sa; in ioc3kbd_probe() local
142 sk = kzalloc(sizeof(*sk), GFP_KERNEL); in ioc3kbd_probe()
143 if (!sk) in ioc3kbd_probe()
148 kfree(sk); in ioc3kbd_probe()
152 sk->id.type = SERIO_8042; in ioc3kbd_probe()
153 sk->write = ioc3kbd_write; in ioc3kbd_probe()
154 sk->start = ioc3kbd_start; in ioc3kbd_probe()
155 sk->stop = ioc3kbd_stop; in ioc3kbd_probe()
156 snprintf(sk->name, sizeof(sk->name), "IOC3 keyboard %d", pdev->id); in ioc3kbd_probe()
157 snprintf(sk->phys, sizeof(sk->phys), "ioc3/serio%dkbd", pdev->id); in ioc3kbd_probe()
[all …]
/drivers/net/ethernet/netronome/nfp/crypto/
tls.c
176 struct sock *sk, int direction) in nfp_net_tls_set_ipv4() argument
178 struct inet_sock *inet = inet_sk(sk); in nfp_net_tls_set_ipv4()
194 struct sock *sk, int direction) in nfp_net_tls_set_ipv6() argument
197 struct ipv6_pinfo *np = inet6_sk(sk); in nfp_net_tls_set_ipv6()
204 memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip)); in nfp_net_tls_set_ipv6()
214 struct nfp_crypto_req_add_back *back, struct sock *sk, in nfp_net_tls_set_l4() argument
217 struct inet_sock *inet = inet_sk(sk); in nfp_net_tls_set_l4()
264 nfp_net_tls_add(struct net_device *netdev, struct sock *sk, in nfp_net_tls_add() argument
289 switch (sk->sk_family) { in nfp_net_tls_add()
292 if (ipv6_only_sock(sk) || in nfp_net_tls_add()
[all …]
/drivers/net/
gtp.c
56 struct sock *sk; member
212 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) { in gtp_rx()
239 const struct sock *sk, in ip4_route_output_gtp() argument
243 fl4->flowi4_oif = sk->sk_bound_dev_if; in ip4_route_output_gtp()
246 fl4->flowi4_tos = RT_CONN_FLAGS(sk); in ip4_route_output_gtp()
247 fl4->flowi4_proto = sk->sk_protocol; in ip4_route_output_gtp()
249 return ip_route_output_key(sock_net(sk), fl4); in ip4_route_output_gtp()
621 static void __gtp_encap_destroy(struct sock *sk) in __gtp_encap_destroy() argument
625 lock_sock(sk); in __gtp_encap_destroy()
626 gtp = sk->sk_user_data; in __gtp_encap_destroy()
[all …]
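ip4_route_output_gtp() builds a flowi4 key from the tunnel socket and asks the FIB for a route. A reduced, hedged sketch (the gtp code also sets the tos and device fields; callers must check IS_ERR() and ip_rt_put() the result):

#include <net/route.h>
#include <net/sock.h>

static struct rtable *route_for(struct sock *sk, __be32 daddr, __be32 saddr)
{
        struct flowi4 fl4 = {
                .flowi4_oif     = sk->sk_bound_dev_if,
                .flowi4_proto   = sk->sk_protocol,
                .daddr          = daddr,
                .saddr          = saddr,
        };

        return ip_route_output_key(sock_net(sk), &fl4);
}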
vrf.c
442 static int vrf_ip6_local_out(struct net *net, struct sock *sk, in vrf_ip6_local_out() argument
450 sk, skb, NULL, skb_dst(skb)->dev, dst_output); in vrf_ip6_local_out()
453 err = dst_output(net, sk, skb); in vrf_ip6_local_out()
501 ret = vrf_ip6_local_out(net, skb->sk, skb); in vrf_process_v6_outbound()
522 static int vrf_ip_local_out(struct net *net, struct sock *sk, in vrf_ip_local_out() argument
529 err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, in vrf_ip_local_out()
532 err = dst_output(net, sk, skb); in vrf_ip_local_out()
584 ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); in vrf_process_v4_outbound()
653 static int vrf_finish_output6(struct net *net, struct sock *sk, in vrf_finish_output6() argument
687 static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) in vrf_output6() argument
[all …]
/drivers/infiniband/hw/erdma/
erdma_cm.c
21 static void erdma_cm_llp_state_change(struct sock *sk);
22 static void erdma_cm_llp_data_ready(struct sock *sk);
23 static void erdma_cm_llp_error_report(struct sock *sk);
25 static void erdma_sk_assign_cm_upcalls(struct sock *sk) in erdma_sk_assign_cm_upcalls() argument
27 write_lock_bh(&sk->sk_callback_lock); in erdma_sk_assign_cm_upcalls()
28 sk->sk_state_change = erdma_cm_llp_state_change; in erdma_sk_assign_cm_upcalls()
29 sk->sk_data_ready = erdma_cm_llp_data_ready; in erdma_sk_assign_cm_upcalls()
30 sk->sk_error_report = erdma_cm_llp_error_report; in erdma_sk_assign_cm_upcalls()
31 write_unlock_bh(&sk->sk_callback_lock); in erdma_sk_assign_cm_upcalls()
34 static void erdma_sk_save_upcalls(struct sock *sk) in erdma_sk_save_upcalls() argument
[all …]
erdma_cm.h
68 void (*sk_state_change)(struct sock *sk);
69 void (*sk_data_ready)(struct sock *sk, int bytes);
70 void (*sk_error_report)(struct sock *sk);
114 void (*sk_state_change)(struct sock *sk);
115 void (*sk_data_ready)(struct sock *sk);
116 void (*sk_error_report)(struct sock *sk);
165 #define sk_to_cep(sk) ((struct erdma_cep *)((sk)->sk_user_data)) argument
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
fs_tcp.c
29 static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock *sk) in accel_fs_tcp_set_ipv4_flow() argument
37 &inet_sk(sk)->inet_daddr, 4); in accel_fs_tcp_set_ipv4_flow()
40 &inet_sk(sk)->inet_rcv_saddr, 4); in accel_fs_tcp_set_ipv4_flow()
48 static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk) in accel_fs_tcp_set_ipv6_flow() argument
56 &sk->sk_v6_daddr, 16); in accel_fs_tcp_set_ipv6_flow()
59 &inet6_sk(sk)->saddr, 16); in accel_fs_tcp_set_ipv6_flow()
75 struct sock *sk, u32 tirn, in mlx5e_accel_fs_add_sk() argument
91 switch (sk->sk_family) { in mlx5e_accel_fs_add_sk()
93 accel_fs_tcp_set_ipv4_flow(spec, sk); in mlx5e_accel_fs_add_sk()
96 &inet_sk(sk)->inet_rcv_saddr, in mlx5e_accel_fs_add_sk()
[all …]
ktls_rx.c
48 struct sock *sk; member
114 rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk, in accel_rule_handle_work()
451 tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); in mlx5e_ktls_handle_get_psv_completion()
462 static bool resync_queue_get_psv(struct sock *sk) in resync_queue_get_psv() argument
467 priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk)); in resync_queue_get_psv()
488 struct sock *sk = NULL; in resync_update_sn() local
502 sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, in resync_update_sn()
512 sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, in resync_update_sn()
521 if (unlikely(!sk)) in resync_update_sn()
524 if (unlikely(sk->sk_state == TCP_TIME_WAIT)) in resync_update_sn()
[all …]
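resync_update_sn() maps a received segment back to its socket with inet_lookup_established(); the tuple is given from the peer's point of view, so the packet's saddr/source go in as the remote side. A hedged sketch of the IPv4 arm (the hashinfo expression mirrors the ktls_rx.c hit and is version-dependent):

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/inet_hashtables.h>

static struct sock *find_established(struct net *net, const struct iphdr *iph,
                                     const struct tcphdr *th, int ifindex)
{
        /* Returns sk with a reference held: sock_put() when done, and check
         * for TCP_TIME_WAIT, as ktls_rx.c does, before using it as a full sock. */
        return inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
                                       iph->saddr, th->source,
                                       iph->daddr, th->dest, ifindex);
}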
/drivers/nvme/target/
tcp.c
234 return queue->sock->sk->sk_incoming_cpu; in queue_cpu()
1411 write_lock_bh(&sock->sk->sk_callback_lock); in nvmet_tcp_restore_socket_callbacks()
1412 sock->sk->sk_data_ready = queue->data_ready; in nvmet_tcp_restore_socket_callbacks()
1413 sock->sk->sk_state_change = queue->state_change; in nvmet_tcp_restore_socket_callbacks()
1414 sock->sk->sk_write_space = queue->write_space; in nvmet_tcp_restore_socket_callbacks()
1415 sock->sk->sk_user_data = NULL; in nvmet_tcp_restore_socket_callbacks()
1416 write_unlock_bh(&sock->sk->sk_callback_lock); in nvmet_tcp_restore_socket_callbacks()
1479 static void nvmet_tcp_data_ready(struct sock *sk) in nvmet_tcp_data_ready() argument
1483 read_lock_bh(&sk->sk_callback_lock); in nvmet_tcp_data_ready()
1484 queue = sk->sk_user_data; in nvmet_tcp_data_ready()
[all …]
