/net/bluetooth/ |
D | sco.c |
    49  struct sock *sk;  member
    57  static void sco_sock_close(struct sock *sk);
    58  static void sco_sock_kill(struct sock *sk);
    61  #define sco_pi(sk) ((struct sco_pinfo *) sk)  argument
    78  struct sock *sk = (struct sock *)arg;  in sco_sock_timeout()  local
    80  BT_DBG("sock %p state %d", sk, sk->sk_state);  in sco_sock_timeout()
    82  bh_lock_sock(sk);  in sco_sock_timeout()
    83  sk->sk_err = ETIMEDOUT;  in sco_sock_timeout()
    84  sk->sk_state_change(sk);  in sco_sock_timeout()
    85  bh_unlock_sock(sk);  in sco_sock_timeout()
    [all …]
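The sco.c matches trace the Bluetooth SCO socket timeout handler. As a hedged sketch of that pattern, not the verbatim sco.c body: the function name is illustrative, the callback uses the old pre-4.15 timer signature that receives an unsigned long, and the final reference drop assumes a reference was taken when the timer was armed.

    /* Sketch: a connection timeout delivered from a timer. The timer's data
     * field carries the struct sock pointer; the handler records the error
     * and wakes sleepers through sk_state_change().
     */
    static void example_sock_timeout(unsigned long arg)
    {
            struct sock *sk = (struct sock *)arg;

            bh_lock_sock(sk);               /* serialize with softirq users of sk */
            sk->sk_err = ETIMEDOUT;         /* surfaced to userspace as the socket error */
            sk->sk_state_change(sk);        /* wake anyone blocked on this socket */
            bh_unlock_sock(sk);

            sock_put(sk);                   /* assumed: ref taken when the timer was armed */
    }

The bh_lock_sock()/bh_unlock_sock() pair is what lets a softirq-context timer touch a socket that process context may simultaneously hold via lock_sock().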
|
D | l2cap_sock.c |
    45  static void l2cap_sock_init(struct sock *sk, struct sock *parent);
    83  struct sock *sk = sock->sk;  in l2cap_sock_bind()  local
    84  struct l2cap_chan *chan = l2cap_pi(sk)->chan;  in l2cap_sock_bind()
    88  BT_DBG("sk %p", sk);  in l2cap_sock_bind()
    111  lock_sock(sk);  in l2cap_sock_bind()
    113  if (sk->sk_state != BT_OPEN) {  in l2cap_sock_bind()
    168  sk->sk_state = BT_BOUND;  in l2cap_sock_bind()
    171  release_sock(sk);  in l2cap_sock_bind()
    178  struct sock *sk = sock->sk;  in l2cap_sock_connect()  local
    179  struct l2cap_chan *chan = l2cap_pi(sk)->chan;  in l2cap_sock_connect()
    [all …]
|
D | af_bluetooth.c |
    69  void bt_sock_reclassify_lock(struct sock *sk, int proto)  in bt_sock_reclassify_lock()  argument
    71  BUG_ON(!sk);  in bt_sock_reclassify_lock()
    72  BUG_ON(!sock_allow_reclassification(sk));  in bt_sock_reclassify_lock()
    74  sock_lock_init_class_and_name(sk,  in bt_sock_reclassify_lock()
    161  bt_sock_reclassify_lock(sock->sk, proto);  in bt_sock_create()
    170  void bt_sock_link(struct bt_sock_list *l, struct sock *sk)  in bt_sock_link()  argument
    173  sk_add_node(sk, &l->head);  in bt_sock_link()
    178  void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)  in bt_sock_unlink()  argument
    181  sk_del_node_init(sk);  in bt_sock_unlink()
    186  void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)  in bt_accept_enqueue()  argument
    [all …]
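bt_sock_link()/bt_sock_unlink() show how a protocol keeps all of its sockets on a per-protocol list. A minimal sketch of that pattern, assuming a list head guarded by an rwlock as the matches suggest; the struct and function names here are illustrative, not the real bt_sock_list layout.

    struct example_sock_list {
            struct hlist_head head;
            rwlock_t          lock;
    };

    static void example_sock_link(struct example_sock_list *l, struct sock *sk)
    {
            write_lock(&l->lock);
            sk_add_node(sk, &l->head);      /* also takes a reference on sk */
            write_unlock(&l->lock);
    }

    static void example_sock_unlink(struct example_sock_list *l, struct sock *sk)
    {
            write_lock(&l->lock);
            sk_del_node_init(sk);           /* drops the reference taken at link time */
            write_unlock(&l->lock);
    }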
|
/net/netrom/ |
D | nr_timer.c |
    38  void nr_init_timers(struct sock *sk)  in nr_init_timers()  argument
    40  struct nr_sock *nr = nr_sk(sk);  in nr_init_timers()
    42  setup_timer(&nr->t1timer, nr_t1timer_expiry, (unsigned long)sk);  in nr_init_timers()
    43  setup_timer(&nr->t2timer, nr_t2timer_expiry, (unsigned long)sk);  in nr_init_timers()
    44  setup_timer(&nr->t4timer, nr_t4timer_expiry, (unsigned long)sk);  in nr_init_timers()
    45  setup_timer(&nr->idletimer, nr_idletimer_expiry, (unsigned long)sk);  in nr_init_timers()
    48  sk->sk_timer.data = (unsigned long)sk;  in nr_init_timers()
    49  sk->sk_timer.function = &nr_heartbeat_expiry;  in nr_init_timers()
    52  void nr_start_t1timer(struct sock *sk)  in nr_start_t1timer()  argument
    54  struct nr_sock *nr = nr_sk(sk);  in nr_start_t1timer()
    [all …]
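The NET/ROM timers above use the legacy timer API, since removed, in which the callback receives an opaque unsigned long. A sketch of that wiring; names are illustrative except where the listing itself names them.

    static void example_t1timer_expiry(unsigned long data)
    {
            struct sock *sk = (struct sock *)data;  /* recover the socket from .data */

            /* ... retransmit and re-arm as the protocol requires ... */
    }

    static void example_init_timers(struct sock *sk)
    {
            struct nr_sock *nr = nr_sk(sk);         /* protocol-private area of sk */

            setup_timer(&nr->t1timer, example_t1timer_expiry, (unsigned long)sk);

            /* sk_timer lives in struct sock itself; old code wired it by hand.
             * nr_heartbeat_expiry is the real handler named in the listing.
             */
            sk->sk_timer.data     = (unsigned long)sk;
            sk->sk_timer.function = &nr_heartbeat_expiry;
    }

The same .data/.function wiring recurs in the x25 and rose timer files below; on current kernels all of this became timer_setup() plus from_timer().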
|
D | af_netrom.c |
    94  static void nr_remove_socket(struct sock *sk)  in nr_remove_socket()  argument
    97  sk_del_node_init(sk);  in nr_remove_socket()
    137  static void nr_insert_socket(struct sock *sk)  in nr_insert_socket()  argument
    140  sk_add_node(sk, &nr_list);  in nr_insert_socket()
    218  struct sock *sk;  in nr_find_next_circuit()  local
    225  if ((sk=nr_find_socket(i, j)) == NULL)  in nr_find_next_circuit()
    227  sock_put(sk);  in nr_find_next_circuit()
    246  struct sock *sk=(struct sock *)data;  in nr_destroy_timer()  local
    247  bh_lock_sock(sk);  in nr_destroy_timer()
    248  sock_hold(sk);  in nr_destroy_timer()
    [all …]
|
/net/ipv4/ |
D | tcp_timer.c |
    34  static void tcp_write_err(struct sock *sk)  in tcp_write_err()  argument
    36  sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;  in tcp_write_err()
    37  sk->sk_error_report(sk);  in tcp_write_err()
    39  tcp_write_queue_purge(sk);  in tcp_write_err()
    40  tcp_done(sk);  in tcp_write_err()
    41  __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);  in tcp_write_err()
    68  static int tcp_out_of_resources(struct sock *sk, bool do_reset)  in tcp_out_of_resources()  argument
    70  struct tcp_sock *tp = tcp_sk(sk);  in tcp_out_of_resources()
    79  if (sk->sk_err_soft)  in tcp_out_of_resources()
    82  if (tcp_check_oom(sk, shift)) {  in tcp_out_of_resources()
    [all …]
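The tcp_timer.c hits are essentially the whole of tcp_write_err(): report a pending soft error if there is one, otherwise ETIMEDOUT, then tear the connection down. Reassembled here as a sketch that mirrors the listed lines, not guaranteed to be byte-for-byte the upstream body.

    static void example_tcp_write_err(struct sock *sk)
    {
            sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;  /* prefer a pending soft error */
            sk->sk_error_report(sk);                     /* wake poll()/recv() callers */

            tcp_write_queue_purge(sk);                   /* drop everything still queued */
            tcp_done(sk);                                /* transition to TCP_CLOSE */
            __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
    }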
|
D | tcp.c |
    327  void tcp_enter_memory_pressure(struct sock *sk)  in tcp_enter_memory_pressure()  argument
    338  NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);  in tcp_enter_memory_pressure()
    342  void tcp_leave_memory_pressure(struct sock *sk)  in tcp_leave_memory_pressure()  argument
    350  NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,  in tcp_leave_memory_pressure()
    410  void tcp_init_sock(struct sock *sk)  in tcp_init_sock()  argument
    412  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_init_sock()
    413  struct tcp_sock *tp = tcp_sk(sk);  in tcp_init_sock()
    416  tcp_init_xmit_timers(sk);  in tcp_init_sock()
    440  tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;  in tcp_init_sock()
    441  tcp_assign_congestion_control(sk);  in tcp_init_sock()
    [all …]
|
D | inet_hashtables.c |
    46  static u32 sk_ehashfn(const struct sock *sk)  in sk_ehashfn()  argument
    49  if (sk->sk_family == AF_INET6 &&  in sk_ehashfn()
    50  !ipv6_addr_v4mapped(&sk->sk_v6_daddr))  in sk_ehashfn()
    51  return inet6_ehashfn(sock_net(sk),  in sk_ehashfn()
    52  &sk->sk_v6_rcv_saddr, sk->sk_num,  in sk_ehashfn()
    53  &sk->sk_v6_daddr, sk->sk_dport);  in sk_ehashfn()
    55  return inet_ehashfn(sock_net(sk),  in sk_ehashfn()
    56  sk->sk_rcv_saddr, sk->sk_num,  in sk_ehashfn()
    57  sk->sk_daddr, sk->sk_dport);  in sk_ehashfn()
    93  void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,  in inet_bind_hash()  argument
    [all …]
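sk_ehashfn() picks the established-hash function by address family; v4-mapped IPv6 destinations deliberately fall through to the IPv4 hash so they land in the same chains as plain IPv4 sockets. A sketch assembled from the listed lines; the CONFIG_IPV6 guard is assumed from context rather than shown in the matches.

    static u32 example_ehashfn(const struct sock *sk)
    {
    #if IS_ENABLED(CONFIG_IPV6)
            if (sk->sk_family == AF_INET6 &&
                !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
                    return inet6_ehashfn(sock_net(sk),
                                         &sk->sk_v6_rcv_saddr, sk->sk_num,
                                         &sk->sk_v6_daddr, sk->sk_dport);
    #endif
            return inet_ehashfn(sock_net(sk),
                                sk->sk_rcv_saddr, sk->sk_num,
                                sk->sk_daddr, sk->sk_dport);
    }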
|
D | tcp_bbr.c |
    180  static bool bbr_full_bw_reached(const struct sock *sk)  in bbr_full_bw_reached()  argument
    182  const struct bbr *bbr = inet_csk_ca(sk);  in bbr_full_bw_reached()
    188  static u32 bbr_max_bw(const struct sock *sk)  in bbr_max_bw()  argument
    190  struct bbr *bbr = inet_csk_ca(sk);  in bbr_max_bw()
    196  static u32 bbr_bw(const struct sock *sk)  in bbr_bw()  argument
    198  struct bbr *bbr = inet_csk_ca(sk);  in bbr_bw()
    200  return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);  in bbr_bw()
    207  static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)  in bbr_rate_bytes_per_sec()  argument
    209  rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);  in bbr_rate_bytes_per_sec()
    217  static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)  in bbr_bw_to_pacing_rate()  argument
    [all …]
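tcp_bbr.c keeps its per-connection state in the connection sock's ca_priv scratch area, reached through inet_csk_ca(). A minimal sketch of that layout trick with a made-up private struct; the real struct bbr is much larger and must still fit inside ICSK_CA_PRIV_SIZE.

    struct example_ca {
            u32 max_bw;     /* hypothetical cached bandwidth sample */
    };

    static u32 example_max_bw(const struct sock *sk)
    {
            const struct example_ca *ca = inet_csk_ca(sk);  /* points into icsk_ca_priv[] */

            return ca->max_bw;
    }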
|
D | tcp_input.c |
    129  static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,  in tcp_gro_dev_warn()  argument
    140  dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);  in tcp_gro_dev_warn()
    151  static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)  in tcp_measure_rcv_mss()  argument
    153  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_measure_rcv_mss()
    165  tcp_sk(sk)->advmss);  in tcp_measure_rcv_mss()
    169  tcp_gro_dev_warn(sk, skb, len);  in tcp_measure_rcv_mss()
    189  len -= tcp_sk(sk)->tcp_header_len;  in tcp_measure_rcv_mss()
    202  static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)  in tcp_incr_quickack()  argument
    204  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_incr_quickack()
    205  unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);  in tcp_incr_quickack()
    [all …]
|
/net/core/ |
D | sock.c |
    158  bool sk_ns_capable(const struct sock *sk,  in sk_ns_capable()  argument
    161  return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&  in sk_ns_capable()
    175  bool sk_capable(const struct sock *sk, int cap)  in sk_capable()  argument
    177  return sk_ns_capable(sk, &init_user_ns, cap);  in sk_capable()
    190  bool sk_net_capable(const struct sock *sk, int cap)  in sk_net_capable()  argument
    192  return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);  in sk_net_capable()
    335  void sk_set_memalloc(struct sock *sk)  in sk_set_memalloc()  argument
    337  sock_set_flag(sk, SOCK_MEMALLOC);  in sk_set_memalloc()
    338  sk->sk_allocation |= __GFP_MEMALLOC;  in sk_set_memalloc()
    343  void sk_clear_memalloc(struct sock *sk)  in sk_clear_memalloc()  argument
    [all …]
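The sock.c matches around line 335 are the memalloc toggle used by swap-over-network setups: such sockets are allowed to dip into memory reserves so the swap path cannot deadlock waiting for an allocation. A sketch grounded in the listed lines; the real helper also flips a static key, which the listing truncates away.

    void example_sk_set_memalloc(struct sock *sk)
    {
            sock_set_flag(sk, SOCK_MEMALLOC);      /* receive path may use reserves */
            sk->sk_allocation |= __GFP_MEMALLOC;   /* so may this socket's own allocations */
    }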
|
/net/bluetooth/rfcomm/ |
D | sock.c |
    43  static void rfcomm_sock_close(struct sock *sk);
    44  static void rfcomm_sock_kill(struct sock *sk);
    52  struct sock *sk = d->owner;  in rfcomm_sk_data_ready()  local
    53  if (!sk)  in rfcomm_sk_data_ready()
    56  atomic_add(skb->len, &sk->sk_rmem_alloc);  in rfcomm_sk_data_ready()
    57  skb_queue_tail(&sk->sk_receive_queue, skb);  in rfcomm_sk_data_ready()
    58  sk->sk_data_ready(sk);  in rfcomm_sk_data_ready()
    60  if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)  in rfcomm_sk_data_ready()
    66  struct sock *sk = d->owner, *parent;  in rfcomm_sk_state_change()  local
    69  if (!sk)  in rfcomm_sk_state_change()
    [all …]
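rfcomm_sk_data_ready() shows the hand-rolled receive accounting style: the skb's length is charged to sk_rmem_alloc directly, the skb is queued, readers are woken, and the caller decides whether to throttle. A sketch with an assumed boolean return convention for the throttle decision (the real RFCOMM code sets a flow-control flag instead).

    /* Returns true when the receive buffer is full and the caller should
     * flow-control its lower layer (return convention assumed for the sketch).
     */
    static bool example_data_ready(struct sock *sk, struct sk_buff *skb)
    {
            atomic_add(skb->len, &sk->sk_rmem_alloc);    /* manual rcvbuf accounting */
            skb_queue_tail(&sk->sk_receive_queue, skb);
            sk->sk_data_ready(sk);                       /* wake blocked readers / poll */

            return atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf;
    }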
|
/net/smc/ |
D | smc_close.c |
    26  struct sock *sk;  in smc_close_cleanup_listen()  local
    29  while ((sk = smc_accept_dequeue(parent, NULL)))  in smc_close_cleanup_listen()
    30  smc_close_non_accepted(sk);  in smc_close_cleanup_listen()
    36  struct sock *sk = &smc->sk;  in smc_close_wait_tx_pends()  local
    40  add_wait_queue(sk_sleep(sk), &wait);  in smc_close_wait_tx_pends()
    44  rc = sk_wait_event(sk, &timeout,  in smc_close_wait_tx_pends()
    50  remove_wait_queue(sk_sleep(sk), &wait);  in smc_close_wait_tx_pends()
    57  struct sock *sk = &smc->sk;  in smc_close_stream_wait()  local
    66  add_wait_queue(sk_sleep(sk), &wait);  in smc_close_stream_wait()
    70  rc = sk_wait_event(sk, &timeout,  in smc_close_stream_wait()
    [all …]
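The SMC close paths sleep with the sk_wait_event() helper, which drops and retakes the socket lock around the actual schedule. A sketch of that idiom, assuming the caller already holds lock_sock(sk); the condition shown is a placeholder, not SMC's real predicate.

    static int example_wait_for_condition(struct sock *sk, long timeout)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            int rc;

            add_wait_queue(sk_sleep(sk), &wait);
            rc = sk_wait_event(sk, &timeout,
                               sk->sk_err || sk->sk_shutdown == SHUTDOWN_MASK,  /* placeholder */
                               &wait);
            remove_wait_queue(sk_sleep(sk), &wait);
            return rc;   /* non-zero once the condition became true */
    }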
|
/net/caif/ |
D | caif_socket.c |
    47  struct sock sk; /* must be first member */  member
    92  static void caif_read_lock(struct sock *sk)  in caif_read_lock()  argument
    95  cf_sk = container_of(sk, struct caifsock, sk);  in caif_read_lock()
    99  static void caif_read_unlock(struct sock *sk)  in caif_read_unlock()  argument
    102  cf_sk = container_of(sk, struct caifsock, sk);  in caif_read_unlock()
    109  return cf_sk->sk.sk_rcvbuf / 4;  in sk_rcvbuf_lowwater()
    112  static void caif_flow_ctrl(struct sock *sk, int mode)  in caif_flow_ctrl()  argument
    115  cf_sk = container_of(sk, struct caifsock, sk);  in caif_flow_ctrl()
    124  static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)  in caif_queue_rcv_skb()  argument
    128  struct sk_buff_head *list = &sk->sk_receive_queue;  in caif_queue_rcv_skb()
    [all …]
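The "must be first member" comment on caifsock explains how the generic struct sock * that the core hands around is turned back into the protocol's own socket: the private struct embeds struct sock and container_of() recovers the wrapper. A minimal sketch with hypothetical names.

    struct example_caifsock {
            struct sock sk;         /* must be first member */
            int         flow_state; /* hypothetical private field */
    };

    static struct example_caifsock *example_csk(struct sock *sk)
    {
            /* container_of() works for any member position; keeping the sock
             * first additionally allows plain casts like sco.c's sco_pi().
             */
            return container_of(sk, struct example_caifsock, sk);
    }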
|
/net/dccp/ |
D | proto.c |
    78  void dccp_set_state(struct sock *sk, const int state)  in dccp_set_state()  argument
    80  const int oldstate = sk->sk_state;  in dccp_set_state()
    82  dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,  in dccp_set_state()
    92  dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);  in dccp_set_state()
    100  sk->sk_prot->unhash(sk);  in dccp_set_state()
    101  if (inet_csk(sk)->icsk_bind_hash != NULL &&  in dccp_set_state()
    102  !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))  in dccp_set_state()
    103  inet_put_port(sk);  in dccp_set_state()
    113  sk->sk_state = state;  in dccp_set_state()
    118  static void dccp_finish_passive_close(struct sock *sk)  in dccp_finish_passive_close()  argument
    [all …]
|
D | input.c |
    26  static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)  in dccp_enqueue_skb()  argument
    29  __skb_queue_tail(&sk->sk_receive_queue, skb);  in dccp_enqueue_skb()
    30  skb_set_owner_r(skb, sk);  in dccp_enqueue_skb()
    31  sk->sk_data_ready(sk);  in dccp_enqueue_skb()
    34  static void dccp_fin(struct sock *sk, struct sk_buff *skb)  in dccp_fin()  argument
    42  sk->sk_shutdown = SHUTDOWN_MASK;  in dccp_fin()
    43  sock_set_flag(sk, SOCK_DONE);  in dccp_fin()
    44  dccp_enqueue_skb(sk, skb);  in dccp_fin()
    47  static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)  in dccp_rcv_close()  argument
    51  switch (sk->sk_state) {  in dccp_rcv_close()
    [all …]
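dccp_enqueue_skb() is the other common receive-side accounting style, in contrast with the manual atomic_add seen in the RFCOMM entry above: skb_set_owner_r() charges the buffer to the socket and installs a destructor that un-charges it when the skb is freed. A sketch mirroring the listed lines.

    static void example_enqueue_skb(struct sock *sk, struct sk_buff *skb)
    {
            __skb_queue_tail(&sk->sk_receive_queue, skb);
            skb_set_owner_r(skb, sk);       /* charges sk_rmem_alloc, sets the sock_rfree destructor */
            sk->sk_data_ready(sk);
    }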
|
D | timer.c |
    24  static void dccp_write_err(struct sock *sk)  in dccp_write_err()  argument
    26  sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;  in dccp_write_err()
    27  sk->sk_error_report(sk);  in dccp_write_err()
    29  dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);  in dccp_write_err()
    30  dccp_done(sk);  in dccp_write_err()
    35  static int dccp_write_timeout(struct sock *sk)  in dccp_write_timeout()  argument
    37  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_write_timeout()
    40  if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {  in dccp_write_timeout()
    42  dst_negative_advice(sk);  in dccp_write_timeout()
    67  dst_negative_advice(sk);  in dccp_write_timeout()
    [all …]
|
/net/iucv/ |
D | af_iucv.c |
    53  #define __iucv_sock_wait(sk, condition, timeo, ret) \  argument
    58  prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
    68  release_sock(sk); \
    70  lock_sock(sk); \
    71  ret = sock_error(sk); \
    75  finish_wait(sk_sleep(sk), &__wait); \
    78  #define iucv_sock_wait(sk, condition, timeo) \  argument
    82  __iucv_sock_wait(sk, condition, timeo, __ret); \
    86  static void iucv_sock_kill(struct sock *sk);
    87  static void iucv_sock_close(struct sock *sk);
    [all …]
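__iucv_sock_wait() is an open-coded sleep loop rather than the sk_wait_event() helper used in the SMC entry above. A sketch of roughly what the macro expands to, written as a function with a placeholder condition; the real macro takes the condition as a parameter, and the exact statement order is not guaranteed to match upstream.

    static int example_sock_wait(struct sock *sk, long timeo)
    {
            DEFINE_WAIT(__wait);
            int ret = 0;

            while (!sock_flag(sk, SOCK_DONE)) {     /* placeholder for the caller's condition */
                    prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);
                    if (!timeo) {
                            ret = -EAGAIN;
                            break;
                    }
                    if (signal_pending(current)) {
                            ret = sock_intr_errno(timeo);
                            break;
                    }
                    release_sock(sk);               /* drop the socket lock while sleeping */
                    timeo = schedule_timeout(timeo);
                    lock_sock(sk);
                    ret = sock_error(sk);
                    if (ret)
                            break;
            }
            finish_wait(sk_sleep(sk), &__wait);
            return ret;
    }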
|
/net/nfc/ |
D | llcp_sock.c |
    29  static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)  in sock_wait_state()  argument
    34  pr_debug("sk %p", sk);  in sock_wait_state()
    36  add_wait_queue(sk_sleep(sk), &wait);  in sock_wait_state()
    39  while (sk->sk_state != state) {  in sock_wait_state()
    50  release_sock(sk);  in sock_wait_state()
    52  lock_sock(sk);  in sock_wait_state()
    55  err = sock_error(sk);  in sock_wait_state()
    61  remove_wait_queue(sk_sleep(sk), &wait);  in sock_wait_state()
    73  struct sock *sk = sock->sk;  in llcp_sock_bind()  local
    74  struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);  in llcp_sock_bind()
    [all …]
|
D | rawsock.c |
    34  static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk)  in nfc_sock_link()  argument
    37  sk_add_node(sk, &l->head);  in nfc_sock_link()
    41  static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk)  in nfc_sock_unlink()  argument
    44  sk_del_node_init(sk);  in nfc_sock_unlink()
    48  static void rawsock_write_queue_purge(struct sock *sk)  in rawsock_write_queue_purge()  argument
    50  pr_debug("sk=%p\n", sk);  in rawsock_write_queue_purge()
    52  spin_lock_bh(&sk->sk_write_queue.lock);  in rawsock_write_queue_purge()
    53  __skb_queue_purge(&sk->sk_write_queue);  in rawsock_write_queue_purge()
    54  nfc_rawsock(sk)->tx_work_scheduled = false;  in rawsock_write_queue_purge()
    55  spin_unlock_bh(&sk->sk_write_queue.lock);  in rawsock_write_queue_purge()
    [all …]
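rawsock_write_queue_purge() purges the transmit queue under the queue's own spinlock, using the _bh variant so softirq-context users of the queue are excluded as well, and the lock-free __skb_queue_purge() because the lock is already held. A sketch of just that locking pattern, without the NFC-private bookkeeping.

    static void example_write_queue_purge(struct sock *sk)
    {
            spin_lock_bh(&sk->sk_write_queue.lock);
            __skb_queue_purge(&sk->sk_write_queue);  /* unlocked variant: lock already held */
            spin_unlock_bh(&sk->sk_write_queue.lock);
    }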
|
/net/x25/ |
D | x25_timer.c |
    32  void x25_init_timers(struct sock *sk)  in x25_init_timers()  argument
    34  struct x25_sock *x25 = x25_sk(sk);  in x25_init_timers()
    36  setup_timer(&x25->timer, x25_timer_expiry, (unsigned long)sk);  in x25_init_timers()
    39  sk->sk_timer.data = (unsigned long)sk;  in x25_init_timers()
    40  sk->sk_timer.function = &x25_heartbeat_expiry;  in x25_init_timers()
    43  void x25_start_heartbeat(struct sock *sk)  in x25_start_heartbeat()  argument
    45  mod_timer(&sk->sk_timer, jiffies + 5 * HZ);  in x25_start_heartbeat()
    48  void x25_stop_heartbeat(struct sock *sk)  in x25_stop_heartbeat()  argument
    50  del_timer(&sk->sk_timer);  in x25_stop_heartbeat()
    53  void x25_start_t2timer(struct sock *sk)  in x25_start_t2timer()  argument
    [all …]
|
/net/phonet/ |
D | pep.c |
    82  static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,  in pep_alloc_skb()  argument
    88  skb_set_owner_w(skb, sk);  in pep_alloc_skb()
    98  static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,  in pep_reply()  argument
    106  skb = pep_alloc_skb(sk, data, len, priority);  in pep_reply()
    117  return pn_skb_send(sk, skb, &peer);  in pep_reply()
    120  static int pep_indicate(struct sock *sk, u8 id, u8 code,  in pep_indicate()  argument
    123  struct pep_sock *pn = pep_sk(sk);  in pep_indicate()
    127  skb = pep_alloc_skb(sk, data, len, priority);  in pep_indicate()
    136  return pn_skb_send(sk, skb, NULL);  in pep_indicate()
    141  static int pipe_handler_request(struct sock *sk, u8 id, u8 code,  in pipe_handler_request()  argument
    [all …]
|
/net/rose/ |
D | rose_timer.c |
    35  void rose_start_heartbeat(struct sock *sk)  in rose_start_heartbeat()  argument
    37  del_timer(&sk->sk_timer);  in rose_start_heartbeat()
    39  sk->sk_timer.data = (unsigned long)sk;  in rose_start_heartbeat()
    40  sk->sk_timer.function = &rose_heartbeat_expiry;  in rose_start_heartbeat()
    41  sk->sk_timer.expires = jiffies + 5 * HZ;  in rose_start_heartbeat()
    43  add_timer(&sk->sk_timer);  in rose_start_heartbeat()
    46  void rose_start_t1timer(struct sock *sk)  in rose_start_t1timer()  argument
    48  struct rose_sock *rose = rose_sk(sk);  in rose_start_t1timer()
    58  void rose_start_t2timer(struct sock *sk)  in rose_start_t2timer()  argument
    60  struct rose_sock *rose = rose_sk(sk);  in rose_start_t2timer()
    [all …]
|
D | rose_in.c |
    39  static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)  in rose_state1_machine()  argument
    41  struct rose_sock *rose = rose_sk(sk);  in rose_state1_machine()
    45  rose_stop_timer(sk);  in rose_state1_machine()
    46  rose_start_idletimer(sk);  in rose_state1_machine()
    53  sk->sk_state = TCP_ESTABLISHED;  in rose_state1_machine()
    54  if (!sock_flag(sk, SOCK_DEAD))  in rose_state1_machine()
    55  sk->sk_state_change(sk);  in rose_state1_machine()
    59  rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);  in rose_state1_machine()
    60  rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);  in rose_state1_machine()
    76  static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)  in rose_state2_machine()  argument
    [all …]
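rose_state1_machine() is the call-setup leg of the ROSE connection state machine: a CALL ACCEPTED frame completes the connect and wakes the sleeping connect() caller, a CLEAR REQUEST refuses it. A partial sketch assembled from the listed lines plus an assumed switch on frametype; the real handler also resets the protocol's window variables, which the listing truncates.

    static int example_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
    {
            switch (frametype) {
            case ROSE_CALL_ACCEPTED:
                    rose_stop_timer(sk);
                    rose_start_idletimer(sk);
                    sk->sk_state = TCP_ESTABLISHED;       /* ROSE reuses the TCP state names */
                    if (!sock_flag(sk, SOCK_DEAD))
                            sk->sk_state_change(sk);      /* wake the blocked connect() */
                    break;

            case ROSE_CLEAR_REQUEST:
                    rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
                    rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
                    break;

            default:
                    break;
            }

            return 0;
    }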
|
/net/llc/ |
D | llc_conn.c |
    33  static void llc_conn_send_pdus(struct sock *sk);
    34  static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
    35  static int llc_exec_conn_trans_actions(struct sock *sk,
    38  static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
    61  int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)  in llc_conn_state_process()  argument
    64  struct llc_sock *llc = llc_sk(skb->sk);  in llc_conn_state_process()
    71  rc = llc_conn_service(skb->sk, skb);  in llc_conn_state_process()
    80  llc_save_primitive(sk, skb, LLC_DATA_PRIM);  in llc_conn_state_process()
    81  if (unlikely(sock_queue_rcv_skb(sk, skb))) {  in llc_conn_state_process()
    97  skb_queue_tail(&sk->sk_receive_queue, skb);  in llc_conn_state_process()
    [all …]
|