/net/vmw_vsock/ |
D | vmci_transport.c |
    54    struct sock *pending,
    474   struct sock *pending;  in vmci_transport_get_pending() local
    485   pending = sk_vsock(vpending);  in vmci_transport_get_pending()
    486   sock_hold(pending);  in vmci_transport_get_pending()
    491   pending = NULL;  in vmci_transport_get_pending()
    493   return pending;  in vmci_transport_get_pending()
    497   static void vmci_transport_release_pending(struct sock *pending)  in vmci_transport_release_pending() argument
    499   sock_put(pending);  in vmci_transport_release_pending()
    957   struct sock *pending;  in vmci_transport_recv_listen() local
    973   pending = vmci_transport_get_pending(sk, pkt);  in vmci_transport_recv_listen()
    [all …]
|
D | af_vsock.c |
    357   void vsock_add_pending(struct sock *listener, struct sock *pending)  in vsock_add_pending() argument
    363   vpending = vsock_sk(pending);  in vsock_add_pending()
    365   sock_hold(pending);  in vsock_add_pending()
    371   void vsock_remove_pending(struct sock *listener, struct sock *pending)  in vsock_remove_pending() argument
    373   struct vsock_sock *vpending = vsock_sk(pending);  in vsock_remove_pending()
    377   sock_put(pending);  in vsock_remove_pending()
    649   struct sock *pending;  in __vsock_release() local
    653   pending = NULL; /* Compiler warning. */  in __vsock_release()
    671   while ((pending = vsock_dequeue_accept(sk)) != NULL) {  in __vsock_release()
    672   __vsock_release(pending);  in __vsock_release()
    [all …]
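
The vmci_transport.c and af_vsock.c hits above share one refcount discipline: a not-yet-accepted child socket is held with sock_hold() for as long as a pending list (or a lookup such as vmci_transport_get_pending()) references it, and sock_put() is called when it is removed or released. A minimal sketch of that discipline, assuming a hypothetical my_pending_entry container rather than the real struct vsock_sock fields:

    /*
     * Sketch only: my_pending_entry, my_pending_list and my_lock are made-up
     * names; the real code keeps the list inside struct vsock_sock with its
     * own locking. The point is the pairing of list membership with
     * sock_hold()/sock_put().
     */
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <net/sock.h>

    struct my_pending_entry {
            struct list_head node;
            struct sock *sk;
    };

    static LIST_HEAD(my_pending_list);
    static DEFINE_SPINLOCK(my_lock);

    static void my_add_pending(struct my_pending_entry *e, struct sock *pending)
    {
            sock_hold(pending);             /* the list now owns a reference */
            e->sk = pending;

            spin_lock_bh(&my_lock);
            list_add_tail(&e->node, &my_pending_list);
            spin_unlock_bh(&my_lock);
    }

    static void my_remove_pending(struct my_pending_entry *e)
    {
            spin_lock_bh(&my_lock);
            list_del(&e->node);
            spin_unlock_bh(&my_lock);

            sock_put(e->sk);                /* drop the list's reference */
    }
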
|
/net/sunrpc/ |
D | xprt.c |
    474   rpc_wake_up_status(&xprt->pending, status);  in xprt_wake_pending_tasks()
    476   rpc_wake_up(&xprt->pending);  in xprt_wake_pending_tasks()
    495   rpc_sleep_on(&xprt->pending, task, action);  in xprt_wait_for_buffer_space()
    511   rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);  in xprt_write_space()
    718   rpc_sleep_on(&xprt->pending, task, xprt_connect_status);  in xprt_connect()
    825   rpc_wake_up_queued_task(&xprt->pending, task);  in xprt_complete_rqst()
    875   rpc_sleep_on(&xprt->pending, task, xprt_timer);  in xprt_prepare_transmit()
    947   xprt->stat.pending_u += xprt->pending.qlen;  in xprt_transmit()
    958   rpc_sleep_on(&xprt->pending, task, xprt_timer);  in xprt_transmit()
    1283  rpc_init_wait_queue(&xprt->pending, "xprt_pending");  in xprt_init()
    [all …]
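
In these hits xprt->pending is an rpc_wait_queue: tasks waiting for a reply, for buffer space or for a (re)connect sleep on it with rpc_sleep_on(), and the transport wakes them with rpc_wake_up(), rpc_wake_up_status() or rpc_wake_up_queued_task() when the condition changes. A hedged sketch of that init/sleep/wake cycle, with my_xprt and my_timeout_action standing in for struct rpc_xprt and its real callbacks:

    #include <linux/sunrpc/sched.h>

    struct my_xprt {
            struct rpc_wait_queue pending;  /* tasks waiting on the transport */
    };

    /* Runs when a sleeping task is woken; placeholder for xprt_timer() etc. */
    static void my_timeout_action(struct rpc_task *task)
    {
    }

    static void my_xprt_init(struct my_xprt *xprt)
    {
            rpc_init_wait_queue(&xprt->pending, "xprt_pending");
    }

    /* Transmit path: park @task until data arrives on the transport. */
    static void my_wait_for_reply(struct my_xprt *xprt, struct rpc_task *task)
    {
            rpc_sleep_on(&xprt->pending, task, my_timeout_action);
    }

    /* Receive/error path: release every waiter, as xprt_wake_pending_tasks() does. */
    static void my_wake_all(struct my_xprt *xprt, int status)
    {
            if (status < 0)
                    rpc_wake_up_status(&xprt->pending, status);
            else
                    rpc_wake_up(&xprt->pending);
    }
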
|
D | cache.c |
    662   struct list_head pending;  in cache_revisit_request() local
    666   INIT_LIST_HEAD(&pending);  in cache_revisit_request()
    672   list_add(&dreq->recent, &pending);  in cache_revisit_request()
    677   while (!list_empty(&pending)) {  in cache_revisit_request()
    678   dreq = list_entry(pending.next, struct cache_deferred_req, recent);  in cache_revisit_request()
    687   struct list_head pending;  in cache_clean_deferred() local
    690   INIT_LIST_HEAD(&pending);  in cache_clean_deferred()
    696   list_add(&dreq->recent, &pending);  in cache_clean_deferred()
    701   while (!list_empty(&pending)) {  in cache_clean_deferred()
    702   dreq = list_entry(pending.next, struct cache_deferred_req, recent);  in cache_clean_deferred()
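
cache_revisit_request() and cache_clean_deferred() both use the same idiom: under a lock, move the deferred requests of interest onto a stack-local list named pending, drop the lock, then walk that local list and run each callback without the lock held. A generic sketch of the idiom, with my_req and my_global_list as hypothetical stand-ins for the cache_deferred_req machinery:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_req {
            struct list_head recent;
            void (*revisit)(struct my_req *req);
    };

    static LIST_HEAD(my_global_list);
    static DEFINE_SPINLOCK(my_lock);

    static void my_revisit_all(void)
    {
            struct my_req *req, *tmp;
            LIST_HEAD(pending);             /* stack-local collection list */

            spin_lock(&my_lock);
            list_for_each_entry_safe(req, tmp, &my_global_list, recent)
                    list_move(&req->recent, &pending);      /* grab while locked */
            spin_unlock(&my_lock);

            /* Callbacks may sleep or take other locks, so run them unlocked. */
            while (!list_empty(&pending)) {
                    req = list_entry(pending.next, struct my_req, recent);
                    list_del_init(&req->recent);
                    req->revisit(req);
            }
    }
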
|
D | clnt.c |
    1890  rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);  in call_transmit()
    2004  rpc_wake_up_queued_task(&req->rq_xprt->pending, task);  in call_bc_transmit()
|
/net/batman-adv/ |
D | send.c |
    583   bool pending;  in batadv_purge_outstanding_packets() local
    609   pending = cancel_delayed_work_sync(&forw_packet->delayed_work);  in batadv_purge_outstanding_packets()
    612   if (pending) {  in batadv_purge_outstanding_packets()
    639   pending = cancel_delayed_work_sync(&forw_packet->delayed_work);  in batadv_purge_outstanding_packets()
    642   if (pending) {  in batadv_purge_outstanding_packets()
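
batadv_purge_outstanding_packets() leans on the return value of cancel_delayed_work_sync(): it is true only when the work was still pending (queued but never run), in which case the purge path must release whatever the work function would have released. A reduced sketch of that ownership hand-off, with my_forw_packet and my_free_packet() as made-up stand-ins:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_forw_packet {
            struct delayed_work delayed_work;
            /* ... payload ... */
    };

    static void my_free_packet(struct my_forw_packet *fp)
    {
            kfree(fp);
    }

    static void my_purge_one(struct my_forw_packet *fp)
    {
            bool pending;

            /*
             * Waits if the work is currently running; returns true if it was
             * still queued and therefore will never run.
             */
            pending = cancel_delayed_work_sync(&fp->delayed_work);

            if (pending) {
                    /* The work function never got to free it; do it here. */
                    my_free_packet(fp);
            }
            /* Otherwise the work function ran (or is completing) and already
             * owns the cleanup. */
    }
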
|
/net/ipv6/ |
D | udp.c |
    957   if (up->pending == AF_INET)  in udp_v6_flush_pending_frames()
    959   else if (up->pending) {  in udp_v6_flush_pending_frames()
    961   up->pending = 0;  in udp_v6_flush_pending_frames()
    1022  if (up->pending == AF_INET)  in udp_v6_push_pending_frames()
    1071  up->pending = 0;  in udp_v6_push_pending_frames()
    1125  } else if (!up->pending) {  in udpv6_sendmsg()
    1147  if (up->pending == AF_INET)  in udpv6_sendmsg()
    1156  if (up->pending) {  in udpv6_sendmsg()
    1162  if (likely(up->pending)) {  in udpv6_sendmsg()
    1163  if (unlikely(up->pending != AF_INET6)) {  in udpv6_sendmsg()
    [all …]
|
/net/mac80211/ |
D | sta_info.c |
    144   ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);  in __cleanup_single_sta()
    1093  struct sk_buff_head pending;  in ieee80211_sta_ps_deliver_wakeup() local
    1117  skb_queue_head_init(&pending);  in ieee80211_sta_ps_deliver_wakeup()
    1123  int count = skb_queue_len(&pending), tmp;  in ieee80211_sta_ps_deliver_wakeup()
    1126  skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);  in ieee80211_sta_ps_deliver_wakeup()
    1128  tmp = skb_queue_len(&pending);  in ieee80211_sta_ps_deliver_wakeup()
    1133  skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);  in ieee80211_sta_ps_deliver_wakeup()
    1135  tmp = skb_queue_len(&pending);  in ieee80211_sta_ps_deliver_wakeup()
    1139  ieee80211_add_pending_skbs(local, &pending);  in ieee80211_sta_ps_deliver_wakeup()
    1365  struct sk_buff_head pending;  in ieee80211_sta_ps_deliver_response() local
    [all …]
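
ieee80211_sta_ps_deliver_wakeup() drains the per-AC buffers by splicing them onto a stack-local sk_buff_head called pending and then handing the whole queue to ieee80211_add_pending_skbs(). The same splice idiom, reduced to a sketch with hypothetical source queues and with locking omitted (the real code serializes against the TX path):

    #include <linux/skbuff.h>

    #define MY_NUM_ACS 4    /* assumed number of access categories */

    static void my_deliver(struct sk_buff_head *skbs)
    {
            struct sk_buff *skb;

            /* Placeholder: the real code requeues the frames for transmit. */
            while ((skb = skb_dequeue(skbs)) != NULL)
                    kfree_skb(skb);
    }

    static void my_wakeup(struct sk_buff_head buffered[MY_NUM_ACS])
    {
            struct sk_buff_head pending;
            int ac;

            skb_queue_head_init(&pending);

            /* Move every buffered frame; each buffered[ac] ends up empty. */
            for (ac = 0; ac < MY_NUM_ACS; ac++)
                    skb_queue_splice_tail_init(&buffered[ac], &pending);

            my_deliver(&pending);
    }
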
|
D | agg-tx.c |
    215   if (!skb_queue_empty(&tid_tx->pending)) {  in __acquires()
    218   skb_queue_splice_tail_init(&tid_tx->pending,  in __acquires()
    219   &local->pending[queue]);  in __acquires()
    604   skb_queue_head_init(&tid_tx->pending);  in ieee80211_start_tx_ba_session()
|
D | tx.c |
    1094  __skb_queue_tail(&tid_tx->pending, skb);  in ieee80211_tx_prep_agg()
    1095  if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)  in ieee80211_tx_prep_agg()
    1096  purge_skb = __skb_dequeue(&tid_tx->pending);  in ieee80211_tx_prep_agg()
    1220  (!txpending && !skb_queue_empty(&local->pending[q]))) {  in ieee80211_tx_frags()
    1247  &local->pending[q]);  in ieee80211_tx_frags()
    1250  &local->pending[q]);  in ieee80211_tx_frags()
    2230  while ((skb = skb_dequeue(&local->pending[i])) != NULL)  in ieee80211_clear_tx_pending()
    2294  skb_queue_empty(&local->pending[i]))  in ieee80211_tx_pending()
    2297  while (!skb_queue_empty(&local->pending[i])) {  in ieee80211_tx_pending()
    2298  struct sk_buff *skb = __skb_dequeue(&local->pending[i]);  in ieee80211_tx_pending()
    [all …]
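
ieee80211_tx_prep_agg() also caps the per-TID pending queue: the new frame is appended and, if the queue has grown past STA_MAX_TX_BUFFER, the oldest entry is dequeued and freed. A small sketch of that bounded-buffer step; MY_MAX_BUFFER is an assumed limit, and the caller is assumed to hold the lock protecting the queue (hence the __ variants):

    #include <linux/skbuff.h>

    #define MY_MAX_BUFFER 64        /* assumed cap, stands in for STA_MAX_TX_BUFFER */

    /* Caller must hold the lock that protects @pending. */
    static void my_buffer_frame(struct sk_buff_head *pending, struct sk_buff *skb)
    {
            struct sk_buff *purge_skb = NULL;

            __skb_queue_tail(pending, skb);
            if (skb_queue_len(pending) > MY_MAX_BUFFER)
                    purge_skb = __skb_dequeue(pending);     /* drop the oldest */

            if (purge_skb)
                    kfree_skb(purge_skb);   /* real code uses ieee80211_free_txskb() */
    }
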
|
D | iface.c |
    714   skb_queue_empty(&local->pending[sdata->vif.cab_queue]))) {  in ieee80211_do_open()
    719   skb_queue_empty(&local->pending[ac_queue]))  in ieee80211_do_open()
    948   skb_queue_walk_safe(&local->pending[i], skb, tmp) {  in ieee80211_do_stop()
    951   __skb_unlink(skb, &local->pending[i]);  in ieee80211_do_stop()
|
D | sta_info.h | 141 struct sk_buff_head pending; member
|
D | debugfs.c | 337 skb_queue_len(&local->pending[q])); in queues_read()
|
D | util.c |
    314   skb_queue_empty(&local->pending[ac_queue])))  in ieee80211_propagate_queue_wake()
    346   if (skb_queue_empty(&local->pending[queue])) {  in __ieee80211_wake_queue()
    450   __skb_queue_tail(&local->pending[queue], skb);  in ieee80211_add_pending_skb()
    479   __skb_queue_tail(&local->pending[queue], skb);  in ieee80211_add_pending_skbs()
|
D | debugfs_sta.c | 189 tid_tx ? skb_queue_len(&tid_tx->pending) : 0); in sta_agg_status_read()
|
/net/bluetooth/rfcomm/ |
D | tty.c |
    69    struct sk_buff_head pending;  member
    274   skb_queue_head_init(&dev->pending);  in __rfcomm_dev_add()
    288   skb_queue_tail(&dev->pending, skb);  in __rfcomm_dev_add()
    356   int pending = 40 - atomic_read(&dev->wmem_alloc);  in rfcomm_room() local
    358   return max(0, pending) * dlc->mtu;  in rfcomm_room()
    613   if (!skb_queue_empty(&dev->pending)) {  in rfcomm_dev_data_ready()
    614   skb_queue_tail(&dev->pending, skb);  in rfcomm_dev_data_ready()
    671   while ((skb = skb_dequeue(&dev->pending))) {  in rfcomm_tty_copy_pending()
|
/net/dccp/ |
D | timer.c |
    211   !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))  in dccp_delack_timer()
    219   icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;  in dccp_delack_timer()
|
D | output.c |
    608   if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
    622   icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
|
/net/netlink/ |
D | af_netlink.h | 21 atomic_t pending; member
|
/net/ipv4/ |
D | udp.c |
    693   if (up->pending) {  in udp_flush_pending_frames()
    695   up->pending = 0;  in udp_flush_pending_frames()
    859   up->pending = 0;  in udp_push_pending_frames()
    903   if (up->pending) {  in udp_sendmsg()
    909   if (likely(up->pending)) {  in udp_sendmsg()
    910   if (unlikely(up->pending != AF_INET)) {  in udp_sendmsg()
    1051  if (unlikely(up->pending)) {  in udp_sendmsg()
    1068  up->pending = AF_INET;  in udp_sendmsg()
    1080  up->pending = 0;  in udp_sendmsg()
    1121  if (!up->pending) {  in udp_sendpage()
    [all …]
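
In udp_sendmsg(), up->pending records whether a corked datagram is being built on the socket (0 when idle, AF_INET or AF_INET6 while appending), and udp_push_pending_frames() clears it when the datagram finally goes out. From userspace this state is driven with the UDP_CORK socket option or MSG_MORE; a small example that builds one datagram from two send() calls (address and port are made up):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/udp.h>        /* UDP_CORK */
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_in dst = { .sin_family = AF_INET };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int on = 1, off = 0;

            dst.sin_port = htons(9999);                     /* example port */
            inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* example address */
            connect(fd, (struct sockaddr *)&dst, sizeof(dst));

            /* While corked, the kernel keeps up->pending set and appends. */
            setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
            send(fd, "part one, ", 10, 0);  /* appended to the pending datagram */
            send(fd, "part two", 8, 0);     /* still pending, not yet on the wire */

            /* Uncorking pushes the pending frames as a single UDP packet. */
            setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));

            close(fd);
            return 0;
    }
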
|
D | tcp_timer.c |
    226   !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))  in tcp_delack_timer_handler()
    233   icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;  in tcp_delack_timer_handler()
|
D | inet_connection_sock.c |
    370   icsk->icsk_pending = icsk->icsk_ack.pending = 0;  in inet_csk_init_xmit_timers()
    378   icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;  in inet_csk_clear_xmit_timers()
|
D | tcp.c |
    1435  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||  in tcp_cleanup_rbuf()
    1436  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&  in tcp_cleanup_rbuf()
    2594  icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;  in do_tcp_setsockopt()
|
D | tcp_output.c |
    3177  (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))  in tcp_send_delayed_ack()
    3201  if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {  in tcp_send_delayed_ack()
    3214  icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;  in tcp_send_delayed_ack()
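
icsk_ack.pending is a small bitmask: tcp_send_delayed_ack() arms the delayed-ACK timer by setting ICSK_ACK_SCHED | ICSK_ACK_TIMER, the timer handlers (tcp_delack_timer_handler(), dccp_delack_timer()) clear ICSK_ACK_TIMER, and ICSK_ACK_PUSHED/ICSK_ACK_PUSHED2 ask for a prompter ACK. A sketch of that flag handling with made-up MY_ACK_* constants and a local struct, meant only to mirror the bit manipulation:

    #include <linux/types.h>

    #define MY_ACK_SCHED    0x1     /* an ACK is owed */
    #define MY_ACK_TIMER    0x2     /* the delack timer is armed */
    #define MY_ACK_PUSHED   0x4     /* hint: ACK sooner than usual */

    struct my_ack {
            u8 pending;             /* bitmask of MY_ACK_* */
    };

    /* Arm a delayed ACK, tcp_send_delayed_ack()-style. */
    static void my_schedule_delayed_ack(struct my_ack *ack)
    {
            if (ack->pending & MY_ACK_TIMER)
                    return;         /* already armed; real code may shorten it */

            ack->pending |= MY_ACK_SCHED | MY_ACK_TIMER;
            /* ...start the delack timer here... */
    }

    /* Timer expiry, tcp_delack_timer_handler()-style. */
    static void my_delack_timer(struct my_ack *ack)
    {
            if (!(ack->pending & MY_ACK_TIMER))
                    return;         /* raced with an ACK sent directly */

            ack->pending &= ~MY_ACK_TIMER;
            /* ...send the ACK now, which also clears MY_ACK_SCHED... */
    }
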
|
/net/nfc/ |
D | digital_core.c |
    36    u8 pending;  member
    170   if (!cmd || cmd->pending) {  in digital_wq_cmd()
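
digital_wq_cmd() takes the first queued command and bails out if there is none or if it is already marked pending, so a command is only handed to the hardware once; the flag is set before issuing and cleared when the response completes. A tiny sketch of that guard with hypothetical my_dev/my_cmd types:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct my_cmd {
            struct list_head queue;
            u8 pending;             /* nonzero once handed to the hardware */
    };

    struct my_dev {
            struct mutex cmd_lock;
            struct list_head cmd_queue;
    };

    static void my_issue_cmd(struct my_dev *dev, struct my_cmd *cmd)
    {
            /* ...hand the command to the driver... */
    }

    static void my_wq_cmd(struct my_dev *dev)
    {
            struct my_cmd *cmd;

            mutex_lock(&dev->cmd_lock);
            cmd = list_first_entry_or_null(&dev->cmd_queue, struct my_cmd, queue);
            mutex_unlock(&dev->cmd_lock);

            if (!cmd || cmd->pending)
                    return;         /* nothing queued, or one already in flight */

            cmd->pending = 1;
            my_issue_cmd(dev, cmd);
    }
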
|