/kernel/linux/linux-5.10/net/x25/
D | x25_in.c
    210  int queued = 0;  in x25_state3_machine() local
    277  queued = 1;  in x25_state3_machine()
    315  queued = !sock_queue_rcv_skb(sk, skb);  in x25_state3_machine()
    319  queued = 1;  in x25_state3_machine()
    330  return queued;  in x25_state3_machine()
    418  int queued = 0, frametype, ns, nr, q, d, m;  in x25_process_rx_frame() local
    427  queued = x25_state1_machine(sk, skb, frametype);  in x25_process_rx_frame()
    430  queued = x25_state2_machine(sk, skb, frametype);  in x25_process_rx_frame()
    433  queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in x25_process_rx_frame()
    436  queued = x25_state4_machine(sk, skb, frametype);  in x25_process_rx_frame()
    [all …]

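Every hit in x25_in.c follows one convention: each x25_stateN_machine() handler returns a queued flag, and x25_process_rx_frame() switches on the connection state and passes that flag back so the caller knows whether the skb was consumed or still needs freeing. The rose, ax25 and netrom entries further down show the same shape. Below is a minimal user-space sketch of that pattern; the frame and conn types and the state names are illustrative stand-ins, not the kernel's.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for struct sock / struct sk_buff. */
    struct frame { int seq; char payload[64]; };
    struct conn  { int state; struct frame *rx_queue[8]; int rx_count; };

    enum { STATE_AWAIT_CALL = 1, STATE_DATA_TRANSFER = 3 };

    /* State 3 handler: returns 1 if the frame was queued (caller must not free it). */
    static int state3_machine(struct conn *c, struct frame *f)
    {
        int queued = 0;

        if (c->rx_count < 8) {
            c->rx_queue[c->rx_count++] = f;   /* keep the frame */
            queued = 1;
        }
        return queued;
    }

    /* Dispatcher: mirrors the shape of x25_process_rx_frame(), which switches
     * on the connection state and propagates the handler's queued flag. */
    static int process_rx_frame(struct conn *c, struct frame *f)
    {
        int queued = 0;

        switch (c->state) {
        case STATE_DATA_TRANSFER:
            queued = state3_machine(c, f);
            break;
        default:
            break;                /* other states ignore the frame */
        }
        return queued;
    }

    int main(void)
    {
        struct conn c = { .state = STATE_DATA_TRANSFER };
        struct frame *f = malloc(sizeof(*f));

        if (!process_rx_frame(&c, f))
            free(f);              /* not queued: ownership stays with the caller */
        printf("frames queued: %d\n", c.rx_count);
        return 0;
    }
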
D | x25_dev.c
    51  int queued = 1;  in x25_receive_data() local
    56  queued = x25_process_rx_frame(sk, skb);  in x25_receive_data()
    58  queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));  in x25_receive_data()
    62  return queued;  in x25_receive_data()

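The x25_dev.c hits show the other half of the receive path: a frame is either processed immediately via x25_process_rx_frame() or parked with sk_add_backlog() (bounded by sk->sk_rcvbuf), and queued reports whether it was accepted at all. A rough sketch of that decision, with a hypothetical owned_by_user flag standing in for the socket-lock check the kernel uses:

    #include <stdbool.h>
    #include <stdio.h>

    struct frame { int len; };
    struct socklike {
        bool  owned_by_user;   /* stand-in for sock_owned_by_user() */
        int   backlog_len;     /* bytes currently sitting in the backlog */
        int   rcvbuf;          /* stand-in for sk->sk_rcvbuf */
    };

    static int process_now(struct socklike *sk, struct frame *f)
    {
        (void)sk; (void)f;
        return 1;                            /* pretend the frame was consumed */
    }

    /* Returns 1 if the frame was queued somewhere, 0 if the caller must drop it. */
    static int receive_data(struct socklike *sk, struct frame *f)
    {
        int queued;

        if (!sk->owned_by_user) {
            queued = process_now(sk, f);             /* fast path */
        } else if (sk->backlog_len + f->len <= sk->rcvbuf) {
            sk->backlog_len += f->len;               /* defer, like sk_add_backlog() */
            queued = 1;
        } else {
            queued = 0;                              /* backlog full: drop */
        }
        return queued;
    }

    int main(void)
    {
        struct socklike sk = { .owned_by_user = true, .rcvbuf = 100 };
        struct frame f = { .len = 60 };

        printf("first frame queued: %d\n", receive_data(&sk, &f));
        printf("second frame queued: %d\n", receive_data(&sk, &f)); /* exceeds rcvbuf */
        return 0;
    }
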
/kernel/linux/linux-5.10/net/rose/ |
D | rose_in.c
    104  int queued = 0;  in rose_state3_machine() local
    167  queued = 1;  in rose_state3_machine()
    204  return queued;  in rose_state3_machine()
    265  int queued = 0, frametype, ns, nr, q, d, m;  in rose_process_rx_frame() local
    274  queued = rose_state1_machine(sk, skb, frametype);  in rose_process_rx_frame()
    277  queued = rose_state2_machine(sk, skb, frametype);  in rose_process_rx_frame()
    280  queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in rose_process_rx_frame()
    283  queued = rose_state4_machine(sk, skb, frametype);  in rose_process_rx_frame()
    286  queued = rose_state5_machine(sk, skb, frametype);  in rose_process_rx_frame()
    292  return queued;  in rose_process_rx_frame()

/kernel/linux/linux-5.10/net/dccp/ |
D | input.c
    45   int queued = 0;  in dccp_rcv_close() local
    76   queued = 1;  in dccp_rcv_close()
    86   return queued;  in dccp_rcv_close()
    91   int queued = 0;  in dccp_rcv_closereq() local
    101  return queued;  in dccp_rcv_closereq()
    113  queued = 1;  in dccp_rcv_closereq()
    120  return queued;  in dccp_rcv_closereq()
    524  int queued = 0;  in dccp_rcv_respond_partopen_state_process() local
    562  queued = 1; /* packet was queued  in dccp_rcv_respond_partopen_state_process()
    568  return queued;  in dccp_rcv_respond_partopen_state_process()
    [all …]

/kernel/linux/linux-5.10/drivers/net/wireless/mediatek/mt76/ |
D | debugfs.c
    41  i, q->queued, q->head, q->tail);  in mt76_queues_read()
    51  int i, queued;  in mt76_rx_queues_read() local
    56  queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;  in mt76_rx_queues_read()
    58  i, queued, q->head, q->tail);  in mt76_rx_queues_read()

D | sdio.c
    34   q->queued = 0;  in mt76s_alloc_rx_queue()
    98   if (q->queued > 0) {  in mt76s_get_next_rx_entry()
    101  q->queued--;  in mt76s_get_next_rx_entry()
    140  while (q->queued > 0) {  in mt76s_process_tx_queue()
    155  wake = q->stopped && q->queued < q->ndesc - 8;  in mt76s_process_tx_queue()
    159  if (!q->queued)  in mt76s_process_tx_queue()
    208  if (q->queued == q->ndesc)  in mt76s_tx_queue_skb()
    222  q->queued++;  in mt76s_tx_queue_skb()
    234  if (q->queued == q->ndesc)  in mt76s_tx_queue_skb_raw()
    247  q->queued++;  in mt76s_tx_queue_skb_raw()

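The mt76 hits track a queued counter per descriptor ring: sdio.c refuses to enqueue once queued reaches ndesc, decrements it as completions are reaped, and wakes a stopped queue once queued falls below ndesc - 8, while debugfs.c merely prints the counters (reporting free descriptors, ndesc - queued, for USB RX rings). A small sketch of that accounting, with the ring size and wake headroom as illustrative constants:

    #include <stdbool.h>
    #include <stdio.h>

    #define NDESC 16
    #define WAKE_HEADROOM 8       /* mirrors the "ndesc - 8" wake threshold above */

    struct queue {
        int  head, tail;
        int  queued;              /* descriptors currently in flight */
        int  ndesc;
        bool stopped;
    };

    /* Enqueue one descriptor; returns false when the ring is full. */
    static bool q_enqueue(struct queue *q)
    {
        if (q->queued == q->ndesc) {
            q->stopped = true;    /* producer must back off */
            return false;
        }
        q->head = (q->head + 1) % q->ndesc;
        q->queued++;
        return true;
    }

    /* Complete one descriptor; returns true when a stopped queue may be woken. */
    static bool q_complete(struct queue *q)
    {
        if (q->queued == 0)
            return false;
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        return q->stopped && q->queued < q->ndesc - WAKE_HEADROOM;
    }

    int main(void)
    {
        struct queue q = { .ndesc = NDESC };

        while (q_enqueue(&q))
            ;                                     /* fill the ring until it stops */
        for (int i = 0; i < NDESC; i++) {
            if (q_complete(&q)) {
                printf("wake producer at queued=%d\n", q.queued);
                q.stopped = false;                /* producer resumes */
            }
        }
        return 0;
    }
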
/kernel/linux/linux-5.10/net/ax25/ |
D | ax25_std_in.c
    143  int queued = 0;  in ax25_std_state3_machine() local
    225  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state3_machine()
    258  return queued;  in ax25_std_state3_machine()
    268  int queued = 0;  in ax25_std_state4_machine() local
    380  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state4_machine()
    413  return queued;  in ax25_std_state4_machine()
    421  int queued = 0, frametype, ns, nr, pf;  in ax25_std_frame_in() local
    427  queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    430  queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    433  queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_std_frame_in()
    [all …]

D | ax25_ds_in.c
    147  int queued = 0;  in ax25_ds_state3_machine() local
    240  queued = ax25_rx_iframe(ax25, skb);  in ax25_ds_state3_machine()
    273  return queued;  in ax25_ds_state3_machine()
    281  int queued = 0, frametype, ns, nr, pf;  in ax25_ds_frame_in() local
    287  queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    290  queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    293  queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_ds_frame_in()
    297  return queued;  in ax25_ds_frame_in()

D | ax25_in.c
    103  int queued = 0;  in ax25_rx_iframe() local
    145  queued = 1;  in ax25_rx_iframe()
    151  return queued;  in ax25_rx_iframe()
    159  int queued = 0;  in ax25_process_rx_frame() local
    167  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    173  queued = ax25_ds_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    175  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    180  return queued;  in ax25_process_rx_frame()

/kernel/linux/linux-5.10/net/netrom/ |
D | nr_in.c
    153  int queued = 0;  in nr_state3_machine() local
    226  queued = 1;  in nr_state3_machine()
    273  return queued;  in nr_state3_machine()
    280  int queued = 0, frametype;  in nr_process_rx_frame() local
    289  queued = nr_state1_machine(sk, skb, frametype);  in nr_process_rx_frame()
    292  queued = nr_state2_machine(sk, skb, frametype);  in nr_process_rx_frame()
    295  queued = nr_state3_machine(sk, skb, frametype);  in nr_process_rx_frame()
    301  return queued;  in nr_process_rx_frame()

/kernel/linux/linux-5.10/drivers/gpu/drm/ |
D | drm_flip_work.c
    63   list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
    107  list_splice_tail(&work->queued, &work->commited);  in drm_flip_work_commit()
    108  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_commit()
    151  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_init()
    168  WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));  in drm_flip_work_cleanup()

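drm_flip_work.c keeps two lists: tasks are appended to work->queued, and drm_flip_work_commit() splices the whole batch onto work->commited (the kernel really does spell the field that way) for a worker to run. A sketch of that queue-then-splice handover, using a hand-rolled singly linked list instead of list_head:

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal singly linked list with O(1) tail splice, standing in for list_head. */
    struct task  { int id; struct task *next; };
    struct tlist { struct task *head, *tail; };

    struct flip_work {
        struct tlist queued;    /* tasks waiting for the next commit */
        struct tlist commited;  /* tasks handed to the worker */
    };

    static void list_add_tail_(struct tlist *l, struct task *t)
    {
        t->next = NULL;
        if (l->tail)
            l->tail->next = t;
        else
            l->head = t;
        l->tail = t;
    }

    /* Move everything from src to the tail of dst, leaving src empty. */
    static void list_splice_tail_(struct tlist *src, struct tlist *dst)
    {
        if (!src->head)
            return;
        if (dst->tail)
            dst->tail->next = src->head;
        else
            dst->head = src->head;
        dst->tail = src->tail;
        src->head = src->tail = NULL;
    }

    static void queue_task(struct flip_work *w, int id)
    {
        struct task *t = calloc(1, sizeof(*t));

        t->id = id;
        list_add_tail_(&w->queued, t);           /* like drm_flip_work_queue_task() */
    }

    /* Like drm_flip_work_commit(): batch the queued tasks over to the worker's list. */
    static void commit(struct flip_work *w)
    {
        list_splice_tail_(&w->queued, &w->commited);
    }

    int main(void)
    {
        struct flip_work w = { 0 };

        queue_task(&w, 1);
        queue_task(&w, 2);
        commit(&w);
        for (struct task *t = w.commited.head; t; t = t->next)
            printf("committed task %d\n", t->id);
        return 0;
    }
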
/kernel/linux/linux-5.10/security/integrity/ima/ |
D | ima_asymmetric_keys.c
    31  bool queued = false;  in ima_post_key_create_or_update() local
    41  queued = ima_queue_key(keyring, payload, payload_len);  in ima_post_key_create_or_update()
    43  if (queued)  in ima_post_key_create_or_update()

D | ima_queue_keys.c
    106  bool queued = false;  in ima_queue_key() local
    116  queued = true;  in ima_queue_key()
    120  if (!queued)  in ima_queue_key()
    123  return queued;  in ima_queue_key()

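Together, the two IMA files defer key measurement: ima_queue_key() stores the key and returns true while measurement cannot happen yet, and ima_post_key_create_or_update() simply returns when the key was queued. A simplified sketch of that defer-until-ready pattern; the policy_loaded flag and the fixed-size array are stand-ins, not the kernel's data structures:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DEFERRED 4

    /* Hypothetical deferred-key store; the real code keeps a mutex-protected list. */
    static struct { char desc[32]; } deferred[MAX_DEFERRED];
    static int  deferred_count;
    static bool policy_loaded;            /* stands in for the "ready to measure" state */

    /* Returns true if the key was queued for later processing. */
    static bool queue_key(const char *desc)
    {
        bool queued = false;

        if (!policy_loaded && deferred_count < MAX_DEFERRED) {
            snprintf(deferred[deferred_count].desc,
                     sizeof(deferred[deferred_count].desc), "%s", desc);
            deferred_count++;
            queued = true;
        }
        return queued;
    }

    static void post_key_create(const char *desc)
    {
        if (queue_key(desc))
            return;                       /* measured later, once policy is loaded */
        printf("measuring key %s immediately\n", desc);
    }

    int main(void)
    {
        post_key_create("early-boot-key");     /* queued */
        policy_loaded = true;
        post_key_create("runtime-key");        /* measured right away */
        printf("%d key(s) still queued\n", deferred_count);
        return 0;
    }
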
/kernel/linux/linux-5.10/Documentation/userspace-api/media/mediactl/ |
D | media-request-ioc-queue.rst
    34  If the request was successfully queued, then the file descriptor can be
    37  If the request was already queued before, then ``EBUSY`` is returned.
    42  Once a request is queued, then the driver is required to gracefully handle
    49  queued directly and you next try to queue a request, or vice versa.
    62  The request was already queued or the application queued the first

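The fragments above describe queueing a request file descriptor and the EBUSY result when the same request is queued twice. A hedged user-space sketch of the ioctl sequence; the /dev/media0 path is an assumption and error handling is trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/media.h>

    int main(void)
    {
        int media_fd = open("/dev/media0", O_RDWR);   /* device path is an assumption */
        int request_fd;

        if (media_fd < 0 ||
            ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd) < 0) {
            perror("request alloc");
            return 1;
        }

        /* ... set controls / queue buffers against request_fd here ... */

        if (ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
            perror("queue");          /* EBUSY if the request was already queued */

        /* Re-queueing the same fd without MEDIA_REQUEST_IOC_REINIT fails with EBUSY. */
        close(request_fd);
        close(media_fd);
        return 0;
    }
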
D | media-request-ioc-reinit.rst
    40  A request can only be re-initialized if it either has not been queued
    41  yet, or if it was queued and completed. Otherwise it will set ``errno``
    51  The request is queued but not yet completed.

/kernel/linux/linux-5.10/drivers/dma/idxd/ |
D | irq.c
    180  int queued = 0;  in irq_process_pending_llist() local
    194  queued++;  in irq_process_pending_llist()
    198  return queued;  in irq_process_pending_llist()
    205  int queued = 0;  in irq_process_work_list() local
    222  queued++;  in irq_process_work_list()
    226  return queued;  in irq_process_work_list()

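Both idxd helpers walk a list of submitted descriptors, complete the ones the hardware has finished, and return how many remain queued. A toy version of that walk, with a done flag standing in for the hardware completion status:

    #include <stdio.h>

    #define NDESC 5

    /* Hypothetical descriptor: 'done' mimics the hardware completion status. */
    struct desc { int id; int done; };

    /* Complete finished descriptors, return how many are still queued. */
    static int process_work_list(struct desc *descs, int n)
    {
        int queued = 0;

        for (int i = 0; i < n; i++) {
            if (descs[i].done) {
                printf("completing descriptor %d\n", descs[i].id);
                continue;
            }
            queued++;             /* hardware has not finished this one yet */
        }
        return queued;
    }

    int main(void)
    {
        struct desc descs[NDESC] = {
            { 0, 1 }, { 1, 0 }, { 2, 1 }, { 3, 0 }, { 4, 0 },
        };

        printf("%d descriptor(s) still queued\n", process_work_list(descs, NDESC));
        return 0;
    }
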
/kernel/linux/linux-5.10/drivers/md/ |
D | dm-cache-background-tracker.c
    26   struct list_head queued;  member
    47   INIT_LIST_HEAD(&b->queued);  in btracker_create()
    205  list_add(&w->list, &b->queued);  in btracker_queue()
    219  if (list_empty(&b->queued))  in btracker_issue()
    222  w = list_first_entry(&b->queued, struct bt_work, list);  in btracker_issue()

/kernel/linux/linux-5.10/virt/kvm/ |
D | async_pf.c
    134  vcpu->async_pf.queued = 0;  in kvm_clear_async_pf_completion_queue()
    154  vcpu->async_pf.queued--;  in kvm_check_async_pf_completion()
    168  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)  in kvm_setup_async_pf()
    195  vcpu->async_pf.queued++;  in kvm_setup_async_pf()
    226  vcpu->async_pf.queued++;  in kvm_async_pf_wakeup_all()

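The async_pf hits keep a per-vCPU queued counter: setup refuses new work once it reaches ASYNC_PF_PER_VCPU, completions decrement it, and clearing the queue resets it. A compact sketch of that bounded counter; the limit value here is only illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define PF_PER_VCPU 32     /* illustrative cap, in the spirit of ASYNC_PF_PER_VCPU */

    struct vcpu_apf { int queued; };

    /* Returns false when the per-vCPU budget of outstanding faults is exhausted. */
    static bool setup_async_pf(struct vcpu_apf *apf)
    {
        if (apf->queued >= PF_PER_VCPU)
            return false;
        apf->queued++;                 /* one more fault is now in flight */
        return true;
    }

    static void check_completion(struct vcpu_apf *apf)
    {
        if (apf->queued > 0)
            apf->queued--;             /* one completed fault consumed */
    }

    int main(void)
    {
        struct vcpu_apf apf = { 0 };
        int accepted = 0;

        for (int i = 0; i < 40; i++)
            if (setup_async_pf(&apf))
                accepted++;
        check_completion(&apf);
        printf("accepted %d, still queued %d\n", accepted, apf.queued);
        return 0;
    }
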
/kernel/linux/linux-5.10/fs/xfs/ |
D | xfs_mru_cache.c
    102  unsigned int queued; /* work has been queued */  member
    204  if (!mru->queued) {  in _xfs_mru_cache_list_insert()
    205  mru->queued = 1;  in _xfs_mru_cache_list_insert()
    280  mru->queued = next;  in _xfs_mru_cache_reap()
    281  if ((mru->queued > 0)) {  in _xfs_mru_cache_reap()
    388  if (mru->queued) {  in xfs_mru_cache_flush()

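In xfs_mru_cache.c, mru->queued records whether the reap work is already scheduled, so inserts only arm it once and the reap re-arms it only while entries remain. A stripped-down sketch of that single-shot scheduling flag; schedule_reap() is a stand-in for scheduling delayed work:

    #include <stdio.h>

    /* Hypothetical MRU cache bookkeeping; the real code drives a delayed workqueue. */
    struct mru_cache {
        unsigned int queued;     /* reap work has been scheduled */
        int          entries;
    };

    static void schedule_reap(struct mru_cache *mru)
    {
        (void)mru;
        printf("scheduling reap work\n");
    }

    static void cache_insert(struct mru_cache *mru)
    {
        mru->entries++;
        if (!mru->queued) {                 /* only the first insert arms the timer */
            mru->queued = 1;
            schedule_reap(mru);
        }
    }

    static void reap(struct mru_cache *mru)
    {
        if (mru->entries > 0)
            mru->entries--;                 /* expire one entry per pass */
        mru->queued = mru->entries > 0;     /* re-arm only if work remains */
        if (mru->queued)
            schedule_reap(mru);
    }

    int main(void)
    {
        struct mru_cache mru = { 0 };

        cache_insert(&mru);   /* schedules the reap */
        cache_insert(&mru);   /* does not schedule again */
        reap(&mru);
        printf("queued after reap: %u\n", mru.queued);
        return 0;
    }
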
/kernel/linux/linux-5.10/drivers/media/platform/vsp1/ |
D | vsp1_dl.c
    224   struct vsp1_dl_list *queued;  member
    841   if (!dlm->queued)  in vsp1_dl_list_hw_update_pending()
    899   __vsp1_dl_list_put(dlm->queued);  in vsp1_dl_list_commit_continuous()
    900   dlm->queued = dl;  in vsp1_dl_list_commit_continuous()
    1020  if (dlm->queued) {  in vsp1_dlm_irq_frame_end()
    1021  if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)  in vsp1_dlm_irq_frame_end()
    1023  dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;  in vsp1_dlm_irq_frame_end()
    1026  dlm->active = dlm->queued;  in vsp1_dlm_irq_frame_end()
    1027  dlm->queued = NULL;  in vsp1_dlm_irq_frame_end()
    1038  dlm->queued = dlm->pending;  in vsp1_dlm_irq_frame_end()
    [all …]

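The vsp1_dl.c hits rotate three display-list slots: a commit replaces the queued list (dropping the one it supersedes), and the frame-end interrupt promotes queued to active and pending to queued. The sketch below models only that rotation and ignores the internal-frame-end flags and free-list handling visible in the hits:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical display list; the real struct vsp1_dl_list carries DMA buffers. */
    struct dl { int id; };

    struct dl_manager {
        struct dl *active;    /* being processed by the hardware */
        struct dl *queued;    /* loaded, takes effect at the next frame end */
        struct dl *pending;   /* waiting for a commit slot */
    };

    /* Simplified continuous-mode commit: the newest list replaces whatever was queued. */
    static void commit(struct dl_manager *dlm, struct dl *dl)
    {
        free(dlm->queued);          /* drop the superseded list */
        dlm->queued = dl;
    }

    /* Frame-end interrupt: rotate queued -> active and pending -> queued. */
    static void frame_end_irq(struct dl_manager *dlm)
    {
        if (dlm->queued) {
            free(dlm->active);      /* release the list the hardware finished with */
            dlm->active = dlm->queued;
            dlm->queued = NULL;
        }
        if (dlm->pending) {
            dlm->queued = dlm->pending;
            dlm->pending = NULL;
        }
    }

    static struct dl *new_dl(int id)
    {
        struct dl *dl = calloc(1, sizeof(*dl));

        dl->id = id;
        return dl;
    }

    int main(void)
    {
        struct dl_manager dlm = { 0 };

        commit(&dlm, new_dl(1));
        commit(&dlm, new_dl(2));    /* list 1 is dropped before it ever ran */
        frame_end_irq(&dlm);
        printf("active list: %d\n", dlm.active ? dlm.active->id : -1);
        free(dlm.active);
        return 0;
    }
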
/kernel/linux/linux-5.10/Documentation/features/locking/queued-spinlocks/ |
D | arch-support.txt
    2  # Feature name: queued-spinlocks
    4  # description: arch supports queued spinlocks

/kernel/linux/linux-5.10/Documentation/features/locking/queued-rwlocks/ |
D | arch-support.txt
    2  # Feature name: queued-rwlocks
    4  # description: arch supports queued rwlocks

/kernel/linux/linux-5.10/sound/firewire/fireworks/ |
D | fireworks_hwdep.c
    128  bool queued;  in hwdep_read() local
    133  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
    135  while (!dev_lock_changed && !queued) {  in hwdep_read()
    144  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
    151  else if (queued)  in hwdep_read()

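hwdep_read() decides that data is queued simply by comparing the ring's push and pull pointers, and sleeps until either the pointers differ or the device lock state changes. A single-threaded sketch of the pointer comparison (the waitqueue sleep is only noted in a comment):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8

    /* Hypothetical response ring: data is queued whenever push and pull differ. */
    struct resp_ring {
        int buf[RING_SIZE];
        int push_ptr;          /* next slot the producer will write */
        int pull_ptr;          /* next slot the consumer will read  */
    };

    static bool ring_queued(const struct resp_ring *r)
    {
        return r->push_ptr != r->pull_ptr;
    }

    static void ring_push(struct resp_ring *r, int v)
    {
        r->buf[r->push_ptr] = v;
        r->push_ptr = (r->push_ptr + 1) % RING_SIZE;
    }

    static int ring_pull(struct resp_ring *r)
    {
        int v = r->buf[r->pull_ptr];

        r->pull_ptr = (r->pull_ptr + 1) % RING_SIZE;
        return v;
    }

    int main(void)
    {
        struct resp_ring r = { 0 };

        printf("queued before push: %d\n", ring_queued(&r));
        ring_push(&r, 42);
        /* the driver sleeps on a waitqueue until this condition becomes true */
        if (ring_queued(&r))
            printf("pulled %d\n", ring_pull(&r));
        return 0;
    }
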
/kernel/linux/linux-5.10/Documentation/userspace-api/media/v4l/ |
D | vidioc-streamon.rst
    52  If ``VIDIOC_STREAMON`` fails then any already queued buffers will remain
    53  queued.
    64  If buffers have been queued with :ref:`VIDIOC_QBUF` and
    66  ``VIDIOC_STREAMON``, then those queued buffers will also be removed from
    78  but ``VIDIOC_STREAMOFF`` will return queued buffers to their starting

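This page describes how queued buffers behave across VIDIOC_STREAMON and VIDIOC_STREAMOFF. A hedged user-space sketch of the queue-then-stream sequence; the /dev/video0 path and the prior VIDIOC_REQBUFS/mmap setup are assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* device path is an assumption */
        struct v4l2_buffer buf;
        int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

        if (fd < 0)
            return 1;

        /* Queue buffer 0 (assumes buffers were set up with VIDIOC_REQBUFS/mmap). */
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = 0;
        if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
            perror("VIDIOC_QBUF");

        /* If this fails, the buffer queued above stays queued, as the text says. */
        if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
            perror("VIDIOC_STREAMON");

        /* VIDIOC_STREAMOFF returns all queued buffers to their dequeued state. */
        ioctl(fd, VIDIOC_STREAMOFF, &type);
        close(fd);
        return 0;
    }
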
/kernel/linux/linux-5.10/Documentation/usb/ |
D | ohci.rst
    22  - interrupt transfers can be larger, and can be queued
    28  types can be queued. That was also true in "usb-ohci", except for interrupt
    30  to overhead in IRQ processing. When interrupt transfers are queued, those
