| /kernel/linux/linux-6.6/drivers/net/wireless/st/cw1200/ |
| D | queue.c |
|  3    * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
|  12   #include "queue.h"
|  27   static inline void __cw1200_queue_lock(struct cw1200_queue *queue)  in __cw1200_queue_lock() argument
|  29   struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_lock()
|  30   if (queue->tx_locked_cnt++ == 0) {  in __cw1200_queue_lock()
|  31   pr_debug("[TX] Queue %d is locked.\n",  in __cw1200_queue_lock()
|  32   queue->queue_id);  in __cw1200_queue_lock()
|  33   ieee80211_stop_queue(stats->priv->hw, queue->queue_id);  in __cw1200_queue_lock()
|  37   static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)  in __cw1200_queue_unlock() argument
|  39   struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_unlock()
|  [all …]
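The cw1200 preview above shows a counted-lock pattern: `__cw1200_queue_lock()` only stops the mac80211 queue when the counter goes from 0 to 1, and the matching unlock restarts it when the count drops back to 0, so nested lock/unlock pairs are cheap. Below is a minimal, self-contained userspace sketch of that pattern, not the driver's code; `txq_lock`/`txq_unlock` and the `stop_hw_queue()`/`wake_hw_queue()` hooks are hypothetical stand-ins for `ieee80211_stop_queue()`/`ieee80211_wake_queue()`.

```c
/* Minimal userspace model of a counted TX-queue lock (assumed names). */
#include <assert.h>
#include <stdio.h>

struct txq {
	int queue_id;
	unsigned int tx_locked_cnt;   /* nesting counter, like tx_locked_cnt */
};

static void stop_hw_queue(int id) { printf("queue %d stopped\n", id); }
static void wake_hw_queue(int id) { printf("queue %d woken\n", id); }

/* Stop the hardware queue only on the first lock request. */
static void txq_lock(struct txq *q)
{
	if (q->tx_locked_cnt++ == 0)
		stop_hw_queue(q->queue_id);
}

/* Wake the hardware queue only when the last lock is released. */
static void txq_unlock(struct txq *q)
{
	assert(q->tx_locked_cnt > 0);
	if (--q->tx_locked_cnt == 0)
		wake_hw_queue(q->queue_id);
}

int main(void)
{
	struct txq q = { .queue_id = 2, .tx_locked_cnt = 0 };

	txq_lock(&q);    /* stops the queue */
	txq_lock(&q);    /* nested: no extra stop */
	txq_unlock(&q);  /* still locked once */
	txq_unlock(&q);  /* wakes the queue */
	return 0;
}
```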
|
| /kernel/linux/linux-5.10/drivers/net/wireless/st/cw1200/ |
| D | queue.c |
|  3    * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
|  11   #include "queue.h"
|  26   static inline void __cw1200_queue_lock(struct cw1200_queue *queue)  in __cw1200_queue_lock() argument
|  28   struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_lock()
|  29   if (queue->tx_locked_cnt++ == 0) {  in __cw1200_queue_lock()
|  30   pr_debug("[TX] Queue %d is locked.\n",  in __cw1200_queue_lock()
|  31   queue->queue_id);  in __cw1200_queue_lock()
|  32   ieee80211_stop_queue(stats->priv->hw, queue->queue_id);  in __cw1200_queue_lock()
|  36   static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)  in __cw1200_queue_unlock() argument
|  38   struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_unlock()
|  [all …]
|
| /kernel/linux/linux-5.10/drivers/usb/gadget/function/ |
| D | uvc_queue.c |
|  25   * Video buffers queue management.
|  31   * the videobuf2 queue operations by serializing calls to videobuf2 and a
|  32   * spinlock to protect the IRQ queue that holds the buffers to be processed by
|  37   * videobuf2 queue operations
|  44   struct uvc_video_queue *queue = vb2_get_drv_priv(vq);  in uvc_queue_setup() local
|  45   struct uvc_video *video = container_of(queue, struct uvc_video, queue);  in uvc_queue_setup()
|  59   struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_prepare() local
|  69   if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))  in uvc_buffer_prepare()
|  85   struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_queue() local
|  90   spin_lock_irqsave(&queue->irqlock, flags);  in uvc_buffer_queue()
|  [all …]
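The UVC queue comment quoted above describes the locking split: a mutex serializes the videobuf2 queue operations, while a spinlock (`irqlock`) protects the IRQ queue of buffers waiting for the interrupt handler. The sketch below is a self-contained userspace model of that spinlock-protected buffer FIFO only; a pthread mutex stands in for `spin_lock_irqsave()`, and all names (`buffer_queue`, `buffer_dequeue`) are illustrative, not the driver's API.

```c
/* Userspace model of a spinlock-protected "IRQ queue" of pending buffers. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct buffer {
	int index;
	struct buffer *next;
};

struct video_queue {
	struct buffer *head, *tail;     /* buffers waiting for the ISR */
	pthread_mutex_t irqlock;        /* stands in for the driver's spinlock */
};

/* Called from the "queue buffer" path (process context). */
static void buffer_queue(struct video_queue *q, struct buffer *buf)
{
	buf->next = NULL;
	pthread_mutex_lock(&q->irqlock);
	if (q->tail)
		q->tail->next = buf;
	else
		q->head = buf;
	q->tail = buf;
	pthread_mutex_unlock(&q->irqlock);
}

/* Called from the "interrupt" path: pick the next buffer to fill. */
static struct buffer *buffer_dequeue(struct video_queue *q)
{
	struct buffer *buf;

	pthread_mutex_lock(&q->irqlock);
	buf = q->head;
	if (buf) {
		q->head = buf->next;
		if (!q->head)
			q->tail = NULL;
	}
	pthread_mutex_unlock(&q->irqlock);
	return buf;
}

int main(void)
{
	struct video_queue q = { .head = NULL, .tail = NULL,
				 .irqlock = PTHREAD_MUTEX_INITIALIZER };
	struct buffer b0 = { .index = 0 }, b1 = { .index = 1 };

	buffer_queue(&q, &b0);
	buffer_queue(&q, &b1);
	printf("dequeued %d\n", buffer_dequeue(&q)->index);  /* 0 */
	printf("dequeued %d\n", buffer_dequeue(&q)->index);  /* 1 */
	return 0;
}
```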
|
| /kernel/linux/linux-5.10/drivers/media/usb/uvc/ |
| D | uvc_queue.c |
|  24   * Video buffers queue management.
|  30   * the videobuf2 queue operations by serializing calls to videobuf2 and a
|  31   * spinlock to protect the IRQ queue that holds the buffers to be processed by
|  36   uvc_queue_to_stream(struct uvc_video_queue *queue)  in uvc_queue_to_stream() argument
|  38   return container_of(queue, struct uvc_streaming, queue);  in uvc_queue_to_stream()
|  49   * This function must be called with the queue spinlock held.
|  51   static void uvc_queue_return_buffers(struct uvc_video_queue *queue,  in uvc_queue_return_buffers() argument
|  58   while (!list_empty(&queue->irqqueue)) {  in uvc_queue_return_buffers()
|  59   struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,  in uvc_queue_return_buffers()
|  61   queue);  in uvc_queue_return_buffers()
|  [all …]
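`uvc_queue_to_stream()` in the preview above recovers the enclosing `struct uvc_streaming` from a pointer to its embedded queue via `container_of()`. The following self-contained sketch shows the same idiom with a userspace definition of `container_of()`; the structure names (`struct streaming`, `queue_to_stream`) are illustrative stand-ins, not the UVC driver's types.

```c
/* container_of(): recover the enclosing structure from an embedded member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct video_queue {
	int buf_count;
};

struct streaming {
	int intf_num;
	struct video_queue queue;   /* embedded member */
};

static struct streaming *queue_to_stream(struct video_queue *q)
{
	return container_of(q, struct streaming, queue);
}

int main(void)
{
	struct streaming s = { .intf_num = 1, .queue = { .buf_count = 4 } };
	struct video_queue *q = &s.queue;

	/* From the embedded queue pointer we get back the stream it lives in. */
	printf("intf=%d bufs=%d\n", queue_to_stream(q)->intf_num, q->buf_count);
	return 0;
}
```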
|
| /kernel/linux/linux-6.6/drivers/media/usb/uvc/ |
| D | uvc_queue.c |
|  24   * Video buffers queue management.
|  30   * the videobuf2 queue operations by serializing calls to videobuf2 and a
|  31   * spinlock to protect the IRQ queue that holds the buffers to be processed by
|  43   * This function must be called with the queue spinlock held.
|  45   static void uvc_queue_return_buffers(struct uvc_video_queue *queue,  in uvc_queue_return_buffers() argument
|  52   while (!list_empty(&queue->irqqueue)) {  in uvc_queue_return_buffers()
|  53   struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,  in uvc_queue_return_buffers()
|  55   queue);  in uvc_queue_return_buffers()
|  56   list_del(&buf->queue);  in uvc_queue_return_buffers()
|  63   * videobuf2 queue operations
|  [all …]
|
| /kernel/linux/linux-6.6/drivers/usb/gadget/function/ |
| D | uvc_queue.c |
|  26   * Video buffers queue management.
|  32   * the videobuf2 queue operations by serializing calls to videobuf2 and a
|  33   * spinlock to protect the IRQ queue that holds the buffers to be processed by
|  38   * videobuf2 queue operations
|  45   struct uvc_video_queue *queue = vb2_get_drv_priv(vq);  in uvc_queue_setup() local
|  46   struct uvc_video *video = container_of(queue, struct uvc_video, queue);  in uvc_queue_setup()
|  73   struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_prepare() local
|  83   if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))  in uvc_buffer_prepare()
|  87   if (queue->use_sg) {  in uvc_buffer_prepare()
|  104  struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_queue() local
|  [all …]
|
| /kernel/linux/linux-5.10/drivers/nvme/target/ |
| D | tcp.c |
|  57   struct nvmet_tcp_queue *queue;  member
|  159  static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,  in nvmet_tcp_cmd_tag() argument
|  162  if (unlikely(!queue->nr_cmds)) {  in nvmet_tcp_cmd_tag()
|  167  return cmd - queue->cmds;  in nvmet_tcp_cmd_tag()
|  195  nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)  in nvmet_tcp_get_cmd() argument
|  199  cmd = list_first_entry_or_null(&queue->free_list,  in nvmet_tcp_get_cmd()
|  215  if (unlikely(cmd == &cmd->queue->connect))  in nvmet_tcp_put_cmd()
|  218  list_add_tail(&cmd->entry, &cmd->queue->free_list);  in nvmet_tcp_put_cmd()
|  221  static inline int queue_cpu(struct nvmet_tcp_queue *queue)  in queue_cpu() argument
|  223  return queue->sock->sk->sk_incoming_cpu;  in queue_cpu()
|  [all …]
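The nvmet-tcp preview shows a command pool: commands live in one backing array, the wire tag is recovered by pointer arithmetic (`cmd - queue->cmds`), and idle commands sit on a free list that `nvmet_tcp_get_cmd()`/`nvmet_tcp_put_cmd()` pop and push. Below is a hedged, self-contained userspace model of that scheme; the `struct cmd`/`cmd_queue` types and function names are illustrative only, and the dedicated "connect" command special case is omitted.

```c
/* Array-backed command pool: tag == array index, free commands on a list. */
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	struct cmd *next_free;
	int in_use;
};

struct cmd_queue {
	struct cmd *cmds;       /* backing array */
	unsigned int nr_cmds;
	struct cmd *free_list;
};

static unsigned int cmd_tag(struct cmd_queue *q, struct cmd *c)
{
	return (unsigned int)(c - q->cmds);   /* index doubles as the tag */
}

static struct cmd *get_cmd(struct cmd_queue *q)
{
	struct cmd *c = q->free_list;

	if (!c)
		return NULL;                  /* pool exhausted */
	q->free_list = c->next_free;
	c->in_use = 1;
	return c;
}

static void put_cmd(struct cmd_queue *q, struct cmd *c)
{
	c->in_use = 0;
	c->next_free = q->free_list;
	q->free_list = c;
}

int main(void)
{
	struct cmd_queue q = { .nr_cmds = 4 };

	q.cmds = calloc(q.nr_cmds, sizeof(*q.cmds));
	if (!q.cmds)
		return 1;
	for (unsigned int i = 0; i < q.nr_cmds; i++)
		put_cmd(&q, &q.cmds[i]);      /* everything starts free */

	struct cmd *a = get_cmd(&q);
	struct cmd *b = get_cmd(&q);
	printf("tags: %u %u\n", cmd_tag(&q, a), cmd_tag(&q, b));
	put_cmd(&q, a);
	put_cmd(&q, b);
	free(q.cmds);
	return 0;
}
```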
|
| /kernel/linux/linux-6.6/drivers/net/wireless/broadcom/b43legacy/ |
| D | pio.c |
|  22   static void tx_start(struct b43legacy_pioqueue *queue)  in tx_start() argument
|  24   b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_start()
|  28   static void tx_octet(struct b43legacy_pioqueue *queue,  in tx_octet() argument
|  31   if (queue->need_workarounds) {  in tx_octet()
|  32   b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
|  33   b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
|  36   b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
|  38   b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
|  63   static void tx_data(struct b43legacy_pioqueue *queue,  in tx_data() argument
|  71   if (queue->need_workarounds) {  in tx_data()
|  [all …]
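The `tx_octet()` lines above show a register-ordering quirk: on hardware revisions that need workarounds, the data register is written before the control register, otherwise control goes first. The self-contained sketch below models only that ordering decision; the register offsets, the control-bit value, and the `pio_write()` stub are placeholders, not the real B43legacy register map.

```c
/* Userspace model of a "write order depends on a workaround flag" quirk. */
#include <stdint.h>
#include <stdio.h>

#define PIO_TXCTL     0x00    /* placeholder offsets/values */
#define PIO_TXDATA    0x02
#define TXCTL_WRITEHI 0x0001

struct pioqueue {
	int need_workarounds;
};

static void pio_write(struct pioqueue *q, uint16_t reg, uint16_t val)
{
	(void)q;
	printf("write reg 0x%02x <- 0x%04x\n", reg, val);  /* stands in for MMIO */
}

static void tx_octet(struct pioqueue *q, uint8_t octet)
{
	if (q->need_workarounds) {
		/* buggy revisions: data first, then the control bit */
		pio_write(q, PIO_TXDATA, octet);
		pio_write(q, PIO_TXCTL, TXCTL_WRITEHI);
	} else {
		/* normal ordering: arm the control bit, then write data */
		pio_write(q, PIO_TXCTL, TXCTL_WRITEHI);
		pio_write(q, PIO_TXDATA, octet);
	}
}

int main(void)
{
	struct pioqueue q = { .need_workarounds = 1 };

	tx_octet(&q, 0xAB);
	return 0;
}
```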
|
| /kernel/linux/linux-5.10/drivers/net/wireless/broadcom/b43legacy/ |
| D | pio.c |
|  22   static void tx_start(struct b43legacy_pioqueue *queue)  in tx_start() argument
|  24   b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_start()
|  28   static void tx_octet(struct b43legacy_pioqueue *queue,  in tx_octet() argument
|  31   if (queue->need_workarounds) {  in tx_octet()
|  32   b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
|  33   b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
|  36   b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
|  38   b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
|  63   static void tx_data(struct b43legacy_pioqueue *queue,  in tx_data() argument
|  71   if (queue->need_workarounds) {  in tx_data()
|  [all …]
|
| /kernel/linux/linux-6.6/drivers/nvme/target/ |
| D | tcp.c |
|  60   * queue before determining it to be idle. This optional module behavior
|  94   struct nvmet_tcp_queue *queue;  member
|  197  static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,  in nvmet_tcp_cmd_tag() argument
|  200  if (unlikely(!queue->nr_cmds)) {  in nvmet_tcp_cmd_tag()
|  205  return cmd - queue->cmds;  in nvmet_tcp_cmd_tag()
|  233  nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)  in nvmet_tcp_get_cmd() argument
|  237  cmd = list_first_entry_or_null(&queue->free_list,  in nvmet_tcp_get_cmd()
|  253  if (unlikely(cmd == &cmd->queue->connect))  in nvmet_tcp_put_cmd()
|  256  list_add_tail(&cmd->entry, &cmd->queue->free_list);  in nvmet_tcp_put_cmd()
|  259  static inline int queue_cpu(struct nvmet_tcp_queue *queue)  in queue_cpu() argument
|  [all …]
|
| /kernel/liteos_a/kernel/include/ |
| D | los_queue.h |
|  33   * @defgroup los_queue Queue
|  51   * Queue error code: The maximum number of queue resources is configured to 0.
|  55   …* Solution: Configure the maximum number of queue resources to be greater than 0. If queue modules…
|  56   * set the configuration item for the tailoring of the maximum number of queue resources to NO.
|  62   * Queue error code: The queue block memory fails to be initialized.
|  66   …* Solution: Allocate the queue block bigger memory partition, or decrease the maximum number of qu…
|  72   * Queue error code: The memory for queue creation fails to be requested.
|  76   …* Solution: Allocate more memory for queue creation, or decrease the queue length and the number o…
|  77   * in the queue to be created.
|  83   * Queue error code: The size of the biggest message in the created queue is too big.
|  [all …]
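The los_queue.h excerpts above document the error codes a caller can get back from queue creation and use (zero configured resources, control-block init failure, out of memory, oversized message). Below is a hedged usage sketch showing how those codes typically surface; it assumes the usual LiteOS prototypes (`LOS_QueueCreate`, `LOS_QueueWrite`, `LOS_QueueRead`, `LOS_QueueDelete` returning `LOS_OK` on success) and should be checked against the actual header before use, since exact signatures and error macros may differ between LiteOS-A and LiteOS-M.

```c
/* Hedged sketch: assumed LiteOS queue API, verify against los_queue.h. */
#include <stdio.h>
#include "los_queue.h"

#define MSG_LEN 8U   /* biggest message size; must fit the configured limit */

static UINT32 g_demoQueue;

VOID QueueDemo(VOID)
{
	UINT32 ret;
	CHAR send[MSG_LEN] = "ping";
	CHAR recv[MSG_LEN] = {0};

	/* 4 messages deep, MSG_LEN bytes each. A non-LOS_OK return maps to one
	 * of the LOS_ERRNO_QUEUE_* codes documented in this header. */
	ret = LOS_QueueCreate("demo", 4, &g_demoQueue, 0, MSG_LEN);
	if (ret != LOS_OK) {
		printf("queue create failed: 0x%x\n", ret);
		return;
	}

	ret = LOS_QueueWrite(g_demoQueue, send, sizeof(send), LOS_WAIT_FOREVER);
	if (ret == LOS_OK)
		ret = LOS_QueueRead(g_demoQueue, recv, sizeof(recv), LOS_WAIT_FOREVER);

	printf("read \"%s\" (ret 0x%x)\n", recv, ret);
	(VOID)LOS_QueueDelete(g_demoQueue);
}
```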
|
| /kernel/linux/linux-6.6/drivers/scsi/arm/ |
| D | queue.c |
|  3    * linux/drivers/acorn/scsi/queue.c: queue handling primitives
|  50   #include "queue.h"
|  55   * Function: void queue_initialise (Queue_t *queue)
|  56   * Purpose : initialise a queue
|  57   * Params : queue - queue to initialise
|  59   int queue_initialise (Queue_t *queue)  in queue_initialise() argument
|  64   spin_lock_init(&queue->queue_lock);  in queue_initialise()
|  65   INIT_LIST_HEAD(&queue->head);  in queue_initialise()
|  66   INIT_LIST_HEAD(&queue->free);  in queue_initialise()
|  74   queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);  in queue_initialise()
|  [all …]
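The acorn SCSI queue above preallocates all its entries (`QE_t`) in `queue_initialise()` and keeps them on a `free` list next to the live `head` list, so queuing a command never allocates memory at runtime. The following self-contained userspace sketch models that two-list design; the sizes, names, and singly-linked lists are illustrative simplifications of the driver's `list_head`-based code.

```c
/* Two-list queue: a fixed pool of entries moved between "free" and "head". */
#include <stdio.h>
#include <stdlib.h>

#define NR_QE 8

struct qe {                        /* stands in for QE_t */
	struct qe *next;
	int cmd;                   /* stands in for the queued SCSI command */
};

struct queue {                     /* stands in for Queue_t */
	struct qe *head, *head_tail;   /* queued entries, FIFO order */
	struct qe *free;               /* spare entries */
	struct qe *alloc;              /* backing array, freed at teardown */
};

static int queue_initialise(struct queue *q)
{
	q->head = q->head_tail = q->free = NULL;
	q->alloc = calloc(NR_QE, sizeof(*q->alloc));
	if (!q->alloc)
		return 0;
	for (int i = 0; i < NR_QE; i++) {   /* everything starts on the free list */
		q->alloc[i].next = q->free;
		q->free = &q->alloc[i];
	}
	return 1;
}

static int queue_add(struct queue *q, int cmd)
{
	struct qe *e = q->free;

	if (!e)
		return 0;                   /* no spare entries left */
	q->free = e->next;
	e->cmd = cmd;
	e->next = NULL;
	if (q->head_tail)
		q->head_tail->next = e;
	else
		q->head = e;
	q->head_tail = e;
	return 1;
}

static int queue_remove(struct queue *q, int *cmd)
{
	struct qe *e = q->head;

	if (!e)
		return 0;
	q->head = e->next;
	if (!q->head)
		q->head_tail = NULL;
	*cmd = e->cmd;
	e->next = q->free;                  /* entry goes back on the free list */
	q->free = e;
	return 1;
}

int main(void)
{
	struct queue q;
	int cmd;

	if (!queue_initialise(&q))
		return 1;
	queue_add(&q, 41);
	queue_add(&q, 42);
	while (queue_remove(&q, &cmd))
		printf("dequeued %d\n", cmd);
	free(q.alloc);
	return 0;
}
```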
|
| D | queue.h |
|  3    * linux/drivers/acorn/scsi/queue.h: queue handling
|  18   * Function: void queue_initialise (Queue_t *queue)
|  19   * Purpose : initialise a queue
|  20   * Params : queue - queue to initialise
|  22   extern int queue_initialise (Queue_t *queue);
|  25   * Function: void queue_free (Queue_t *queue)
|  26   * Purpose : free a queue
|  27   * Params : queue - queue to free
|  29   extern void queue_free (Queue_t *queue);
|  32   * Function: struct scsi_cmnd *queue_remove (queue)
|  [all …]
|
| /kernel/linux/linux-5.10/drivers/scsi/arm/ |
| D | queue.c |
|  3    * linux/drivers/acorn/scsi/queue.c: queue handling primitives
|  46   #include "queue.h"
|  51   * Function: void queue_initialise (Queue_t *queue)
|  52   * Purpose : initialise a queue
|  53   * Params : queue - queue to initialise
|  55   int queue_initialise (Queue_t *queue)  in queue_initialise() argument
|  60   spin_lock_init(&queue->queue_lock);  in queue_initialise()
|  61   INIT_LIST_HEAD(&queue->head);  in queue_initialise()
|  62   INIT_LIST_HEAD(&queue->free);  in queue_initialise()
|  70   queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);  in queue_initialise()
|  [all …]
|
| D | queue.h |
|  3    * linux/drivers/acorn/scsi/queue.h: queue handling
|  18   * Function: void queue_initialise (Queue_t *queue)
|  19   * Purpose : initialise a queue
|  20   * Params : queue - queue to initialise
|  22   extern int queue_initialise (Queue_t *queue);
|  25   * Function: void queue_free (Queue_t *queue)
|  26   * Purpose : free a queue
|  27   * Params : queue - queue to free
|  29   extern void queue_free (Queue_t *queue);
|  32   * Function: struct scsi_cmnd *queue_remove (queue)
|  [all …]
|
| /kernel/liteos_m/kernel/include/ |
| D | los_queue.h |
|  33   * @defgroup los_queue Queue
|  52   * Queue error code: The maximum number of queue resources is configured to 0.
|  56   …* Solution: Configure the maximum number of queue resources to be greater than 0. If queue modules…
|  57   * set the configuration item for the tailoring of the maximum number of queue resources to NO.
|  63   * Queue error code: The queue block memory fails to be initialized.
|  67   …* Solution: Allocate the queue block bigger memory partition, or decrease the maximum number of qu…
|  73   * Queue error code: The memory for queue creation fails to be requested.
|  77   …* Solution: Allocate more memory for queue creation, or decrease the queue length and the number o…
|  78   * in the queue to be created.
|  84   * Queue error code: The size of the biggest message in the created queue is too big.
|  [all …]
|
| /kernel/linux/linux-5.10/drivers/net/xen-netback/ |
| D | rx.c |
|  42   static void xenvif_update_needed_slots(struct xenvif_queue *queue,  in xenvif_update_needed_slots() argument
|  55   WRITE_ONCE(queue->rx_slots_needed, needed);  in xenvif_update_needed_slots()
|  58   static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)  in xenvif_rx_ring_slots_available() argument
|  63   needed = READ_ONCE(queue->rx_slots_needed);  in xenvif_rx_ring_slots_available()
|  68   prod = queue->rx.sring->req_prod;  in xenvif_rx_ring_slots_available()
|  69   cons = queue->rx.req_cons;  in xenvif_rx_ring_slots_available()
|  74   queue->rx.sring->req_event = prod + 1;  in xenvif_rx_ring_slots_available()
|  80   } while (queue->rx.sring->req_prod != prod);  in xenvif_rx_ring_slots_available()
|  85   bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)  in xenvif_rx_queue_tail()  argument
|  90   spin_lock_irqsave(&queue->rx_queue.lock, flags);  in xenvif_rx_queue_tail()
|  [all …]
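`xenvif_rx_ring_slots_available()` above compares the frontend's producer index against the backend's consumer index to see whether the next packet's slots fit, and arms `req_event` so the frontend notifies once more requests arrive. The sketch below is a simplified userspace model of that check using plain integers; the real code re-reads `req_prod` in a loop after arming the event to close a race, which is omitted here, and all field names are stand-ins for the Xen shared-ring layout.

```c
/* Producer/consumer ring fullness check with an event threshold. */
#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int req_prod;    /* advanced by the frontend (producer) */
	unsigned int req_cons;    /* advanced by the backend (consumer) */
	unsigned int req_event;   /* frontend notifies when prod passes this */
};

static bool ring_slots_available(struct ring *r, unsigned int needed)
{
	unsigned int prod = r->req_prod;

	if (prod - r->req_cons >= needed)
		return true;

	/* Not enough yet: ask the producer to notify once `needed` more
	 * requests have been posted, then report "not available". */
	r->req_event = r->req_cons + needed;
	return false;
}

int main(void)
{
	struct ring r = { .req_prod = 10, .req_cons = 8, .req_event = 0 };

	printf("need 2: %d\n", ring_slots_available(&r, 2));  /* 1: two slots free */
	printf("need 4: %d\n", ring_slots_available(&r, 4));  /* 0: event armed */
	printf("req_event = %u\n", r.req_event);              /* 12 */
	return 0;
}
```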
|
| D | interface.c |
|  46   /* Number of bytes allowed on the internal guest Rx queue. */
|  54   void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,  in xenvif_skb_zerocopy_prepare() argument
|  58   atomic_inc(&queue->inflight_packets);  in xenvif_skb_zerocopy_prepare()
|  61   void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)  in xenvif_skb_zerocopy_complete() argument
|  63   atomic_dec(&queue->inflight_packets);  in xenvif_skb_zerocopy_complete()
|  69   wake_up(&queue->dealloc_wq);  in xenvif_skb_zerocopy_complete()
|  79   static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)  in xenvif_handle_tx_interrupt() argument
|  83   rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);  in xenvif_handle_tx_interrupt()
|  85   napi_schedule(&queue->napi);  in xenvif_handle_tx_interrupt()
|  91   struct xenvif_queue *queue = dev_id;  in xenvif_tx_interrupt() local
|  [all …]
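The interface.c lines above show xen-netback's zerocopy accounting: prepare bumps an in-flight counter, complete drops it and wakes the dealloc waiter on `dealloc_wq`. Here is a self-contained userspace model of that idea; a pthread mutex and condition variable stand in for the kernel's `atomic_t` and waitqueue, and the function names are illustrative rather than the driver's API.

```c
/* In-flight packet accounting with a wakeup when the count reaches zero. */
#include <pthread.h>
#include <stdio.h>

struct queue {
	int inflight_packets;
	pthread_mutex_t lock;
	pthread_cond_t dealloc_wq;   /* stands in for wake_up(&queue->dealloc_wq) */
};

static void zerocopy_prepare(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->inflight_packets++;
	pthread_mutex_unlock(&q->lock);
}

static void zerocopy_complete(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (--q->inflight_packets == 0)
		pthread_cond_broadcast(&q->dealloc_wq);
	pthread_mutex_unlock(&q->lock);
}

/* The dealloc path blocks here until every zerocopy skb has completed. */
static void wait_for_dealloc(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->inflight_packets > 0)
		pthread_cond_wait(&q->dealloc_wq, &q->lock);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };

	zerocopy_prepare(&q);
	zerocopy_complete(&q);
	wait_for_dealloc(&q);        /* returns immediately: nothing in flight */
	printf("all zerocopy packets completed\n");
	return 0;
}
```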
|
| D | netback.c |
|  58   /* The time that packets can stay on the guest Rx internal queue
|  106  static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
|  109  static void make_tx_response(struct xenvif_queue *queue,
|  114  static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
|  116  static inline int tx_work_todo(struct xenvif_queue *queue);
|  118  static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,  in idx_to_pfn() argument
|  121  return page_to_pfn(queue->mmap_pages[idx]);  in idx_to_pfn()
|  124  static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,  in idx_to_kaddr() argument
|  127  return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));  in idx_to_kaddr()
|  160  void xenvif_kick_thread(struct xenvif_queue *queue)  in xenvif_kick_thread() argument
|  [all …]
|
| /kernel/linux/linux-6.6/drivers/net/xen-netback/ |
| D | rx.c |
|  42   static void xenvif_update_needed_slots(struct xenvif_queue *queue,  in xenvif_update_needed_slots() argument
|  55   WRITE_ONCE(queue->rx_slots_needed, needed);  in xenvif_update_needed_slots()
|  58   static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)  in xenvif_rx_ring_slots_available() argument
|  63   needed = READ_ONCE(queue->rx_slots_needed);  in xenvif_rx_ring_slots_available()
|  68   prod = queue->rx.sring->req_prod;  in xenvif_rx_ring_slots_available()
|  69   cons = queue->rx.req_cons;  in xenvif_rx_ring_slots_available()
|  74   queue->rx.sring->req_event = prod + 1;  in xenvif_rx_ring_slots_available()
|  80   } while (queue->rx.sring->req_prod != prod);  in xenvif_rx_ring_slots_available()
|  85   bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)  in xenvif_rx_queue_tail()  argument
|  90   spin_lock_irqsave(&queue->rx_queue.lock, flags);  in xenvif_rx_queue_tail()
|  [all …]
|
| D | interface.c |
|  44   /* Number of bytes allowed on the internal guest Rx queue. */
|  52   void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,  in xenvif_skb_zerocopy_prepare() argument
|  56   atomic_inc(&queue->inflight_packets);  in xenvif_skb_zerocopy_prepare()
|  59   void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)  in xenvif_skb_zerocopy_complete() argument
|  61   atomic_dec(&queue->inflight_packets);  in xenvif_skb_zerocopy_complete()
|  67   wake_up(&queue->dealloc_wq);  in xenvif_skb_zerocopy_complete()
|  77   static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)  in xenvif_handle_tx_interrupt() argument
|  81   rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);  in xenvif_handle_tx_interrupt()
|  83   napi_schedule(&queue->napi);  in xenvif_handle_tx_interrupt()
|  89   struct xenvif_queue *queue = dev_id;  in xenvif_tx_interrupt() local
|  [all …]
|
| /kernel/linux/linux-6.6/drivers/nvme/host/ |
| D | tcp.c |
|  82   struct nvme_tcp_queue *queue;  member
|  180  static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
|  187  static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)  in nvme_tcp_queue_id() argument
|  189  return queue - queue->ctrl->queues;  in nvme_tcp_queue_id()
|  192  static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)  in nvme_tcp_tagset() argument
|  194  u32 queue_idx = nvme_tcp_queue_id(queue);  in nvme_tcp_tagset()
|  197  return queue->ctrl->admin_tag_set.tags[queue_idx];  in nvme_tcp_tagset()
|  198  return queue->ctrl->tag_set.tags[queue_idx - 1];  in nvme_tcp_tagset()
|  201  static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)  in nvme_tcp_hdgst_len() argument
|  203  return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;  in nvme_tcp_hdgst_len()
|  [all …]
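On the host side, `nvme_tcp_queue_id()` recovers a queue's index by pointer arithmetic against the controller's queue array, and `nvme_tcp_tagset()` maps index 0 to the admin tag set and indices 1..N to the I/O tag sets shifted by one. The sketch below is a self-contained userspace model of that mapping; the structures are illustrative stand-ins, and only the digest length constant (4 bytes for the CRC32C header digest) is taken as a known value.

```c
/* Queue id by pointer arithmetic, admin vs I/O tag-set selection. */
#include <stdio.h>

#define NR_IO_QUEUES 4

struct tcp_ctrl;   /* forward declaration */

struct tcp_queue {
	int hdr_digest;               /* whether a header digest trails each PDU */
	struct tcp_ctrl *ctrl;
};

struct tcp_ctrl {
	struct tcp_queue queues[NR_IO_QUEUES + 1];  /* [0] is the admin queue */
	const char *admin_tag_set;
	const char *io_tag_set[NR_IO_QUEUES];
};

static int queue_id(struct tcp_queue *q)
{
	return (int)(q - q->ctrl->queues);          /* array index == queue id */
}

static const char *tagset(struct tcp_queue *q)
{
	int idx = queue_id(q);

	return idx == 0 ? q->ctrl->admin_tag_set
			: q->ctrl->io_tag_set[idx - 1];
}

static int hdgst_len(struct tcp_queue *q)
{
	return q->hdr_digest ? 4 : 0;   /* 4 == CRC32C header digest length */
}

int main(void)
{
	struct tcp_ctrl ctrl = {
		.admin_tag_set = "admin",
		.io_tag_set = { "io0", "io1", "io2", "io3" },
	};

	for (int i = 0; i <= NR_IO_QUEUES; i++)
		ctrl.queues[i].ctrl = &ctrl;

	printf("queue 0 -> %s\n", tagset(&ctrl.queues[0]));   /* admin */
	printf("queue 3 -> %s\n", tagset(&ctrl.queues[3]));   /* io2 */
	printf("hdgst_len = %d\n", hdgst_len(&ctrl.queues[1]));
	return 0;
}
```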
|
| /kernel/linux/linux-5.10/drivers/nvme/host/ |
| D | tcp.c |
|  81   struct nvme_tcp_queue *queue;  member
|  176  static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
|  183  static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)  in nvme_tcp_queue_id() argument
|  185  return queue - queue->ctrl->queues;  in nvme_tcp_queue_id()
|  188  static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)  in nvme_tcp_tagset() argument
|  190  u32 queue_idx = nvme_tcp_queue_id(queue);  in nvme_tcp_tagset()
|  193  return queue->ctrl->admin_tag_set.tags[queue_idx];  in nvme_tcp_tagset()
|  194  return queue->ctrl->tag_set.tags[queue_idx - 1];  in nvme_tcp_tagset()
|  197  static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)  in nvme_tcp_hdgst_len() argument
|  199  return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;  in nvme_tcp_hdgst_len()
|  [all …]
|
| /kernel/linux/linux-5.10/drivers/iio/buffer/ |
| D | industrialio-buffer-dma.c |
|  33   * means of two queues. The incoming queue and the outgoing queue. Blocks on the
|  34   * incoming queue are waiting for the DMA controller to pick them up and fill
|  35   * them with data. Block on the outgoing queue have been filled with data and
|  51   * incoming or outgoing queue the block will be freed.
|  100  dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),  in iio_buffer_block_release()
|  103  iio_buffer_put(&block->queue->buffer);  in iio_buffer_block_release()
|  166  struct iio_dma_buffer_queue *queue, size_t size)  in iio_dma_buffer_alloc_block() argument
|  174  block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),  in iio_dma_buffer_alloc_block()
|  183  block->queue = queue;  in iio_dma_buffer_alloc_block()
|  187  iio_buffer_get(&queue->buffer);  in iio_dma_buffer_alloc_block()
|  [all …]
|
| /kernel/linux/linux-6.6/drivers/iio/buffer/ |
| D | industrialio-buffer-dma.c |
|  33   * means of two queues. The incoming queue and the outgoing queue. Blocks on the
|  34   * incoming queue are waiting for the DMA controller to pick them up and fill
|  35   * them with data. Block on the outgoing queue have been filled with data and
|  51   * incoming or outgoing queue the block will be freed.
|  100  dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),  in iio_buffer_block_release()
|  103  iio_buffer_put(&block->queue->buffer);  in iio_buffer_block_release()
|  166  struct iio_dma_buffer_queue *queue, size_t size)  in iio_dma_buffer_alloc_block() argument
|  174  block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),  in iio_dma_buffer_alloc_block()
|  183  block->queue = queue;  in iio_dma_buffer_alloc_block()
|  187  iio_buffer_get(&queue->buffer);  in iio_dma_buffer_alloc_block()
|  [all …]
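The IIO DMA buffer comment above describes the block flow: empty blocks wait on an incoming queue for the DMA controller to fill them, filled blocks move to an outgoing queue until userspace reads them, and a block left on neither queue is freed. The sketch below is a minimal userspace model of that lifecycle; `malloc` stands in for `dma_alloc_coherent()`, the two FIFOs are simple singly-linked lists, and all names are illustrative, not the IIO API.

```c
/* Block lifecycle model: incoming (empty) -> DMA fill -> outgoing (full). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct block {
	struct block *next;
	size_t size;
	size_t bytes_used;
	void *vaddr;       /* would be DMA-coherent memory in the driver */
};

struct fifo { struct block *head, *tail; };

static void fifo_push(struct fifo *f, struct block *b)
{
	b->next = NULL;
	if (f->tail)
		f->tail->next = b;
	else
		f->head = b;
	f->tail = b;
}

static struct block *fifo_pop(struct fifo *f)
{
	struct block *b = f->head;

	if (b) {
		f->head = b->next;
		if (!f->head)
			f->tail = NULL;
	}
	return b;
}

int main(void)
{
	struct fifo incoming = {0}, outgoing = {0};

	/* Allocate one block and queue it for the (simulated) DMA engine. */
	struct block *b = calloc(1, sizeof(*b));
	if (!b)
		return 1;
	b->size = 4096;
	b->vaddr = malloc(b->size);
	if (!b->vaddr)
		return 1;
	fifo_push(&incoming, b);

	/* "DMA complete": the engine filled the block, move it to outgoing. */
	b = fifo_pop(&incoming);
	memset(b->vaddr, 0xab, b->size);
	b->bytes_used = b->size;
	fifo_push(&outgoing, b);

	/* Userspace read drains the outgoing queue, then the block is freed. */
	b = fifo_pop(&outgoing);
	printf("read %zu bytes\n", b->bytes_used);
	free(b->vaddr);
	free(b);
	return 0;
}
```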
|