
Searched full:queued (Results 1 – 25 of 2652) sorted by relevance


/kernel/linux/linux-6.6/include/asm-generic/
qrwlock.h
40 * queued_read_trylock - try to acquire read lock of a queued rwlock
41 * @lock : Pointer to queued rwlock structure
59 * queued_write_trylock - try to acquire write lock of a queued rwlock
60 * @lock : Pointer to queued rwlock structure
75 * queued_read_lock - acquire read lock of a queued rwlock
76 * @lock: Pointer to queued rwlock structure
91 * queued_write_lock - acquire write lock of a queued rwlock
92 * @lock : Pointer to queued rwlock structure
105 * queued_read_unlock - release read lock of a queued rwlock
106 * @lock : Pointer to queued rwlock structure
[all …]
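
The qrwlock.h hits above are the kernel-doc for the queued rwlock primitives (queued_read_trylock(), queued_write_trylock(), queued_read_lock(), queued_write_lock(), queued_read_unlock()). As a hedged sketch with hypothetical names (example_lock, example_value), this is how such locks are normally reached through the generic rwlock API; on architectures selecting ARCH_USE_QUEUED_RWLOCKS, read_lock()/write_lock() resolve to these queued_* helpers:

#include <linux/spinlock.h>	/* pulls in the generic rwlock API */

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock */
static int example_value;		/* hypothetical shared data */

/* Readers may run concurrently; read_lock() ends up in queued_read_lock()
 * on queued-rwlock architectures. */
static int example_read(void)
{
	int v;

	read_lock(&example_lock);
	v = example_value;
	read_unlock(&example_lock);
	return v;
}

/* A writer excludes readers and other writers via queued_write_lock(). */
static void example_write(int v)
{
	write_lock(&example_lock);
	example_value = v;
	write_unlock(&example_lock);
}
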
qspinlock.h
3 * Queued spinlock
48 * @lock: Pointer to queued spinlock structure
63 * @lock: queued spinlock structure
78 * @lock : Pointer to queued spinlock structure
86 * queued_spin_trylock - try to acquire the queued spinlock
87 * @lock : Pointer to queued spinlock structure
104 * queued_spin_lock - acquire a queued spinlock
105 * @lock: Pointer to queued spinlock structure
120 * queued_spin_unlock - release a queued spinlock
121 * @lock : Pointer to queued spinlock structure
[all …]
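
Similarly, the qspinlock.h hits document queued_spin_trylock()/queued_spin_lock()/queued_spin_unlock(), the generic arch spinlock used when CONFIG_ARCH_USE_QUEUED_SPINLOCKS is set. A minimal sketch with a hypothetical counter, showing the usual path through spin_lock(), which resolves to the queued spinlock on such architectures:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static unsigned long example_count;	/* hypothetical shared counter */

static void example_increment(void)
{
	/* spin_lock()/spin_unlock() map to queued_spin_lock()/
	 * queued_spin_unlock() when the generic queued spinlock is used. */
	spin_lock(&example_lock);
	example_count++;
	spin_unlock(&example_lock);
}

static bool example_try_increment(void)
{
	/* spin_trylock() likewise maps to queued_spin_trylock(). */
	if (!spin_trylock(&example_lock))
		return false;
	example_count++;
	spin_unlock(&example_lock);
	return true;
}
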
/kernel/linux/linux-5.10/security/integrity/ima/
ima_queue_keys.c
17 * right away or should be queued for processing later.
28 * If custom IMA policy is not loaded then keys queued up
39 * queued up in case custom IMA policy was not loaded.
48 * This function sets up a worker to free queued keys in case
106 bool queued = false; in ima_queue_key() local
116 queued = true; in ima_queue_key()
120 if (!queued) in ima_queue_key()
123 return queued; in ima_queue_key()
127 * ima_process_queued_keys() - process keys queued for measurement
129 * This function sets ima_process_keys to true and processes queued keys.
[all …]
/kernel/linux/linux-6.6/security/integrity/ima/
ima_queue_keys.c
18 * right away or should be queued for processing later.
29 * If custom IMA policy is not loaded then keys queued up
40 * queued up in case custom IMA policy was not loaded.
49 * This function sets up a worker to free queued keys in case
107 bool queued = false; in ima_queue_key() local
117 queued = true; in ima_queue_key()
121 if (!queued) in ima_queue_key()
124 return queued; in ima_queue_key()
128 * ima_process_queued_keys() - process keys queued for measurement
130 * This function sets ima_process_keys to true and processes queued keys.
[all …]
/kernel/linux/linux-5.10/include/asm-generic/
qspinlock.h
3 * Queued spinlock
19 * @lock: Pointer to queued spinlock structure
34 * @lock: queued spinlock structure
49 * @lock : Pointer to queued spinlock structure
57 * queued_spin_trylock - try to acquire the queued spinlock
58 * @lock : Pointer to queued spinlock structure
75 * queued_spin_lock - acquire a queued spinlock
76 * @lock: Pointer to queued spinlock structure
91 * queued_spin_unlock - release a queued spinlock
92 * @lock : Pointer to queued spinlock structure
[all …]
/kernel/linux/linux-6.6/block/
blk-throttle.h
14 * To avoid such starvation, dispatched bios are queued separately
19 * throtl_qnode is used to keep the queued bios separated by their sources.
20 * Bios are queued to throtl_qnode which in turn is queued to
24 * belongs to a throtl_grp and gets queued on itself or the parent, so
26 * queued and decrementing when dequeued is enough to keep the whole blkg
30 struct list_head node; /* service_queue->queued[] */
31 struct bio_list bios; /* queued bios */
39 * Bios queued directly to this service_queue or dispatched from
42 struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ member
43 unsigned int nr_queued[2]; /* number of queued bios */
[all …]
/kernel/linux/linux-5.10/drivers/net/wireless/mediatek/mt76/
debugfs.c
40 "%d: queued=%d head=%d tail=%d\n", in mt76_queues_read()
41 i, q->queued, q->head, q->tail); in mt76_queues_read()
51 int i, queued; in mt76_rx_queues_read() local
56 queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued; in mt76_rx_queues_read()
57 seq_printf(s, "%d: queued=%d head=%d tail=%d\n", in mt76_rx_queues_read()
58 i, queued, q->head, q->tail); in mt76_rx_queues_read()
sdio.c
34 q->queued = 0; in mt76s_alloc_rx_queue()
98 if (q->queued > 0) { in mt76s_get_next_rx_entry()
101 q->queued--; in mt76s_get_next_rx_entry()
140 while (q->queued > 0) { in mt76s_process_tx_queue()
155 wake = q->stopped && q->queued < q->ndesc - 8; in mt76s_process_tx_queue()
159 if (!q->queued) in mt76s_process_tx_queue()
208 if (q->queued == q->ndesc) in mt76s_tx_queue_skb()
222 q->queued++; in mt76s_tx_queue_skb()
234 if (q->queued == q->ndesc) in mt76s_tx_queue_skb_raw()
247 q->queued++; in mt76s_tx_queue_skb_raw()
/kernel/linux/linux-6.6/drivers/net/wireless/mediatek/mt76/
debugfs.c
59 seq_puts(s, " queue | hw-queued | head | tail |\n"); in mt76_queues_read()
67 i, q->queued, q->head, q->tail); in mt76_queues_read()
77 int i, queued; in mt76_rx_queues_read() local
79 seq_puts(s, " queue | hw-queued | head | tail |\n"); in mt76_rx_queues_read()
83 queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued; in mt76_rx_queues_read()
85 i, queued, q->head, q->tail); in mt76_rx_queues_read()
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_flip_work.c
63 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
92 * drm_flip_work_commit - commit queued work
94 * @wq: the work-queue to run the queued work on
96 * Trigger work previously queued by drm_flip_work_queue() to run
99 * prior), and then from vblank irq commit the queued work.
107 list_splice_tail(&work->queued, &work->commited); in drm_flip_work_commit()
108 INIT_LIST_HEAD(&work->queued); in drm_flip_work_commit()
151 INIT_LIST_HEAD(&work->queued); in drm_flip_work_init()
168 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited)); in drm_flip_work_cleanup()
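
The drm_flip_work.c hits show the queued list being filled by drm_flip_work_queue() and spliced over in drm_flip_work_commit(). A hedged sketch of the usual pattern, with hypothetical names, assuming the documented drm_flip_work_init()/drm_flip_work_queue()/drm_flip_work_commit()/drm_flip_work_cleanup() API: queue from the display update path, then commit from the vblank interrupt so the callback runs later on a workqueue:

#include <drm/drm_flip_work.h>
#include <linux/workqueue.h>

static struct drm_flip_work example_work;	/* hypothetical */

/* Runs later on the workqueue for each committed value, e.g. dropping a
 * framebuffer reference that must not be dropped in IRQ context. */
static void example_cleanup_cb(struct drm_flip_work *work, void *val)
{
	/* release *val here */
}

static void example_init(void)
{
	drm_flip_work_init(&example_work, "example cleanup", example_cleanup_cb);
}

/* Display update path: stash the value on the queued list. */
static void example_queue(void *old_fb)
{
	drm_flip_work_queue(&example_work, old_fb);
}

/* Vblank interrupt: commit previously queued entries and kick the worker
 * so example_cleanup_cb() runs on system_unbound_wq. */
static void example_vblank_handler(void)
{
	drm_flip_work_commit(&example_work, system_unbound_wq);
}

static void example_fini(void)
{
	drm_flip_work_cleanup(&example_work);
}
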
/kernel/linux/linux-6.6/drivers/gpu/drm/
drm_flip_work.c
63 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
92 * drm_flip_work_commit - commit queued work
94 * @wq: the work-queue to run the queued work on
96 * Trigger work previously queued by drm_flip_work_queue() to run
99 * prior), and then from vblank irq commit the queued work.
107 list_splice_tail(&work->queued, &work->commited); in drm_flip_work_commit()
108 INIT_LIST_HEAD(&work->queued); in drm_flip_work_commit()
151 INIT_LIST_HEAD(&work->queued); in drm_flip_work_init()
168 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited)); in drm_flip_work_cleanup()
/kernel/linux/linux-5.10/Documentation/userspace-api/media/mediactl/
media-request-ioc-queue.rst
34 If the request was successfully queued, then the file descriptor can be
37 If the request was already queued before, then ``EBUSY`` is returned.
42 Once a request is queued, then the driver is required to gracefully handle
49 queued directly and you next try to queue a request, or vice versa.
62 The request was already queued or the application queued the first
request-api.rst
24 buffer queues since in practice only one buffer would be queued at a time.
59 instead of being immediately applied, and buffers queued to a request do not
60 enter the regular buffer queue until the request itself is queued.
66 queued by calling :ref:`MEDIA_REQUEST_IOC_QUEUE` on the request file descriptor.
68 A queued request cannot be modified anymore.
86 a buffer was queued via a request or vice versa will result in an ``EBUSY``
109 request that has been queued but not yet completed will return ``EBUSY``
121 longer in use by the kernel. That is, if the request is queued and then the
165 Once the request is fully prepared, it can be queued to the driver:
245 Once the request is fully prepared, it can be queued to the driver:
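
As a hedged illustration of the flow these documents describe (not taken from them), here is a minimal userspace sketch that allocates a request on the media device, queues it with MEDIA_REQUEST_IOC_QUEUE, and waits for completion. The device path and function name are hypothetical, error handling is minimal, and the buffer/control association step is only indicated in comments:

#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int example_queue_request(const char *media_path)
{
	struct pollfd pfd = { 0 };
	int media_fd, req_fd = -1, ret = -1;

	media_fd = open(media_path, O_RDWR);	/* e.g. a /dev/mediaN node */
	if (media_fd < 0)
		return -1;

	/* Allocate a request; the ioctl fills in a new request file descriptor. */
	if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
		goto out;

	/*
	 * Here buffers (VIDIOC_QBUF with V4L2_BUF_FLAG_REQUEST_FD and
	 * request_fd = req_fd) and controls (VIDIOC_S_EXT_CTRLS with
	 * which = V4L2_CTRL_WHICH_REQUEST_VAL) would be associated with the
	 * request. Once fully prepared, queue it; EBUSY means it was
	 * already queued.
	 */
	if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
		goto out;

	/* Completion is signalled as POLLPRI on the request fd. */
	pfd.fd = req_fd;
	pfd.events = POLLPRI;
	ret = (poll(&pfd, 1, -1) == 1) ? 0 : -1;

out:
	if (req_fd >= 0)
		close(req_fd);
	close(media_fd);
	return ret;
}
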
/kernel/linux/linux-6.6/Documentation/userspace-api/media/mediactl/
media-request-ioc-queue.rst
34 If the request was successfully queued, then the file descriptor can be
37 If the request was already queued before, then ``EBUSY`` is returned.
42 Once a request is queued, then the driver is required to gracefully handle
49 queued directly and you next try to queue a request, or vice versa.
62 The request was already queued or the application queued the first
request-api.rst
24 buffer queues since in practice only one buffer would be queued at a time.
59 instead of being immediately applied, and buffers queued to a request do not
60 enter the regular buffer queue until the request itself is queued.
66 queued by calling :ref:`MEDIA_REQUEST_IOC_QUEUE` on the request file descriptor.
68 A queued request cannot be modified anymore.
86 a buffer was queued via a request or vice versa will result in an ``EBUSY``
109 request that has been queued but not yet completed will return ``EBUSY``
121 longer in use by the kernel. That is, if the request is queued and then the
165 Once the request is fully prepared, it can be queued to the driver:
245 Once the request is fully prepared, it can be queued to the driver:
/kernel/linux/linux-6.6/include/linux/
dynamic_queue_limits.h
11 * 1) Objects are queued up to some limit specified as number of objects.
14 * 3) Starvation occurs when limit has been reached, all queued data has
17 * 4) Minimizing the amount of queued data is desirable.
24 * dql_avail - returns how many objects are available to be queued based
45 unsigned int num_queued; /* Total ever queued */
72 * Record number of objects queued. Assumes that caller has already checked
91 /* Returns how many objects can be queued, < 0 indicates over limit. */
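
The dynamic_queue_limits.h comments describe the dql object: producers record queued objects, consumers report completions, and availability goes negative once the adaptive limit is exceeded. A small sketch, assuming the dql_init()/dql_queued()/dql_avail()/dql_completed() helpers from this header and hypothetical wrapper names; real network drivers would normally go through the BQL wrappers such as netdev_tx_sent_queue()/netdev_tx_completed_queue() instead:

#include <linux/dynamic_queue_limits.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static struct dql example_dql;		/* hypothetical queue limit state */

static void example_setup(void)
{
	dql_init(&example_dql, HZ);	/* hold time used to adjust the limit */
}

static bool example_enqueue(unsigned int count)
{
	/* dql_queued() assumes the caller already checked availability. */
	if (dql_avail(&example_dql) < 0)
		return false;		/* over limit: producer should stop */
	dql_queued(&example_dql, count);
	return true;
}

static void example_complete(unsigned int count)
{
	/* Report consumed objects so the limit adapts and the producer can
	 * be restarted once dql_avail() turns positive again. */
	dql_completed(&example_dql, count);
}
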
/kernel/linux/linux-5.10/include/linux/
dynamic_queue_limits.h
11 * 1) Objects are queued up to some limit specified as number of objects.
14 * 3) Starvation occurs when limit has been reached, all queued data has
17 * 4) Minimizing the amount of queued data is desirable.
24 * dql_avail - returns how many objects are available to be queued based
45 unsigned int num_queued; /* Total ever queued */
72 * Record number of objects queued. Assumes that caller has already checked
91 /* Returns how many objects can be queued, < 0 indicates over limit. */
/kernel/linux/linux-6.6/net/x25/
x25_in.c
210 int queued = 0; in x25_state3_machine() local
277 queued = 1; in x25_state3_machine()
315 queued = !sock_queue_rcv_skb(sk, skb); in x25_state3_machine()
319 queued = 1; in x25_state3_machine()
330 return queued; in x25_state3_machine()
418 int queued = 0, frametype, ns, nr, q, d, m; in x25_process_rx_frame() local
427 queued = x25_state1_machine(sk, skb, frametype); in x25_process_rx_frame()
430 queued = x25_state2_machine(sk, skb, frametype); in x25_process_rx_frame()
433 queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m); in x25_process_rx_frame()
436 queued = x25_state4_machine(sk, skb, frametype); in x25_process_rx_frame()
[all …]
/kernel/linux/linux-5.10/net/x25/
x25_in.c
210 int queued = 0; in x25_state3_machine() local
277 queued = 1; in x25_state3_machine()
315 queued = !sock_queue_rcv_skb(sk, skb); in x25_state3_machine()
319 queued = 1; in x25_state3_machine()
330 return queued; in x25_state3_machine()
418 int queued = 0, frametype, ns, nr, q, d, m; in x25_process_rx_frame() local
427 queued = x25_state1_machine(sk, skb, frametype); in x25_process_rx_frame()
430 queued = x25_state2_machine(sk, skb, frametype); in x25_process_rx_frame()
433 queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m); in x25_process_rx_frame()
436 queued = x25_state4_machine(sk, skb, frametype); in x25_process_rx_frame()
[all …]
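
The x25_in.c hits above (and the rose/ax25 ones below) share one dispatch pattern: each per-state handler returns non-zero when it consumed, i.e. queued, the skb, and the caller frees the frame only when nothing queued it. A reduced, purely illustrative sketch of that pattern with made-up names, not the kernel code itself:

#include <stdbool.h>
#include <stdlib.h>

struct example_frame { int data; };	/* stand-in for struct sk_buff */

enum example_state { EX_STATE_1, EX_STATE_3 };

static int example_state1_machine(struct example_frame *f) { (void)f; return 0; }
static int example_state3_machine(struct example_frame *f) { (void)f; return 1; }

/* Returns true when some state machine queued (kept) the frame. */
static bool example_process_rx_frame(enum example_state st, struct example_frame *f)
{
	int queued = 0;

	switch (st) {
	case EX_STATE_1:
		queued = example_state1_machine(f);
		break;
	case EX_STATE_3:
		queued = example_state3_machine(f);
		break;
	}
	return queued;
}

/* Caller side: only free the frame when no handler queued it. */
static void example_rx(enum example_state st, struct example_frame *f)
{
	if (!example_process_rx_frame(st, f))
		free(f);
}
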
/kernel/linux/linux-5.10/net/rose/
rose_in.c
104 int queued = 0; in rose_state3_machine() local
167 queued = 1; in rose_state3_machine()
204 return queued; in rose_state3_machine()
265 int queued = 0, frametype, ns, nr, q, d, m; in rose_process_rx_frame() local
274 queued = rose_state1_machine(sk, skb, frametype); in rose_process_rx_frame()
277 queued = rose_state2_machine(sk, skb, frametype); in rose_process_rx_frame()
280 queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m); in rose_process_rx_frame()
283 queued = rose_state4_machine(sk, skb, frametype); in rose_process_rx_frame()
286 queued = rose_state5_machine(sk, skb, frametype); in rose_process_rx_frame()
292 return queued; in rose_process_rx_frame()
/kernel/linux/linux-6.6/net/rose/
rose_in.c
105 int queued = 0; in rose_state3_machine() local
168 queued = 1; in rose_state3_machine()
205 return queued; in rose_state3_machine()
266 int queued = 0, frametype, ns, nr, q, d, m; in rose_process_rx_frame() local
275 queued = rose_state1_machine(sk, skb, frametype); in rose_process_rx_frame()
278 queued = rose_state2_machine(sk, skb, frametype); in rose_process_rx_frame()
281 queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m); in rose_process_rx_frame()
284 queued = rose_state4_machine(sk, skb, frametype); in rose_process_rx_frame()
287 queued = rose_state5_machine(sk, skb, frametype); in rose_process_rx_frame()
293 return queued; in rose_process_rx_frame()
/kernel/linux/linux-5.10/net/ax25/
ax25_std_in.c
143 int queued = 0; in ax25_std_state3_machine() local
225 queued = ax25_rx_iframe(ax25, skb); in ax25_std_state3_machine()
258 return queued; in ax25_std_state3_machine()
268 int queued = 0; in ax25_std_state4_machine() local
380 queued = ax25_rx_iframe(ax25, skb); in ax25_std_state4_machine()
413 return queued; in ax25_std_state4_machine()
421 int queued = 0, frametype, ns, nr, pf; in ax25_std_frame_in() local
427 queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type); in ax25_std_frame_in()
430 queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type); in ax25_std_frame_in()
433 queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type); in ax25_std_frame_in()
[all …]
/kernel/linux/linux-6.6/net/ax25/
ax25_std_in.c
143 int queued = 0; in ax25_std_state3_machine() local
225 queued = ax25_rx_iframe(ax25, skb); in ax25_std_state3_machine()
258 return queued; in ax25_std_state3_machine()
268 int queued = 0; in ax25_std_state4_machine() local
380 queued = ax25_rx_iframe(ax25, skb); in ax25_std_state4_machine()
413 return queued; in ax25_std_state4_machine()
421 int queued = 0, frametype, ns, nr, pf; in ax25_std_frame_in() local
427 queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type); in ax25_std_frame_in()
430 queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type); in ax25_std_frame_in()
433 queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type); in ax25_std_frame_in()
[all …]
/kernel/linux/linux-6.6/include/drm/
drm_flip_work.h
47 * @val: value queued via drm_flip_work_queue()
69 * @queued: queued tasks
71 * @lock: lock to access queued and commited lists
77 struct list_head queued; member
/kernel/linux/linux-6.6/kernel/locking/
qrwlock.c
3 * Queued read/write locks
18 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
19 * @lock: Pointer to queued rwlock structure
63 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
64 * @lock : Pointer to queued rwlock structure
