
Searched refs:rq (Results 1 – 25 of 644) sorted by relevance

/kernel/linux/linux-5.10/drivers/scsi/fnic/
vnic_rq.c
27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
30 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
34 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
35 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
42 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
45 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
46 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
48 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
51 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
59 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
108 return rq->ring.desc_avail; in vnic_rq_desc_avail()
111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
119 return rq->to_use->desc; in vnic_rq_next_desc()
122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
124 return rq->to_use->index; in vnic_rq_next_index()
127 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
129 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
/kernel/linux/linux-5.10/kernel/sched/
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
15 return READ_ONCE(rq->avg_thermal.load_avg); in thermal_load_avg()
19 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) in update_thermal_load_avg() argument
24 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
76 static inline void update_rq_clock_pelt(struct rq *rq, s64 delta) in update_rq_clock_pelt() argument
[all …]
stats.h
9 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
11 if (rq) { in rq_sched_info_arrive()
12 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
13 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
21 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
23 if (rq) in rq_sched_info_depart()
24 rq->rq_cpu_time += delta; in rq_sched_info_depart()
28 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeued() argument
30 if (rq) in rq_sched_info_dequeued()
31 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeued()
[all …]
sched.h
87 struct rq;
146 extern void calc_global_load_tick(struct rq *this_rq);
147 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
153 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
674 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
750 struct rq *rq; member
939 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
999 struct rq { struct
1194 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1196 return cfs_rq->rq; in rq_of()
[all …]
stop_task.c
21 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
23 return sched_stop_runnable(rq); in balance_stop()
28 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
33 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
35 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
38 static struct task_struct *pick_next_task_stop(struct rq *rq) in pick_next_task_stop() argument
40 if (!sched_stop_runnable(rq)) in pick_next_task_stop()
43 set_next_task_stop(rq, rq->stop, true); in pick_next_task_stop()
44 return rq->stop; in pick_next_task_stop()
48 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument
[all …]
deadline.c
29 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
31 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
37 struct rq *rq = task_rq(p); in dl_rq_of_se() local
39 return &rq->dl; in dl_rq_of_se()
220 struct rq *rq; in dl_change_utilization() local
227 rq = task_rq(p); in dl_change_utilization()
229 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
241 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
242 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
304 struct rq *rq = rq_of_dl_rq(dl_rq); in task_non_contending() local
[all …]
rt.c
132 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
134 return rt_rq->rq; in rq_of_rt_rq()
142 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
146 return rt_rq->rq; in rq_of_rt_se()
171 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
175 rt_rq->rq = rq; in init_tg_rt_entry()
185 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
243 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
245 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
248 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
walt.h
53 static inline u64 scale_exec_time(u64 delta, struct rq *rq) in scale_exec_time() argument
55 unsigned long capcurr = capacity_curr_of(cpu_of(rq)); in scale_exec_time()
97 extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
112 walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) in walt_inc_cumulative_runnable_avg() argument
117 fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand_scaled); in walt_inc_cumulative_runnable_avg()
126 if (p->on_rq || (p->last_sleep_ts < rq->window_start)) in walt_inc_cumulative_runnable_avg()
127 walt_fixup_cum_window_demand(rq, p->ravg.demand_scaled); in walt_inc_cumulative_runnable_avg()
131 walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) in walt_dec_cumulative_runnable_avg() argument
136 fixup_cumulative_runnable_avg(&rq->walt_stats, in walt_dec_cumulative_runnable_avg()
145 walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand_scaled); in walt_dec_cumulative_runnable_avg()
[all …]
core.c
54 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
189 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
190 __acquires(rq->lock) in __task_rq_lock()
192 struct rq *rq; in __task_rq_lock() local
197 rq = task_rq(p); in __task_rq_lock()
198 raw_spin_lock(&rq->lock); in __task_rq_lock()
199 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
200 rq_pin_lock(rq, rf); in __task_rq_lock()
201 return rq; in __task_rq_lock()
203 raw_spin_unlock(&rq->lock); in __task_rq_lock()
[all …]
walt.c
209 void inc_rq_walt_stats(struct rq *rq, struct task_struct *p) in inc_rq_walt_stats() argument
211 walt_inc_cumulative_runnable_avg(rq, p); in inc_rq_walt_stats()
214 void dec_rq_walt_stats(struct rq *rq, struct task_struct *p) in dec_rq_walt_stats() argument
216 walt_dec_cumulative_runnable_avg(rq, p); in dec_rq_walt_stats()
219 void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p, in fixup_walt_sched_stats_common() argument
225 fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta); in fixup_walt_sched_stats_common()
227 walt_fixup_cum_window_demand(rq, task_load_delta); in fixup_walt_sched_stats_common()
231 update_window_start(struct rq *rq, u64 wallclock, int event) in update_window_start() argument
235 u64 old_window_start = rq->window_start; in update_window_start()
237 delta = wallclock - rq->window_start; in update_window_start()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/cisco/enic/
vnic_rq.c
31 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
34 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
38 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
39 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
44 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
47 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
48 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
50 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
53 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
61 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
97 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
100 return rq->ring.desc_avail; in vnic_rq_desc_avail()
103 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
106 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
109 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
111 return rq->to_use->desc; in vnic_rq_next_desc()
114 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
116 return rq->to_use->index; in vnic_rq_next_index()
119 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
124 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/
i915_request.h
53 #define RQ_TRACE(rq, fmt, ...) do { \ argument
54 const struct i915_request *rq__ = (rq); \
311 void i915_request_set_error_once(struct i915_request *rq, int error);
312 void __i915_request_skip(struct i915_request *rq);
315 void __i915_request_queue(struct i915_request *rq,
318 bool i915_request_retire(struct i915_request *rq);
319 void i915_request_retire_upto(struct i915_request *rq);
331 i915_request_get(struct i915_request *rq) in i915_request_get() argument
333 return to_request(dma_fence_get(&rq->fence)); in i915_request_get()
337 i915_request_get_rcu(struct i915_request *rq) in i915_request_get_rcu() argument
[all …]
i915_request.c
48 void (*hook)(struct i915_request *rq, struct dma_fence *signal);
112 struct i915_request *rq = to_request(fence); in i915_fence_release() local
121 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
122 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
155 if (is_power_of_2(rq->execution_mask) && in i915_fence_release()
156 !cmpxchg(&rq->engine->request_pool, NULL, rq)) in i915_fence_release()
159 kmem_cache_free(global.slab_requests, rq); in i915_fence_release()
191 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) in __notify_execute_cb() argument
195 if (llist_empty(&rq->execute_cb)) in __notify_execute_cb()
199 llist_del_all(&rq->execute_cb), in __notify_execute_cb()
[all …]
/kernel/linux/linux-5.10/fs/erofs/
decompressor.c
25 int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
27 int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
31 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, in z_erofs_lz4_prepare_destpages() argument
35 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; in z_erofs_lz4_prepare_destpages()
44 struct page *const page = rq->out[i]; in z_erofs_lz4_prepare_destpages()
54 availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES]; in z_erofs_lz4_prepare_destpages()
85 rq->out[i] = victim; in z_erofs_lz4_prepare_destpages()
90 static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, in generic_copy_inplace_data() argument
97 struct page **in = rq->in; in generic_copy_inplace_data()
100 unsigned int inlen = rq->inputsize - pageofs_in; in generic_copy_inplace_data()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
gen6_engine_cs.c
54 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
57 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
61 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
71 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
73 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
83 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
88 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
91 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
97 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
129 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
[all …]
selftest_lrc.c
55 static bool is_active(struct i915_request *rq) in is_active() argument
57 if (i915_request_is_active(rq)) in is_active()
60 if (i915_request_on_hold(rq)) in is_active()
63 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
70 struct i915_request *rq, in wait_for_submit() argument
77 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
82 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
93 struct i915_request *rq, in wait_for_reset() argument
105 if (i915_request_completed(rq)) in wait_for_reset()
108 if (READ_ONCE(rq->fence.error)) in wait_for_reset()
[all …]
intel_breadcrumbs.c
138 check_signal_order(struct intel_context *ce, struct i915_request *rq) in check_signal_order() argument
140 if (rq->context != ce) in check_signal_order()
143 if (!list_is_last(&rq->signal_link, &ce->signals) && in check_signal_order()
144 i915_seqno_passed(rq->fence.seqno, in check_signal_order()
145 list_next_entry(rq, signal_link)->fence.seqno)) in check_signal_order()
148 if (!list_is_first(&rq->signal_link, &ce->signals) && in check_signal_order()
149 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno, in check_signal_order()
150 rq->fence.seqno)) in check_signal_order()
190 static bool __signal_request(struct i915_request *rq) in __signal_request() argument
192 GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); in __signal_request()
[all …]
/kernel/linux/linux-5.10/block/
blk-flush.c
98 static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq) in blk_flush_policy() argument
102 if (blk_rq_sectors(rq)) in blk_flush_policy()
106 if (rq->cmd_flags & REQ_PREFLUSH) in blk_flush_policy()
109 (rq->cmd_flags & REQ_FUA)) in blk_flush_policy()
115 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
117 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
120 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
127 rq->bio = rq->biotail; in blk_flush_restore_request()
130 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
131 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
[all …]
blk-crypto-internal.h
26 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
52 static inline void blk_crypto_rq_set_defaults(struct request *rq) in blk_crypto_rq_set_defaults() argument
54 rq->crypt_ctx = NULL; in blk_crypto_rq_set_defaults()
55 rq->crypt_keyslot = NULL; in blk_crypto_rq_set_defaults()
58 static inline bool blk_crypto_rq_is_encrypted(struct request *rq) in blk_crypto_rq_is_encrypted() argument
60 return rq->crypt_ctx; in blk_crypto_rq_is_encrypted()
63 static inline bool blk_crypto_rq_has_keyslot(struct request *rq) in blk_crypto_rq_has_keyslot() argument
65 return rq->crypt_keyslot; in blk_crypto_rq_has_keyslot()
70 static inline bool bio_crypt_rq_ctx_compatible(struct request *rq, in bio_crypt_rq_ctx_compatible() argument
94 static inline void blk_crypto_rq_set_defaults(struct request *rq) { } in blk_crypto_rq_set_defaults() argument
[all …]
mq-deadline.c
68 deadline_rb_root(struct deadline_data *dd, struct request *rq) in deadline_rb_root() argument
70 return &dd->sort_list[rq_data_dir(rq)]; in deadline_rb_root()
77 deadline_latter_request(struct request *rq) in deadline_latter_request() argument
79 struct rb_node *node = rb_next(&rq->rb_node); in deadline_latter_request()
88 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_add_rq_rb() argument
90 struct rb_root *root = deadline_rb_root(dd, rq); in deadline_add_rq_rb()
92 elv_rb_add(root, rq); in deadline_add_rq_rb()
96 deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_del_rq_rb() argument
98 const int data_dir = rq_data_dir(rq); in deadline_del_rq_rb()
100 if (dd->next_rq[data_dir] == rq) in deadline_del_rq_rb()
[all …]
/kernel/linux/linux-5.10/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
esas2r_vda.c
59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl()
96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl()
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl()
115 memcpy(rq->vrq->flash.data.file.file_name, in esas2r_process_vda_ioctl()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
58 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
61 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
63 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
64 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
84 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, in mlx5e_read_title_slot() argument
88 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_title_slot()
94 rq->stats->cqe_compress_blks++; in mlx5e_read_title_slot()
129 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, in mlx5e_decompress_cqe() argument
133 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqe()
145 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) { in mlx5e_decompress_cqe()
[all …]
