/kernel/linux/linux-5.10/net/core/ |
D | page_pool.c |
    271  s32 inflight;  in page_pool_inflight() local
    273  inflight = _distance(hold_cnt, release_cnt);  in page_pool_inflight()
    275  trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);  in page_pool_inflight()
    276  WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);  in page_pool_inflight()
    278  return inflight;  in page_pool_inflight()
    471  int inflight;  in page_pool_release() local
    474  inflight = page_pool_inflight(pool);  in page_pool_release()
    475  if (!inflight)  in page_pool_release()
    478  return inflight;  in page_pool_release()
    485  int inflight;  in page_pool_release_retry() local
    [all …]
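The page_pool.c hits show the hold/release accounting scheme: inflight is the distance between a hold counter and a release counter, a negative value triggers a WARN, and page_pool_release_retry() keeps polling until the count reaches zero. Below is a minimal userspace sketch of the wrap-safe distance step only; the helper name and the main() harness are illustrative, not kernel code.

    /* Two u32 counters that may wrap, and an inflight figure derived from
     * their distance, as in the fragments above. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static int32_t distance(uint32_t hold_cnt, uint32_t release_cnt)
    {
        /* Unsigned subtraction is wrap-safe; the cast recovers a signed count. */
        return (int32_t)(hold_cnt - release_cnt);
    }

    int main(void)
    {
        uint32_t hold = UINT32_MAX - 1;    /* counter about to wrap */
        uint32_t release = UINT32_MAX - 3;
        int32_t inflight = distance(hold, release);

        printf("inflight = %d\n", inflight);                            /* 2 */
        assert(inflight >= 0 && "negative inflight would indicate an accounting bug");

        hold += 5;                                                      /* wraps past zero */
        printf("inflight after wrap = %d\n", distance(hold, release));  /* 7 */
        return 0;
    }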
|
/kernel/linux/linux-5.10/include/trace/events/ |
D | page_pool.h |
    17  s32 inflight, u32 hold, u32 release),
    19  TP_ARGS(pool, inflight, hold, release),
    23  __field(s32, inflight)
    31  __entry->inflight = inflight;
    38  __entry->pool, __entry->inflight, __entry->hold,
|
D | wbt.h |
    132  int step, unsigned int inflight),
    134  TP_ARGS(bdi, status, step, inflight),
    140  __field(unsigned int, inflight)
    148  __entry->inflight = inflight;
    152  __entry->status, __entry->step, __entry->inflight)
|
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/uc/ |
D | intel_guc_submission.c |
    290  struct i915_request **first = execlists->inflight;  in __guc_dequeue()
    323  port - execlists->inflight);  in __guc_dequeue()
    340  *port = schedule_in(last, port - execlists->inflight);  in __guc_dequeue()
    344  execlists->active = execlists->inflight;  in __guc_dequeue()
    356  for (port = execlists->inflight; (rq = *port); port++) {  in guc_submission_tasklet()
    362  if (port != execlists->inflight) {  in guc_submission_tasklet()
    363  int idx = port - execlists->inflight;  in guc_submission_tasklet()
    364  int rem = ARRAY_SIZE(execlists->inflight) - idx;  in guc_submission_tasklet()
    365  memmove(execlists->inflight, port, rem * sizeof(*port));  in guc_submission_tasklet()
    401  memset(execlists->inflight, 0, sizeof(execlists->inflight));  in cancel_port_requests()
    [all …]
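The __guc_dequeue()/guc_submission_tasklet() hits revolve around a small fixed array of in-flight request ports that is compacted with memmove() once requests at the head complete. A standalone sketch of that compaction step, with an invented request type and completion flag standing in for struct i915_request:

    #include <stdio.h>
    #include <string.h>

    struct toy_request {
        int id;
        int completed;
    };

    #define NR_PORTS 4

    int main(void)
    {
        struct toy_request a = { 1, 1 }, b = { 2, 1 }, c = { 3, 0 };
        struct toy_request *inflight[NR_PORTS + 1] = { &a, &b, &c, NULL };
        struct toy_request **port, *rq;

        /* Skip past requests at the head that have already completed. */
        for (port = inflight; (rq = *port); port++)
            if (!rq->completed)
                break;

        /* Pack the survivors back to slot 0, keeping the NULL terminator. */
        if (port != inflight) {
            size_t idx = port - inflight;
            size_t rem = (NR_PORTS + 1) - idx;

            memmove(inflight, port, rem * sizeof(*port));
        }

        printf("head of inflight[] is now request %d\n",
               inflight[0] ? inflight[0]->id : 0);    /* 3 */
        return 0;
    }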
|
/kernel/linux/linux-5.10/net/unix/ |
D | garbage.c |
    169  atomic_long_dec(&usk->inflight);  in dec_inflight()
    174  atomic_long_inc(&usk->inflight);  in inc_inflight()
    179  atomic_long_inc(&u->inflight);  in inc_inflight_move_tail()
    243  inflight_refs = atomic_long_read(&u->inflight);  in unix_gc()
    274  if (atomic_long_read(&u->inflight) > 0) {  in unix_gc()
|
D | scm.c |
    57  if (atomic_long_inc_return(&u->inflight) == 1) {  in unix_inflight()
    79  BUG_ON(!atomic_long_read(&u->inflight));  in unix_notinflight()
    82  if (atomic_long_dec_and_test(&u->inflight))  in unix_notinflight()
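garbage.c and scm.c together show the in-flight accounting used for SCM_RIGHTS file descriptors: unix_inflight() bumps a per-socket atomic_long, and the first increment is also the point where the socket joins the garbage collector's candidate list, while unix_notinflight() reverses both steps. A simplified userspace sketch of that pairing, with the list handling reduced to a flag (the types and helper names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct toy_unix_sock {
        atomic_long inflight;
        int on_gc_list;          /* stand-in for the GC candidate list */
    };

    static void toy_inflight(struct toy_unix_sock *u)
    {
        if (atomic_fetch_add(&u->inflight, 1) + 1 == 1)
            u->on_gc_list = 1;   /* first reference went in flight */
    }

    static void toy_notinflight(struct toy_unix_sock *u)
    {
        if (atomic_fetch_sub(&u->inflight, 1) - 1 == 0)
            u->on_gc_list = 0;   /* last in-flight reference gone */
    }

    int main(void)
    {
        struct toy_unix_sock u = { 0 };

        toy_inflight(&u);        /* fd queued inside an SCM_RIGHTS message */
        toy_inflight(&u);
        toy_notinflight(&u);     /* receiver consumed one copy */
        printf("inflight=%ld on_gc_list=%d\n",
               atomic_load(&u.inflight), u.on_gc_list);  /* 1, 1 */
        toy_notinflight(&u);
        printf("inflight=%ld on_gc_list=%d\n",
               atomic_load(&u.inflight), u.on_gc_list);  /* 0, 0 */
        return 0;
    }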
|
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/ |
D | intel_context_types.h |
    60  struct intel_engine_cs *inflight;  member
    61  #define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
    62  #define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
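intel_context_inflight() and intel_context_inflight_count() unpack one word two ways: the low two bits carry a small count while the rest is the engine pointer, recovered with ptr_mask_bits()/ptr_unmask_bits(). A userspace sketch of the same tagging trick; the 2-bit width comes from the fragments, but the helpers and harness below are invented equivalents:

    #include <assert.h>
    #include <stdalign.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TAG_MASK (((uintptr_t)1 << 2) - 1)   /* low 2 bits carry the count */

    static void *mask_bits(void *p)              /* recover the pointer */
    {
        return (void *)((uintptr_t)p & ~TAG_MASK);
    }

    static unsigned int unmask_bits(void *p)     /* recover the count */
    {
        return (unsigned int)((uintptr_t)p & TAG_MASK);
    }

    int main(void)
    {
        static alignas(4) int engine;            /* low two bits are guaranteed free */
        void *tagged = (void *)((uintptr_t)&engine | 3);  /* pack a count of 3 */

        assert(mask_bits(tagged) == (void *)&engine);
        printf("count packed in low bits: %u\n", unmask_bits(tagged));  /* 3 */
        return 0;
    }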
|
/kernel/linux/linux-5.10/drivers/crypto/chelsio/ |
D | chcr_core.c |
    56  if (atomic_read(&dev->inflight)) {  in detach_work_fn()
    60  atomic_read(&dev->inflight));  in detach_work_fn()
    65  atomic_read(&dev->inflight));  in detach_work_fn()
    103  atomic_set(&dev->inflight, 0);  in chcr_dev_add()
    122  atomic_set(&dev->inflight, 0);  in chcr_dev_init()
    232  if (atomic_read(&dev->inflight) != 0) {  in chcr_detach_device()
|
D | chcr_core.h |
    100  atomic_t inflight;  member
|
/kernel/linux/linux-5.10/block/ |
D | blk-wbt.c |
    131  int inflight, limit;  in wbt_rqw_done() local
    133  inflight = atomic_dec_return(&rqw->inflight);  in wbt_rqw_done()
    159  if (inflight && inflight >= limit)  in wbt_rqw_done()
    163  int diff = limit - inflight;  in wbt_rqw_done()
    165  if (!inflight || diff >= rwb->wb_background / 2)  in wbt_rqw_done()
    357  unsigned int inflight = wbt_inflight(rwb);  in wb_timer_fn() local
    363  inflight);  in wb_timer_fn()
    405  if (rqd->scale_step || inflight)  in wb_timer_fn()
    750  atomic_read(&rwb->rq_wait[i].inflight));  in wbt_inflight_show()
|
D | genhd.c |
    117  unsigned int inflight = 0;  in part_in_flight() local
    121  inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +  in part_in_flight()
    124  if ((int)inflight < 0)  in part_in_flight()
    125  inflight = 0;  in part_in_flight()
    127  return inflight;  in part_in_flight()
    130  static void part_in_flight_rw(struct hd_struct *part, unsigned int inflight[2])  in part_in_flight_rw()
    134  inflight[0] = 0;  in part_in_flight_rw()
    135  inflight[1] = 0;  in part_in_flight_rw()
    137  inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);  in part_in_flight_rw()
    138  inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);  in part_in_flight_rw()
    [all …]
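part_in_flight() and part_in_flight_rw() sum per-CPU in_flight counters and clamp a transiently negative total to zero, since an increment and the matching decrement may land on different CPUs. A sketch of that summation with plain arrays standing in for the kernel's per-CPU disk statistics (NR_TOY_CPUS and the helpers are invented names):

    #include <stdio.h>

    #define NR_TOY_CPUS 4

    static long in_flight_read[NR_TOY_CPUS];
    static long in_flight_write[NR_TOY_CPUS];

    static unsigned int toy_part_in_flight(void)
    {
        long inflight = 0;
        int cpu;

        for (cpu = 0; cpu < NR_TOY_CPUS; cpu++)
            inflight += in_flight_read[cpu] + in_flight_write[cpu];

        /* If only the completion half of an update has been summed, the
         * total can dip below zero; clamp instead of wrapping. */
        return inflight < 0 ? 0 : (unsigned int)inflight;
    }

    int main(void)
    {
        in_flight_read[0] = 2;   /* two reads started on CPU 0 */
        in_flight_write[1] = 1;  /* a write started on CPU 1 ... */
        in_flight_write[3] = -1; /* ... and completed on CPU 3 */
        printf("in flight: %u\n", toy_part_in_flight());  /* 2 */
        return 0;
    }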
|
D | blk-iolatency.c |
    275  atomic_dec(&rqw->inflight);  in iolat_cleanup_cb()
    304  atomic_inc(&rqw->inflight);  in __blkcg_iolatency_throttle()
    601  int inflight = 0;  in blkcg_iolatency_done_bio() local
    623  inflight = atomic_dec_return(&rqw->inflight);  in blkcg_iolatency_done_bio()
    624  WARN_ON_ONCE(inflight < 0);  in blkcg_iolatency_done_bio()
|
D | blk-rq-qos.h |
    24  atomic_t inflight;  member
    97  atomic_set(&rq_wait->inflight, 0);  in rq_wait_init()
|
D | blk-wbt.h |
    83  ret += atomic_read(&rwb->rq_wait[i].inflight);  in wbt_inflight()
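The blk-wbt, blk-iolatency and blk-rq-qos hits above all share one rq_wait object: an atomic inflight counter bumped before dispatch, dropped on completion, and compared against a limit to decide when throttled submitters should be woken. A reduced userspace sketch of that gate, with the wakeup replaced by a printf and an invented admission helper (this is not the blk-rq-qos API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_rq_wait {
        atomic_int inflight;
        int limit;
    };

    static bool toy_may_queue(struct toy_rq_wait *rqw)
    {
        /* Optimistically claim a slot; back out if we overshot the limit. */
        if (atomic_fetch_add(&rqw->inflight, 1) + 1 <= rqw->limit)
            return true;
        atomic_fetch_sub(&rqw->inflight, 1);
        return false;
    }

    static void toy_done(struct toy_rq_wait *rqw)
    {
        int inflight = atomic_fetch_sub(&rqw->inflight, 1) - 1;

        if (inflight < rqw->limit)
            printf("below limit (%d/%d): wake one waiter\n",
                   inflight, rqw->limit);
    }

    int main(void)
    {
        struct toy_rq_wait rqw = { .inflight = 0, .limit = 2 };

        toy_may_queue(&rqw);
        toy_may_queue(&rqw);
        printf("third request admitted? %s\n",
               toy_may_queue(&rqw) ? "yes" : "no");  /* no */
        toy_done(&rqw);                              /* frees a slot */
        return 0;
    }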
|
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
D | i915_scheduler.c |
    196  const struct i915_request *inflight;  in kick_submission() local
    208  inflight = execlists_active(&engine->execlists);  in kick_submission()
    209  if (!inflight)  in kick_submission()
    216  if (inflight->context == rq->context)  in kick_submission()
    223  inflight->fence.context, inflight->fence.seqno,  in kick_submission()
    224  inflight->sched.attr.priority);  in kick_submission()
    227  if (need_preempt(prio, rq_prio(inflight)))  in kick_submission()
|
/kernel/linux/linux-5.10/tools/io_uring/ |
D | io_uring-bench.c |
    80  int inflight;  member
    272  s->inflight -= reaped;  in reap_events()
    292  if (!prepped && s->inflight < DEPTH) {  in submitter_fn()
    293  to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);  in submitter_fn()
    296  s->inflight += prepped;  in submitter_fn()
    300  if (to_submit && (s->inflight + to_submit <= DEPTH))  in submitter_fn()
    303  to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);  in submitter_fn()
    341  if (s->inflight)  in submitter_fn()
    581  this_done - done, rpc, ipc, s->inflight,  in main()
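io_uring-bench.c keeps s->inflight bounded by DEPTH, preparing work in BATCH_SUBMIT-sized chunks and reaping completions in BATCH_COMPLETE-sized chunks. The loop below models only that accounting (the "device" here completes everything instantly); the constants reuse names from the benchmark, but the harness itself is invented and does no real I/O:

    #include <stdio.h>

    #define DEPTH          32
    #define BATCH_SUBMIT   8
    #define BATCH_COMPLETE 8

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int inflight = 0, done = 0, total = 100;

        while (done < total) {
            /* Prepare and submit without exceeding the queue depth. */
            int to_submit = min_int(DEPTH - inflight, BATCH_SUBMIT);

            to_submit = min_int(to_submit, total - done - inflight);
            inflight += to_submit;

            /* Reap a batch of completions. */
            int reaped = min_int(inflight, BATCH_COMPLETE);

            inflight -= reaped;
            done += reaped;
        }
        printf("completed %d requests, %d still in flight\n", done, inflight);
        return 0;
    }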
|
/kernel/linux/linux-5.10/net/atm/ |
D | pppoatm.c |
    64  atomic_t inflight;  member
    137  atomic_dec(&pvcc->inflight);  in pppoatm_pop()
    242  atomic_inc_not_zero(&pvcc->inflight))  in pppoatm_may_send()
    272  atomic_inc_not_zero(&pvcc->inflight))  in pppoatm_may_send()
    408  atomic_set(&pvcc->inflight, NONE_INFLIGHT);  in pppoatm_assign_vcc()
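pppoatm gates transmission with atomic_inc_not_zero(&pvcc->inflight): the counter starts at NONE_INFLIGHT so that reaching zero means the small per-VCC queue budget is used up, and pppoatm_pop() releases a slot with a plain decrement when the driver frees a transmitted packet. The sketch below assumes a negative bias of -2 for a two-packet budget; that value and the CAS-based inc_not_zero helper are illustrative assumptions, not the kernel definitions:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TOY_NONE_INFLIGHT (-2)  /* assumed bias: -2 empty, -1 one queued, 0 full */

    static bool inc_not_zero(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old != 0)
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return true;
        return false;               /* counter hit zero: budget exhausted */
    }

    int main(void)
    {
        atomic_int inflight = TOY_NONE_INFLIGHT;

        printf("queue 1st packet: %s\n", inc_not_zero(&inflight) ? "ok" : "blocked");
        printf("queue 2nd packet: %s\n", inc_not_zero(&inflight) ? "ok" : "blocked");
        printf("queue 3rd packet: %s\n", inc_not_zero(&inflight) ? "ok" : "blocked");

        atomic_fetch_sub(&inflight, 1);   /* TX-done pop frees one slot */
        printf("after pop, queue again: %s\n",
               inc_not_zero(&inflight) ? "ok" : "blocked");
        return 0;
    }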
|
/kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/ |
D | rtrs-clt-stats.c |
    100  atomic_read(&stats->inflight), sum.failover_cnt);  in rtrs_clt_stats_rdma_to_str()
    157  atomic_set(&s->inflight, 0);  in rtrs_clt_reset_all_stats()
    184  atomic_inc(&stats->inflight);  in rtrs_clt_update_all_stats()
|
D | README |
    100  corresponding path is disconnected, all the inflight IO are failed over to a
    131  inflight IO and for the error code.
    149  inflight IO and for the error code. The new rkey is sent back using
    171  outstanding inflight IO and the error code.
    192  outstanding inflight IO and the error code. The new rkey is sent back using
|
/kernel/linux/linux-5.10/net/ipv4/ |
D | tcp_bbr.c |
    412  u32 inflight;  in bbr_inflight() local
    414  inflight = bbr_bdp(sk, bw, gain);  in bbr_inflight()
    415  inflight = bbr_quantization_budget(sk, inflight);  in bbr_inflight()
    417  return inflight;  in bbr_inflight()
    559  u32 inflight, bw;  in bbr_is_next_cycle_phase() local
    567  inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);  in bbr_is_next_cycle_phase()
    578  inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));  in bbr_is_next_cycle_phase()
    585  inflight <= bbr_inflight(sk, bw, BBR_UNIT);  in bbr_is_next_cycle_phase()
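bbr_inflight() derives a target from the bandwidth-delay product scaled by a fixed-point gain, then adds a quantization budget; bbr_is_next_cycle_phase() compares the measured in-flight data against targets computed at the probing gain and at BBR_UNIT. The arithmetic below is a rough illustration only: the 8-bit gain unit, the microsecond RTT units and the 3-packet budget are assumptions for the sketch, not the kernel's exact formula.

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_BBR_UNIT 256u   /* assumed fixed-point unit for gains */

    static uint32_t toy_bdp(uint64_t bw_pkts_per_sec, uint32_t min_rtt_us, uint32_t gain)
    {
        uint64_t bdp = bw_pkts_per_sec * min_rtt_us / 1000000;  /* packets in the pipe */

        /* Scale by the gain, rounding up. */
        return (uint32_t)((bdp * gain + TOY_BBR_UNIT - 1) / TOY_BBR_UNIT);
    }

    static uint32_t toy_inflight_target(uint64_t bw, uint32_t min_rtt_us, uint32_t gain)
    {
        return toy_bdp(bw, min_rtt_us, gain) + 3;  /* assumed quantization budget */
    }

    int main(void)
    {
        uint64_t bw = 12500;    /* ~150 Mbit/s of 1500-byte packets */
        uint32_t rtt = 20000;   /* 20 ms in microseconds */

        printf("cruise target (gain 1.0):  %u pkts\n",
               toy_inflight_target(bw, rtt, TOY_BBR_UNIT));
        printf("probe target  (gain 1.25): %u pkts\n",
               toy_inflight_target(bw, rtt, TOY_BBR_UNIT * 5 / 4));
        return 0;
    }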
|
/kernel/linux/linux-5.10/drivers/vhost/ |
D | scsi.c |
    113  struct vhost_scsi_inflight *inflight;  member
    224  struct vhost_scsi_inflight *inflight;  member
    251  struct vhost_scsi_inflight *inflight;  in vhost_scsi_done_inflight() local
    253  inflight = container_of(kref, struct vhost_scsi_inflight, kref);  in vhost_scsi_done_inflight()
    254  complete(&inflight->comp);  in vhost_scsi_done_inflight()
    287  struct vhost_scsi_inflight *inflight;  in vhost_scsi_get_inflight() local
    291  inflight = &svq->inflights[svq->inflight_idx];  in vhost_scsi_get_inflight()
    292  kref_get(&inflight->kref);  in vhost_scsi_get_inflight()
    294  return inflight;  in vhost_scsi_get_inflight()
    297  static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)  in vhost_scsi_put_inflight() argument
    [all …]
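The vhost_scsi hits outline a drain mechanism: each command takes a kref on the current inflight tracker, and when the last reference is put, vhost_scsi_done_inflight() fires a completion so a flush can wait for all outstanding commands. Below, kref and completion are reduced to an atomic counter and a flag; this is an illustration of the pattern, not the vhost API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_inflight {
        atomic_int refcount;
        bool drained;            /* stands in for complete(&inflight->comp) */
    };

    static void toy_get(struct toy_inflight *i)
    {
        atomic_fetch_add(&i->refcount, 1);
    }

    static void toy_put(struct toy_inflight *i)
    {
        if (atomic_fetch_sub(&i->refcount, 1) == 1)
            i->drained = true;   /* last outstanding reference dropped */
    }

    int main(void)
    {
        /* The flush path itself holds the initial reference. */
        struct toy_inflight inflight = { .refcount = 1, .drained = false };

        toy_get(&inflight);      /* command A issued */
        toy_get(&inflight);      /* command B issued */

        toy_put(&inflight);      /* command A completed */
        toy_put(&inflight);      /* command B completed */
        toy_put(&inflight);      /* flush drops the initial reference */

        printf("all in-flight commands drained: %s\n",
               inflight.drained ? "yes" : "no");
        return 0;
    }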
|
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/net/ |
D | brcm,bcm7445-switch-v4.0.txt |
    43  - brcm,acb-packets-inflight: boolean property, if present indicates that the switch
    92  brcm,acb-packets-inflight;
    129  brcm,acb-packets-inflight;
|
/kernel/linux/linux-5.10/Documentation/networking/ |
D | page_pool.rst |
    14  API keeps track of inflight pages, in order to let API user know
    22  release the DMA mapping and inflight state accounting.
    91  inflight counters.
|
/kernel/linux/linux-5.10/drivers/crypto/cavium/cpt/ |
D | cpt_hw_types.h |
    443  u64 inflight:8;  member
    445  u64 inflight:8;
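The two u64 inflight:8 members are the little- and big-endian halves of a hardware register layout, exposing an 8-bit count of requests the CPT engine still has in flight. The union below shows how such a field is typically read; the union name and bit placement here are invented for illustration (little-endian layout assumed), not taken from cpt_hw_types.h:

    #include <stdint.h>
    #include <stdio.h>

    union toy_inprog_reg {
        uint64_t u;                   /* raw 64-bit register value */
        struct {
            uint64_t inflight:8;      /* requests currently executing */
            uint64_t reserved:56;
        } s;
    };

    int main(void)
    {
        union toy_inprog_reg reg = { .u = 0x0000000000000005ULL };

        printf("inflight requests: %u\n", (unsigned)reg.s.inflight);  /* 5 */
        return 0;
    }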
|
/kernel/linux/linux-5.10/net/sctp/ |
D | output.c |
    651  size_t datasize, rwnd, inflight, flight_size;  in sctp_packet_can_append_data() local
    670  inflight = q->outstanding_bytes;  in sctp_packet_can_append_data()
    675  if (datasize > rwnd && inflight > 0)  in sctp_packet_can_append_data()
    703  if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&  in sctp_packet_can_append_data()
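sctp_packet_can_append_data() uses the outstanding byte count twice: a chunk larger than the peer's receive window must wait while anything is in flight, and a small chunk goes out immediately only when Nagle is off or nothing is outstanding. The decision helper below is a simplified illustration of those two checks; the real function weighs several additional conditions:

    #include <stdbool.h>
    #include <stdio.h>

    static bool toy_can_append(size_t datasize, size_t rwnd, size_t inflight,
                               bool nodelay, bool fills_packet)
    {
        if (datasize > rwnd && inflight > 0)
            return false;            /* window-limited: wait for SACKs */

        if (nodelay || inflight == 0 || fills_packet)
            return true;             /* send now */

        return false;                /* Nagle: hold the small chunk back */
    }

    int main(void)
    {
        printf("small chunk, Nagle on, data outstanding: %s\n",
               toy_can_append(100, 65536, 1400, false, false) ? "send" : "hold");
        printf("small chunk, nothing outstanding:        %s\n",
               toy_can_append(100, 65536, 0, false, false) ? "send" : "hold");
        return 0;
    }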
|