/drivers/gpu/drm/i915/gt/ |
D | intel_gt_requests.c |
      21  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
      31  return !list_empty(&engine->kernel_context->timeline->requests);  in engine_active()
     208  container_of(work, typeof(*gt), requests.retire_work.work);  in retire_work_handler()
     210  schedule_delayed_work(&gt->requests.retire_work,  in retire_work_handler()
     217  INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);  in intel_gt_init_requests()
     222  cancel_delayed_work(&gt->requests.retire_work);  in intel_gt_park_requests()
     227  schedule_delayed_work(&gt->requests.retire_work,  in intel_gt_unpark_requests()
     234  cancel_delayed_work_sync(&gt->requests.retire_work);  in intel_gt_fini_requests()
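The hits above trace the full lifecycle of a self-rearming delayed work item that periodically retires completed requests: init, arm on unpark, lazy cancel on park, synchronous cancel on teardown. A minimal sketch of that pattern, using hypothetical names (my_dev, retire_fn) rather than the i915 types:

#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct delayed_work retire_work;	/* periodic retire tick */
	bool awake;				/* set while device is unparked */
};

static void retire_fn(struct work_struct *work)
{
	/* Recover the container from the embedded work_struct, as the
	 * container_of() hit at line 208 does for struct intel_gt. */
	struct my_dev *dev =
		container_of(work, struct my_dev, retire_work.work);

	/* ... retire completed requests here ... */

	/* Re-arm ourselves while the device stays busy. */
	if (dev->awake)
		schedule_delayed_work(&dev->retire_work,
				      round_jiffies_up_relative(HZ));
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->retire_work, retire_fn);
}

static void my_dev_unpark(struct my_dev *dev)
{
	dev->awake = true;
	schedule_delayed_work(&dev->retire_work,
			      round_jiffies_up_relative(HZ));
}

static void my_dev_park(struct my_dev *dev)
{
	dev->awake = false;
	cancel_delayed_work(&dev->retire_work);	/* handler may still be running */
}

static void my_dev_fini(struct my_dev *dev)
{
	cancel_delayed_work_sync(&dev->retire_work);	/* wait for it to finish */
}

Note the asymmetry: parking uses cancel_delayed_work() because a late retire pass is harmless, while teardown must use the _sync variant so the handler cannot touch freed memory.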
|
D | intel_timeline.c |
     103  INIT_LIST_HEAD(&timeline->requests);  in intel_timeline_init()
     394  GEM_BUG_ON(!list_empty(&timeline->requests));  in __intel_timeline_free()
     438  list_for_each_entry_safe(rq, rn, &tl->requests, link) {  in intel_gt_show_timelines()
     463  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in intel_gt_show_timelines()
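list_for_each_entry_safe() appears here because entries may be unlinked mid-walk; the _safe variant caches the next pointer before the body runs. A generic sketch of the idiom (struct item and drain() are illustrative, not the i915 types):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
};

/* Unlink and free every item; 'n' holds the successor so that
 * freeing 'it' cannot poison the iteration. */
static void drain(struct list_head *head)
{
	struct item *it, *n;

	list_for_each_entry_safe(it, n, head, link) {
		list_del(&it->link);
		kfree(it);
	}
}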
|
D | intel_ring.c |
     199  GEM_BUG_ON(list_empty(&tl->requests));  in wait_for_space()
     200  list_for_each_entry(target, &tl->requests, link) {  in wait_for_space()
     210  if (GEM_WARN_ON(&target->link == &tl->requests))  in wait_for_space()
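wait_for_space() scans the request FIFO for the first entry whose retirement would free enough ring space; the GEM_WARN_ON at line 210 catches the case where the loop walked off the end. The idiom being tested is that after list_for_each_entry() terminates without a break, the cursor's link member points at the list head sentinel itself. A hedged sketch (struct request and space_freed are illustrative):

#include <linux/list.h>

struct request {
	struct list_head link;
	unsigned int space_freed;	/* illustrative field */
};

/* Return the first queued request that frees at least 'bytes', or
 * NULL if the iteration exhausted the list. */
static struct request *find_target(struct list_head *head, unsigned int bytes)
{
	struct request *target;

	list_for_each_entry(target, head, link) {
		if (target->space_freed >= bytes)
			break;
	}

	if (&target->link == head)	/* ran past the tail: no match */
		return NULL;

	return target;
}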
|
D | intel_gt.c |
     493  struct i915_request *requests[I915_NUM_ENGINES] = {};  in __engines_record_defaults() local
     540  requests[id] = i915_request_get(rq);  in __engines_record_defaults()
     557  for (id = 0; id < ARRAY_SIZE(requests); id++) {  in __engines_record_defaults()
     561  rq = requests[id];  in __engines_record_defaults()
     592  for (id = 0; id < ARRAY_SIZE(requests); id++) {  in __engines_record_defaults()
     596  rq = requests[id];  in __engines_record_defaults()
|
D | intel_timeline_types.h |
      57  struct list_head requests;  member
|
/drivers/gpu/drm/i915/gt/uc/ |
D | intel_guc_ct.c |
     107  spin_lock_init(&ct->requests.lock);  in intel_guc_ct_init_early()
     108  INIT_LIST_HEAD(&ct->requests.pending);  in intel_guc_ct_init_early()
     109  INIT_LIST_HEAD(&ct->requests.incoming);  in intel_guc_ct_init_early()
     110  INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);  in intel_guc_ct_init_early()
     377  return ++ct->requests.last_fence;  in ct_get_next_fence()
     709  spin_lock(&ct->requests.lock);  in ct_send()
     710  list_add_tail(&request.link, &ct->requests.pending);  in ct_send()
     711  spin_unlock(&ct->requests.lock);  in ct_send()
     746  spin_lock_irqsave(&ct->requests.lock, flags);  in ct_send()
     748  spin_unlock_irqrestore(&ct->requests.lock, flags);  in ct_send()
     [all …]
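The GuC CT hits pair a spinlock-protected pending list with a monotonically increasing fence, so a reply carrying only the fence can be matched back to its sender. A minimal sketch of the send-side bookkeeping under those assumptions (ct_channel and ct_request are hypothetical names, not the driver's types):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ct_request {
	struct list_head link;
	u32 fence;			/* matches a reply to its sender */
};

struct ct_channel {
	spinlock_t lock;		/* protects the list and the fence */
	struct list_head pending;	/* sent, awaiting a reply */
	u32 last_fence;
};

static void ct_init_early(struct ct_channel *ct)
{
	spin_lock_init(&ct->lock);
	INIT_LIST_HEAD(&ct->pending);
}

/* Fences only need to be unique across the in-flight window, so a
 * plain increment under the lock suffices. */
static u32 ct_next_fence(struct ct_channel *ct)
{
	return ++ct->last_fence;
}

static void ct_track_send(struct ct_channel *ct, struct ct_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ct->lock, flags);
	req->fence = ct_next_fence(ct);
	list_add_tail(&req->link, &ct->pending);
	spin_unlock_irqrestore(&ct->lock, flags);
}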
|
/drivers/media/v4l2-core/ |
D | v4l2-ctrls-request.c |
      21  INIT_LIST_HEAD(&hdl->requests);  in v4l2_ctrl_handler_init_request()
      39  if (hdl->req_obj.ops || list_empty(&hdl->requests))  in v4l2_ctrl_handler_free_request()
      47  list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {  in v4l2_ctrl_handler_free_request()
     102  list_del_init(&hdl->requests);  in v4l2_ctrl_request_unbind()
     163  list_add_tail(&hdl->requests, &from->requests);  in v4l2_ctrl_request_bind()
|
/drivers/base/ |
D | devtmpfs.c |
      47  } *requests;  variable
     105  req->next = requests;  in devtmpfs_submit_req()
     106  requests = req;  in devtmpfs_submit_req()
     396  while (requests) {  in devtmpfs_work_loop()
     397  struct req *req = requests;  in devtmpfs_work_loop()
     398  requests = NULL;  in devtmpfs_work_loop()
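devtmpfs keeps a bare singly-linked stack of requests: submitters push onto the head, and the worker detaches the whole chain in one assignment (requests = NULL at line 398) before processing it without the lock held. A generic sketch of that hand-off (the req_lock and process() names are illustrative):

#include <linux/spinlock.h>

struct req {
	struct req *next;
	/* ... payload ... */
};

static struct req *requests;		/* LIFO of submitted requests */
static DEFINE_SPINLOCK(req_lock);

static void submit(struct req *req)
{
	spin_lock(&req_lock);
	req->next = requests;		/* push onto the shared stack */
	requests = req;
	spin_unlock(&req_lock);
}

static void process(struct req *req)
{
	/* illustrative consumer: handle and release the request */
}

static void work_loop(void)
{
	while (requests) {
		struct req *req;

		/* Detach the whole chain at once; new submissions then
		 * start a fresh list instead of racing the consumer. */
		spin_lock(&req_lock);
		req = requests;
		requests = NULL;
		spin_unlock(&req_lock);

		while (req) {
			struct req *next = req->next;

			process(req);
			req = next;
		}
	}
}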
|
/drivers/iio/adc/ |
D | twl4030-madc.c |
     166  struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];  member
     498  madc->requests[i].result_pending = true;  in twl4030_madc_threaded_irq_handler()
     501  r = &madc->requests[i];  in twl4030_madc_threaded_irq_handler()
     523  r = &madc->requests[i];  in twl4030_madc_threaded_irq_handler()
     624  if (twl4030_madc->requests[req->method].active) {  in twl4030_madc_conversion()
     655  twl4030_madc->requests[req->method].active = true;  in twl4030_madc_conversion()
     659  twl4030_madc->requests[req->method].active = false;  in twl4030_madc_conversion()
     664  twl4030_madc->requests[req->method].active = false;  in twl4030_madc_conversion()
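twl4030-madc.c allows one outstanding conversion per trigger method: the slot's active flag is tested and set under a lock so a second caller backs off with -EBUSY, and the flag is cleared on both the completion and error paths. A minimal sketch of that guard (madc_dev, NUM_METHODS and start_conversion() are illustrative names):

#include <linux/mutex.h>
#include <linux/errno.h>

#define NUM_METHODS 3			/* illustrative */

struct madc_slot {
	bool active;			/* conversion in flight */
	bool result_pending;		/* IRQ saw a result for this slot */
};

struct madc_dev {
	struct mutex lock;		/* mutex_init() at probe time */
	struct madc_slot requests[NUM_METHODS];
};

static int start_conversion(struct madc_dev *madc, int method)
{
	int ret = 0;

	mutex_lock(&madc->lock);
	if (madc->requests[method].active) {
		ret = -EBUSY;		/* one request per method at a time */
		goto out;
	}
	madc->requests[method].active = true;
	/* ... kick the hardware; clear .active on completion or error ... */
out:
	mutex_unlock(&madc->lock);
	return ret;
}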
|
/drivers/infiniband/ulp/rtrs/ |
D | Kconfig |
      26  requests received from the RTRS client module, it will pass the
      27  IO requests to its user eg. RNBD_server.
|
/drivers/gpu/drm/i915/ |
D | i915_scheduler.h |
      19  list_for_each_entry(it, &(plist)->requests, sched.link)
      22  list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
|
D | i915_scheduler.c |
      85  return &p->requests;  in i915_sched_lookup_priolist()
     111  INIT_LIST_HEAD(&p->requests);  in i915_sched_lookup_priolist()
     116  return &p->requests;  in i915_sched_lookup_priolist()
     467  INIT_LIST_HEAD(&sched_engine->requests);  in i915_sched_engine_create()
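i915_sched_lookup_priolist() returns the request list head for a given priority, initialising the bucket lazily on first use; the macros in i915_scheduler.h above then walk each bucket's requests list. A simplified sketch of such a lookup, using a flat bucket array instead of the driver's actual priority tree (all names illustrative):

#include <linux/list.h>
#include <linux/types.h>

#define NR_PRIO 4			/* illustrative; the driver keys a tree by priority */

struct priolist {
	struct list_head requests;
	bool used;
};

static struct priolist buckets[NR_PRIO];

/* Return the request list for 'prio', mirroring the
 * INIT_LIST_HEAD()/return &p->requests pairing in the hits above. */
static struct list_head *lookup_priolist(int prio)
{
	struct priolist *p = &buckets[prio];

	if (!p->used) {
		INIT_LIST_HEAD(&p->requests);
		p->used = true;
	}
	return &p->requests;
}

Keeping one FIFO per priority means the scheduler's dequeue path never sorts: it picks the highest-priority non-empty bucket and pops from the head.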
|
D | i915_request.c |
     329  &i915_request_timeline(rq)->requests));  in i915_request_retire()
     378  tmp = list_first_entry(&tl->requests, typeof(*tmp), link);  in i915_request_retire_upto()
     779  list_for_each_entry_safe(rq, rn, &tl->requests, link)  in retire_requests()
     800  if (list_empty(&tl->requests))  in request_alloc_slow()
     804  rq = list_first_entry(&tl->requests, typeof(*rq), link);  in request_alloc_slow()
     813  rq = list_last_entry(&tl->requests, typeof(*rq), link);  in request_alloc_slow()
     963  list_add_tail_rcu(&rq->link, &tl->requests);  in __i915_request_create()
     993  rq = list_first_entry(&tl->requests, typeof(*rq), link);  in i915_request_create()
     994  if (!list_is_last(&rq->link, &tl->requests))  in i915_request_create()
    1044  if (pos == &rcu_dereference(signal->timeline)->requests)  in i915_request_await_start()
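__i915_request_create() publishes the new request with list_add_tail_rcu() (line 963), and i915_request_await_start() walks the timeline under RCU with rcu_dereference() (line 1044). A generic sketch of that publish/read pairing (struct req, struct timeline and the seqno field are illustrative, not the driver's types):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct req {
	struct list_head link;
	u64 seqno;
};

struct timeline {
	spinlock_t lock;		/* serialises writers; spin_lock_init() at create */
	struct list_head requests;
};

static void publish(struct timeline *tl, struct req *rq)
{
	spin_lock(&tl->lock);
	/* The RCU variant orders initialisation of *rq before the pointer
	 * update, so lockless readers never see a half-built entry. */
	list_add_tail_rcu(&rq->link, &tl->requests);
	spin_unlock(&tl->lock);
}

static u64 last_seqno(struct timeline *tl)
{
	struct req *rq;
	u64 seqno = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rq, &tl->requests, link)
		seqno = rq->seqno;	/* safe to read concurrently */
	rcu_read_unlock();

	return seqno;
}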
|
D | i915_priolist_types.h |
      42  struct list_head requests;  member
|
D | i915_scheduler_types.h |
     120  struct list_head requests;  member
|
/drivers/media/pci/tw686x/ |
D | tw686x.h |
     172  void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
     178  void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
|
/drivers/crypto/allwinner/ |
D | Kconfig |
      42  the number of requests per algorithm.
      69  the number of requests per flow and per algorithm.
     122  the number of requests per flow and per algorithm.
|
/drivers/staging/greybus/tools/ |
D | README.loopback |
      41  requests_completed - Number of requests successfully completed.
      42  requests_timedout - Number of requests that have timed out.
     180  requests per-sec: min=390, max=547, average=469.299988, jitter=157
     194  requests per-sec: min=397, max=538, average=461.700012, jitter=141
|
/drivers/mailbox/ |
D | bcm-flexrm-mailbox.c |
     276  struct brcm_message *requests[RING_MAX_REQ_COUNT];  member
    1008  ring->requests[reqid] = msg;  in flexrm_new_request()
    1013  ring->requests[reqid] = NULL;  in flexrm_new_request()
    1075  ring->requests[reqid] = NULL;  in flexrm_new_request()
    1133  msg = ring->requests[reqid];  in flexrm_process_completions()
    1142  ring->requests[reqid] = NULL;  in flexrm_process_completions()
    1408  msg = ring->requests[reqid];  in flexrm_shutdown()
    1413  ring->requests[reqid] = NULL;  in flexrm_shutdown()
    1568  memset(ring->requests, 0, sizeof(ring->requests));  in flexrm_mbox_probe()
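bcm-flexrm-mailbox.c tracks in-flight messages in a fixed table indexed by a per-ring request id, so a completion descriptor carrying only the id maps straight back to its message. A sketch of that slot table using an IDA for id allocation (the real driver's allocator may differ; struct ring, submit() and retire() are illustrative names):

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

#define MAX_REQ 1024			/* illustrative ring depth */

struct msg;				/* opaque payload */

struct ring {
	struct ida ids;			/* ida_init() at probe; hands out slot indices */
	struct msg *requests[MAX_REQ];	/* in-flight message per reqid */
};

static int submit(struct ring *ring, struct msg *m)
{
	int reqid = ida_alloc_max(&ring->ids, MAX_REQ - 1, GFP_KERNEL);

	if (reqid < 0)
		return reqid;		/* table full */
	ring->requests[reqid] = m;

	/* ... write reqid into the hardware descriptor ... */
	return 0;
}

static struct msg *retire(struct ring *ring, int reqid)
{
	struct msg *m = ring->requests[reqid];

	ring->requests[reqid] = NULL;	/* slot free for reuse */
	ida_free(&ring->ids, reqid);
	return m;
}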
|
/drivers/gpu/drm/i915/gt/selftests/ |
D | mock_timeline.c |
      19  INIT_LIST_HEAD(&timeline->requests);  in mock_timeline_init()
|
/drivers/input/misc/ |
D | uinput.c |
      67  struct uinput_request *requests[UINPUT_NUM_REQUESTS];  member
     105  if (!udev->requests[id]) {  in uinput_request_alloc_id()
     107  udev->requests[id] = request;  in uinput_request_alloc_id()
     124  return udev->requests[id];  in uinput_request_find()
     140  udev->requests[id] = NULL;  in uinput_request_release_slot()
     210  request = udev->requests[i];  in uinput_flush_requests()
|
/drivers/gpu/drm/i915/selftests/ |
D | i915_mock_selftests.h |
      27  selftest(requests, i915_request_mock_selftests)
|
D | i915_live_selftests.h |
      28  selftest(requests, i915_request_live_selftests)
|
/drivers/gpu/drm/i915/gem/ |
D | i915_gem_throttle.c |
      69  &ce->timeline->requests,  in i915_gem_throttle_ioctl()
|
/drivers/acpi/acpica/ |
D | dbstats.c |
      86  "%8.2X %8.2X %8.2X %8.2X\n", list->requests, list->hits,  in acpi_db_list_info()
      87  list->requests - list->hits, list->object_size);  in acpi_db_list_info()
|