Lines Matching refs:ce

208 struct intel_context *ce; in igt_request_rewind() local
213 ce = i915_gem_context_get_engine(ctx[0], RCS0); in igt_request_rewind()
214 GEM_BUG_ON(IS_ERR(ce)); in igt_request_rewind()
215 request = mock_request(ce, 2 * HZ); in igt_request_rewind()
216 intel_context_put(ce); in igt_request_rewind()
227 ce = i915_gem_context_get_engine(ctx[1], RCS0); in igt_request_rewind()
228 GEM_BUG_ON(IS_ERR(ce)); in igt_request_rewind()
229 vip = mock_request(ce, 0); in igt_request_rewind()
230 intel_context_put(ce); in igt_request_rewind()
276 struct i915_request *(*request_alloc)(struct intel_context *ce);
280 __mock_request_alloc(struct intel_context *ce) in __mock_request_alloc() argument
282 return mock_request(ce, 0); in __mock_request_alloc()
286 __live_request_alloc(struct intel_context *ce) in __live_request_alloc() argument
288 return intel_context_create_request(ce); in __live_request_alloc()
357 struct intel_context *ce; in __igt_breadcrumbs_smoketest() local
359 ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx); in __igt_breadcrumbs_smoketest()
360 GEM_BUG_ON(IS_ERR(ce)); in __igt_breadcrumbs_smoketest()
361 rq = t->request_alloc(ce); in __igt_breadcrumbs_smoketest()
362 intel_context_put(ce); in __igt_breadcrumbs_smoketest()
634 struct intel_context *ce; in __cancel_inactive() local
642 ce = intel_context_create(engine); in __cancel_inactive()
643 if (IS_ERR(ce)) { in __cancel_inactive()
644 err = PTR_ERR(ce); in __cancel_inactive()
648 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in __cancel_inactive()
677 intel_context_put(ce); in __cancel_inactive()
687 struct intel_context *ce; in __cancel_active() local
695 ce = intel_context_create(engine); in __cancel_active()
696 if (IS_ERR(ce)) { in __cancel_active()
697 err = PTR_ERR(ce); in __cancel_active()
701 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in __cancel_active()
738 intel_context_put(ce); in __cancel_active()
748 struct intel_context *ce; in __cancel_completed() local
756 ce = intel_context_create(engine); in __cancel_completed()
757 if (IS_ERR(ce)) { in __cancel_completed()
758 err = PTR_ERR(ce); in __cancel_completed()
762 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in __cancel_completed()
787 intel_context_put(ce); in __cancel_completed()
1745 static int switch_to_kernel_sync(struct intel_context *ce, int err) in switch_to_kernel_sync() argument
1750 rq = intel_engine_create_kernel_request(ce->engine); in switch_to_kernel_sync()
1754 fence = i915_active_fence_get(&ce->timeline->last_request); in switch_to_kernel_sync()
1766 while (!err && !intel_engine_is_idle(ce->engine)) in switch_to_kernel_sync()
1767 intel_engine_flush_submission(ce->engine); in switch_to_kernel_sync()
1783 struct intel_context *ce[]; member
1816 static u32 *emit_timestamp_store(u32 *cs, struct intel_context *ce, u32 offset) in emit_timestamp_store() argument
1819 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP((ce->engine->mmio_base))); in emit_timestamp_store()
1860 static u32 *hwsp_scratch(const struct intel_context *ce) in hwsp_scratch() argument
1862 return memset32(ce->engine->status_page.addr + 1000, 0, 21); in hwsp_scratch()
1865 static u32 hwsp_offset(const struct intel_context *ce, u32 *dw) in hwsp_offset() argument
1867 return (i915_ggtt_offset(ce->engine->status_page.vma) + in hwsp_offset()
1871 static int measure_semaphore_response(struct intel_context *ce) in measure_semaphore_response() argument
1873 u32 *sema = hwsp_scratch(ce); in measure_semaphore_response()
1874 const u32 offset = hwsp_offset(ce, sema); in measure_semaphore_response()
1894 rq = i915_request_create(ce); in measure_semaphore_response()
1908 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); in measure_semaphore_response()
1922 cycles = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); in measure_semaphore_response()
1936 ce->engine->name, cycles >> TF_BIAS, in measure_semaphore_response()
1937 cycles_to_ns(ce->engine, cycles)); in measure_semaphore_response()
1939 return intel_gt_wait_for_idle(ce->engine->gt, HZ); in measure_semaphore_response()
1942 intel_gt_set_wedged(ce->engine->gt); in measure_semaphore_response()
1946 static int measure_idle_dispatch(struct intel_context *ce) in measure_idle_dispatch() argument
1948 u32 *sema = hwsp_scratch(ce); in measure_idle_dispatch()
1949 const u32 offset = hwsp_offset(ce, sema); in measure_idle_dispatch()
1969 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); in measure_idle_dispatch()
1973 rq = i915_request_create(ce); in measure_idle_dispatch()
1986 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); in measure_idle_dispatch()
1992 elapsed[i] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); in measure_idle_dispatch()
1998 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); in measure_idle_dispatch()
2007 ce->engine->name, cycles >> TF_BIAS, in measure_idle_dispatch()
2008 cycles_to_ns(ce->engine, cycles)); in measure_idle_dispatch()
2010 return intel_gt_wait_for_idle(ce->engine->gt, HZ); in measure_idle_dispatch()
2013 intel_gt_set_wedged(ce->engine->gt); in measure_idle_dispatch()
2017 static int measure_busy_dispatch(struct intel_context *ce) in measure_busy_dispatch() argument
2019 u32 *sema = hwsp_scratch(ce); in measure_busy_dispatch()
2020 const u32 offset = hwsp_offset(ce, sema); in measure_busy_dispatch()
2041 rq = i915_request_create(ce); in measure_busy_dispatch()
2056 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); in measure_busy_dispatch()
2067 elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); in measure_busy_dispatch()
2084 ce->engine->name, cycles >> TF_BIAS, in measure_busy_dispatch()
2085 cycles_to_ns(ce->engine, cycles)); in measure_busy_dispatch()
2087 return intel_gt_wait_for_idle(ce->engine->gt, HZ); in measure_busy_dispatch()
2090 intel_gt_set_wedged(ce->engine->gt); in measure_busy_dispatch()
2120 static int measure_inter_request(struct intel_context *ce) in measure_inter_request() argument
2122 u32 *sema = hwsp_scratch(ce); in measure_inter_request()
2123 const u32 offset = hwsp_offset(ce, sema); in measure_inter_request()
2142 err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0); in measure_inter_request()
2152 intel_engine_flush_submission(ce->engine); in measure_inter_request()
2157 rq = i915_request_create(ce); in measure_inter_request()
2178 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); in measure_inter_request()
2184 intel_engine_flush_submission(ce->engine); in measure_inter_request()
2188 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); in measure_inter_request()
2197 ce->engine->name, cycles >> TF_BIAS, in measure_inter_request()
2198 cycles_to_ns(ce->engine, cycles)); in measure_inter_request()
2200 return intel_gt_wait_for_idle(ce->engine->gt, HZ); in measure_inter_request()
2207 intel_gt_set_wedged(ce->engine->gt); in measure_inter_request()
2211 static int measure_context_switch(struct intel_context *ce) in measure_context_switch() argument
2213 u32 *sema = hwsp_scratch(ce); in measure_context_switch()
2214 const u32 offset = hwsp_offset(ce, sema); in measure_context_switch()
2233 err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0); in measure_context_switch()
2239 ce, ce->engine->kernel_context in measure_context_switch()
2268 cs = emit_timestamp_store(cs, ce, addr); in measure_context_switch()
2280 intel_engine_flush_submission(ce->engine); in measure_context_switch()
2283 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); in measure_context_switch()
2292 ce->engine->name, cycles >> TF_BIAS, in measure_context_switch()
2293 cycles_to_ns(ce->engine, cycles)); in measure_context_switch()
2295 return intel_gt_wait_for_idle(ce->engine->gt, HZ); in measure_context_switch()
2301 intel_gt_set_wedged(ce->engine->gt); in measure_context_switch()
2305 static int measure_preemption(struct intel_context *ce) in measure_preemption() argument
2307 u32 *sema = hwsp_scratch(ce); in measure_preemption()
2308 const u32 offset = hwsp_offset(ce, sema); in measure_preemption()
2330 if (!intel_engine_has_preemption(ce->engine)) in measure_preemption()
2337 rq = i915_request_create(ce); in measure_preemption()
2352 cs = emit_timestamp_store(cs, ce, addr + sizeof(u32)); in measure_preemption()
2362 rq = i915_request_create(ce->engine->kernel_context); in measure_preemption()
2375 cs = emit_timestamp_store(cs, ce, addr); in measure_preemption()
2381 elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); in measure_preemption()
2395 ce->engine->name, cycles >> TF_BIAS, in measure_preemption()
2396 cycles_to_ns(ce->engine, cycles)); in measure_preemption()
2403 ce->engine->name, cycles >> TF_BIAS, in measure_preemption()
2404 cycles_to_ns(ce->engine, cycles)); in measure_preemption()
2406 return intel_gt_wait_for_idle(ce->engine->gt, HZ); in measure_preemption()
2409 intel_gt_set_wedged(ce->engine->gt); in measure_preemption()
2425 static int measure_completion(struct intel_context *ce) in measure_completion() argument
2427 u32 *sema = hwsp_scratch(ce); in measure_completion()
2428 const u32 offset = hwsp_offset(ce, sema); in measure_completion()
2449 rq = i915_request_create(ce); in measure_completion()
2464 cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); in measure_completion()
2471 intel_engine_flush_submission(ce->engine); in measure_completion()
2482 elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); in measure_completion()
2486 err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); in measure_completion()
2497 ce->engine->name, cycles >> TF_BIAS, in measure_completion()
2498 cycles_to_ns(ce->engine, cycles)); in measure_completion()
2500 return intel_gt_wait_for_idle(ce->engine->gt, HZ); in measure_completion()
2503 intel_gt_set_wedged(ce->engine->gt); in measure_completion()
2537 struct intel_context *ce; in perf_request_latency() local
2539 ce = intel_context_create(engine); in perf_request_latency()
2540 if (IS_ERR(ce)) { in perf_request_latency()
2541 err = PTR_ERR(ce); in perf_request_latency()
2545 err = intel_context_pin(ce); in perf_request_latency()
2547 intel_context_put(ce); in perf_request_latency()
2555 err = measure_semaphore_response(ce); in perf_request_latency()
2557 err = measure_idle_dispatch(ce); in perf_request_latency()
2559 err = measure_busy_dispatch(ce); in perf_request_latency()
2561 err = measure_inter_request(ce); in perf_request_latency()
2563 err = measure_context_switch(ce); in perf_request_latency()
2565 err = measure_preemption(ce); in perf_request_latency()
2567 err = measure_completion(ce); in perf_request_latency()
2572 intel_context_unpin(ce); in perf_request_latency()
2573 intel_context_put(ce); in perf_request_latency()
2597 rq = i915_request_create(ps->ce[idx]); in s_sync0()
2631 rq = i915_request_create(ps->ce[idx]); in s_sync1()
2665 rq = i915_request_create(ps->ce[idx]); in s_many()
2700 ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL); in perf_series_engines()
2713 struct intel_context *ce; in perf_series_engines() local
2715 ce = intel_context_create(engine); in perf_series_engines()
2716 if (IS_ERR(ce)) { in perf_series_engines()
2717 err = PTR_ERR(ce); in perf_series_engines()
2721 err = intel_context_pin(ce); in perf_series_engines()
2723 intel_context_put(ce); in perf_series_engines()
2727 ps->ce[idx++] = ce; in perf_series_engines()
2743 struct intel_context *ce = ps->ce[idx]; in perf_series_engines() local
2745 p->engine = ps->ce[idx]->engine; in perf_series_engines()
2753 p->runtime = -intel_context_get_total_runtime_ns(ce); in perf_series_engines()
2762 struct intel_context *ce = ps->ce[idx]; in perf_series_engines() local
2774 err = switch_to_kernel_sync(ce, err); in perf_series_engines()
2775 p->runtime += intel_context_get_total_runtime_ns(ce); in perf_series_engines()
2790 name, p->engine->name, ce->timeline->seqno, in perf_series_engines()
2799 if (IS_ERR_OR_NULL(ps->ce[idx])) in perf_series_engines()
2802 intel_context_unpin(ps->ce[idx]); in perf_series_engines()
2803 intel_context_put(ps->ce[idx]); in perf_series_engines()
2825 struct intel_context *ce; in p_sync0() local
2831 ce = intel_context_create(engine); in p_sync0()
2832 if (IS_ERR(ce)) { in p_sync0()
2833 thread->result = PTR_ERR(ce); in p_sync0()
2837 err = intel_context_pin(ce); in p_sync0()
2839 intel_context_put(ce); in p_sync0()
2856 rq = i915_request_create(ce); in p_sync0()
2885 err = switch_to_kernel_sync(ce, err); in p_sync0()
2886 p->runtime = intel_context_get_total_runtime_ns(ce); in p_sync0()
2889 intel_context_unpin(ce); in p_sync0()
2890 intel_context_put(ce); in p_sync0()
2900 struct intel_context *ce; in p_sync1() local
2906 ce = intel_context_create(engine); in p_sync1()
2907 if (IS_ERR(ce)) { in p_sync1()
2908 thread->result = PTR_ERR(ce); in p_sync1()
2912 err = intel_context_pin(ce); in p_sync1()
2914 intel_context_put(ce); in p_sync1()
2931 rq = i915_request_create(ce); in p_sync1()
2962 err = switch_to_kernel_sync(ce, err); in p_sync1()
2963 p->runtime = intel_context_get_total_runtime_ns(ce); in p_sync1()
2966 intel_context_unpin(ce); in p_sync1()
2967 intel_context_put(ce); in p_sync1()
2976 struct intel_context *ce; in p_many() local
2982 ce = intel_context_create(engine); in p_many()
2983 if (IS_ERR(ce)) { in p_many()
2984 thread->result = PTR_ERR(ce); in p_many()
2988 err = intel_context_pin(ce); in p_many()
2990 intel_context_put(ce); in p_many()
3007 rq = i915_request_create(ce); in p_many()
3027 err = switch_to_kernel_sync(ce, err); in p_many()
3028 p->runtime = intel_context_get_total_runtime_ns(ce); in p_many()
3031 intel_context_unpin(ce); in p_many()
3032 intel_context_put(ce); in p_many()
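
Nearly every function in this listing walks ce through the same life cycle: create a context for an engine, check the ERR_PTR, optionally pin it, build one or more requests on it, and finally unpin and drop the reference. The sketch below condenses that pattern for reference; the function name probe_engine_latency, its error labels, and the wait step are illustrative only, while the i915 calls themselves (intel_context_create, intel_context_pin, i915_request_create, intel_context_unpin, intel_context_put) are the ones visible in the matched lines above.

/*
 * Minimal sketch of the context/request life cycle repeated in
 * perf_request_latency(), p_sync0(), p_sync1(), p_many(), ...
 * Illustrative helper, not part of the selftest file.
 */
static int probe_engine_latency(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err;

	ce = intel_context_create(engine);	/* new context on this engine */
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);		/* pin before building requests */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	rq = i915_request_create(ce);		/* request on the pinned context */
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
	}

	i915_request_get(rq);
	i915_request_add(rq);			/* submit to the engine */
	if (i915_request_wait(rq, 0, HZ) < 0)	/* wait up to one second */
		err = -ETIME;
	i915_request_put(rq);

out_unpin:
	intel_context_unpin(ce);		/* always undo the pin and the reference */
	intel_context_put(ce);
	return err;
}

The unwind order mirrors the fragments above: the request reference is dropped first, then the context is unpinned and put, so an early error after intel_context_pin() still releases everything it acquired.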