Lines matching refs:spin in drivers/gpu/drm/i915/selftests/igt_spinner.c
13 int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) in igt_spinner_init() argument
17 memset(spin, 0, sizeof(*spin)); in igt_spinner_init()
18 spin->gt = gt; in igt_spinner_init()
20 spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); in igt_spinner_init()
21 if (IS_ERR(spin->hws)) { in igt_spinner_init()
22 err = PTR_ERR(spin->hws); in igt_spinner_init()
25 i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC); in igt_spinner_init()
27 spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); in igt_spinner_init()
28 if (IS_ERR(spin->obj)) { in igt_spinner_init()
29 err = PTR_ERR(spin->obj); in igt_spinner_init()
36 i915_gem_object_put(spin->hws); in igt_spinner_init()
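For context, the matched igt_spinner_init() lines above fit together roughly as follows; this is a reconstruction from the matches, and the error labels and return paths are assumptions rather than verbatim source:

int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	/* One page for the seqno writes (the spinner's private "HWS" page) */
	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);

	/* One page for the spinning batch buffer itself */
	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	return 0;

err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}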
78 int igt_spinner_pin(struct igt_spinner *spin, in igt_spinner_pin() argument
84 if (spin->ce && WARN_ON(spin->ce != ce)) in igt_spinner_pin()
86 spin->ce = ce; in igt_spinner_pin()
88 if (!spin->seqno) { in igt_spinner_pin()
89 vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma); in igt_spinner_pin()
93 spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); in igt_spinner_pin()
96 if (!spin->batch) { in igt_spinner_pin()
99 mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false); in igt_spinner_pin()
100 vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma); in igt_spinner_pin()
104 spin->batch = vaddr; in igt_spinner_pin()
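Similarly, a sketch of igt_spinner_pin() built from the matched lines; the error returns and the comments are assumptions:

int igt_spinner_pin(struct igt_spinner *spin,
		    struct intel_context *ce,
		    struct i915_gem_ww_ctx *ww)
{
	void *vaddr;

	/* A spinner is tied to one context for its lifetime */
	if (spin->ce && WARN_ON(spin->ce != ce))
		return -ENODEV;
	spin->ce = ce;

	if (!spin->seqno) {
		/* Map the HWS page write-back and poison it so a stale value
		 * is never mistaken for the spinner's seqno write.
		 */
		vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB,
					    &spin->hws_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
	}

	if (!spin->batch) {
		unsigned int mode;

		/* Choose a CPU mapping that stays coherent with the GPU so
		 * the MI_BATCH_BUFFER_END written by igt_spinner_end() is
		 * observed by the hardware.
		 */
		mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode,
					    &spin->batch_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->batch = vaddr;
	}

	return 0;
}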
138 igt_spinner_create_request(struct igt_spinner *spin, in igt_spinner_create_request() argument
149 GEM_BUG_ON(spin->gt != ce->vm->gt); in igt_spinner_create_request()
154 if (!spin->batch) { in igt_spinner_create_request()
155 err = igt_spinner_pin(spin, ce, NULL); in igt_spinner_create_request()
160 hws = spin->hws_vma; in igt_spinner_create_request()
161 vma = spin->batch_vma; in igt_spinner_create_request()
175 batch = spin->batch; in igt_spinner_create_request()
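The batch that igt_spinner_create_request() emits into spin->batch is, conceptually, a seqno store followed by an endless loop. The dwords below are a hedged sketch of that idea (hws_address() is an assumed helper combining the hws vma offset with the per-context slot; the branch encoding is elided), not the verbatim emission:

	batch = spin->batch;

	/* 1. Write rq->fence.seqno into the HWS page, at the same
	 *    per-context slot that hws_seqno() below reads back.
	 */
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));	/* assumed helper */
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	/* 2. Spin: emit the caller's arbitration command (e.g. MI_ARB_CHECK,
	 *    so the loop stays preemptible) and branch back to it forever,
	 *    until igt_spinner_end() rewrites the batch.
	 */
	*batch++ = arbitration_command;
	/* ... MI_BATCH_BUFFER_START pointing back into this batch ... */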
232 hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) in hws_seqno() argument
234 u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); in hws_seqno()
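hws_seqno() reads back the value the spinner batch wrote for this request's fence context. A short sketch, with the seqno_offset() helper shown as an assumption about how the per-context slot is derived:

static u64 seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}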
239 void igt_spinner_end(struct igt_spinner *spin) in igt_spinner_end() argument
241 if (!spin->batch) in igt_spinner_end()
244 *spin->batch = MI_BATCH_BUFFER_END; in igt_spinner_end()
245 intel_gt_chipset_flush(spin->gt); in igt_spinner_end()
248 void igt_spinner_fini(struct igt_spinner *spin) in igt_spinner_fini() argument
250 igt_spinner_end(spin); in igt_spinner_fini()
252 if (spin->batch) { in igt_spinner_fini()
253 i915_vma_unpin(spin->batch_vma); in igt_spinner_fini()
254 i915_gem_object_unpin_map(spin->obj); in igt_spinner_fini()
256 i915_gem_object_put(spin->obj); in igt_spinner_fini()
258 if (spin->seqno) { in igt_spinner_fini()
259 i915_vma_unpin(spin->hws_vma); in igt_spinner_fini()
260 i915_gem_object_unpin_map(spin->hws); in igt_spinner_fini()
262 i915_gem_object_put(spin->hws); in igt_spinner_fini()
265 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) in igt_wait_for_spinner() argument
270 return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), in igt_wait_for_spinner()
273 wait_for(i915_seqno_passed(hws_seqno(spin, rq), in igt_wait_for_spinner()
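Putting the pieces together, a typical live selftest drives the spinner roughly as below. This is a minimal sketch: live_spin_example() is a hypothetical caller, assuming an intel_gt and an intel_engine_cs are already in hand, with error handling abbreviated:

static int live_spin_example(struct intel_gt *gt,
			     struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	ce = intel_context_create(engine);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_spin;
	}

	/* Submit a request that keeps the engine busy until told to stop */
	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_ce;
	}
	i915_request_get(rq);
	i915_request_add(rq);

	/* Wait for the seqno write: proof the spinner is running on the GPU */
	if (!igt_wait_for_spinner(&spin, rq)) {
		err = -ETIME;
		goto out_end;
	}

	/* ... exercise preemption, resets, etc. against the busy engine ... */

out_end:
	igt_spinner_end(&spin);		/* terminate the looping batch */
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);
out_ce:
	intel_context_put(ce);
out_spin:
	igt_spinner_fini(&spin);
	return err;
}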