// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"

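/*
 * The sleeve bundles a proxy VMA with the caller-supplied backing pages and
 * page sizes that vma_set_pages() will hand to it.
 */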
struct i915_sleeve {
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
};

static int vma_set_pages(struct i915_vma *vma)
{
	struct i915_sleeve *sleeve = vma->private;

	vma->pages = sleeve->pages;
	vma->page_sizes = sleeve->page_sizes;

	return 0;
}

static void vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	vma->pages = NULL;
}

static void vma_bind(struct i915_address_space *vm,
		     struct i915_vm_pt_stash *stash,
		     struct i915_vma *vma,
		     enum i915_cache_level cache_level,
		     u32 flags)
{
	vm->vma_ops.bind_vma(vm, stash, vma, cache_level, flags);
}

static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->vma_ops.unbind_vma(vm, vma);
}

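/*
 * The proxy VMA takes its pages from the sleeve, but binds and unbinds
 * through the owning address space's ordinary vma_ops.
 */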
static const struct i915_vma_ops proxy_vma_ops = {
	.set_pages = vma_set_pages,
	.clear_pages = vma_clear_pages,
	.bind_vma = vma_bind,
	.unbind_vma = vma_unbind,
};

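/*
 * Build a sleeve for @obj: look up (or create) its VMA in @vm and redirect
 * that VMA's page handling to the caller-supplied pages via proxy_vma_ops.
 */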
static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
					 struct drm_i915_gem_object *obj,
					 struct sg_table *pages,
					 struct i915_page_sizes *page_sizes)
{
	struct i915_sleeve *sleeve;
	struct i915_vma *vma;
	int err;

	sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
	if (!sleeve)
		return ERR_PTR(-ENOMEM);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_free;
	}

	vma->private = sleeve;
	vma->ops = &proxy_vma_ops;

	sleeve->vma = vma;
	sleeve->pages = pages;
	sleeve->page_sizes = *page_sizes;

	return sleeve;

err_free:
	kfree(sleeve);
	return ERR_PTR(err);
}

static void destroy_sleeve(struct i915_sleeve *sleeve)
{
	kfree(sleeve);
}

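/*
 * State for one asynchronous fill: the sw fence (wait) holds back the worker
 * until existing fences on the object have completed, the worker submits the
 * blitter request, and the embedded dma_fence (dma) signals once that request
 * has finished or an error has been recorded.
 */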
struct clear_pages_work {
	struct dma_fence dma;
	struct dma_fence_cb cb;
	struct i915_sw_fence wait;
	struct work_struct work;
	struct irq_work irq_work;
	struct i915_sleeve *sleeve;
	struct intel_context *ce;
	u32 value;
};

static const char *clear_pages_work_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
{
	return "clear";
}

static void clear_pages_work_release(struct dma_fence *fence)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), dma);

	destroy_sleeve(w->sleeve);

	i915_sw_fence_fini(&w->wait);

	BUILD_BUG_ON(offsetof(typeof(*w), dma));
	dma_fence_free(&w->dma);
}

static const struct dma_fence_ops clear_pages_work_ops = {
	.get_driver_name = clear_pages_work_driver_name,
	.get_timeline_name = clear_pages_work_timeline_name,
	.release = clear_pages_work_release,
};

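/* Final completion step, run from irq_work context: signal w->dma and drop a reference. */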
static void clear_pages_signal_irq_worker(struct irq_work *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);

	dma_fence_signal(&w->dma);
	dma_fence_put(&w->dma);
}

static void clear_pages_dma_fence_cb(struct dma_fence *fence,
				     struct dma_fence_cb *cb)
{
	struct clear_pages_work *w = container_of(cb, typeof(*w), cb);

	if (fence->error)
		dma_fence_set_error(&w->dma, fence->error);

	/*
	 * Push the signalling of the fence into yet another worker to avoid
	 * the nightmare locking around the fence spinlock.
	 */
	irq_work_queue(&w->irq_work);
}

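/*
 * Worker run once the sw fence has fired: flush CPU caches if the object is
 * dirty, pin the context under a ww transaction, emit the fill batch and
 * submit it as a request against the sleeve's VMA. Failures are propagated to
 * w->dma, either through the request or by signalling the fence directly.
 */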
static void clear_pages_worker(struct work_struct *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), work);
	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	struct i915_vma *batch;
	int err = w->dma.error;

	if (unlikely(err))
		goto out_signal;

	if (obj->cache_dirty) {
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(w->sleeve->pages);
		obj->cache_dirty = false;
	}
	obj->read_domains = I915_GEM_GPU_DOMAINS;
	obj->write_domain = 0;

	i915_gem_ww_ctx_init(&ww, false);
	intel_engine_pm_get(w->ce->engine);
retry:
	err = intel_context_pin_ww(w->ce, &ww);
	if (err)
		goto out_signal;

	batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_ctx;
	}

	rq = i915_request_create(w->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_batch;
	}

	/* There's no way the fence has signalled */
	if (dma_fence_add_callback(&rq->fence, &w->cb,
				   clear_pages_dma_fence_cb))
		GEM_BUG_ON(1);

	err = intel_emit_vma_mark_active(batch, rq);
	if (unlikely(err))
		goto out_request;

	if (w->ce->engine->emit_init_breadcrumb) {
		err = w->ce->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			goto out_request;
	}

	/*
	 * w->dma is already exported via (vma|obj)->resv; we need only
	 * keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	err = __i915_vma_move_to_active(vma, rq);
	if (err)
		goto out_request;

	err = w->ce->engine->emit_bb_start(rq,
					   batch->node.start, batch->node.size,
					   0);
out_request:
	if (unlikely(err)) {
		i915_request_set_error_once(rq, err);
		err = 0;
	}

	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(w->ce, batch);
out_ctx:
	intel_context_unpin(w->ce);
out_signal:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	i915_vma_unpin(w->sleeve->vma);
	intel_engine_pm_put(w->ce->engine);

	if (unlikely(err)) {
		dma_fence_set_error(&w->dma, err);
		dma_fence_signal(&w->dma);
		dma_fence_put(&w->dma);
	}
}

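/*
 * Under the object lock: pin the sleeve's VMA, make the sw fence wait on all
 * existing fences in the object's reservation, and install w->dma as the new
 * exclusive fence.
 */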
static int pin_wait_clear_pages_work(struct clear_pages_work *w,
				     struct intel_context *ce)
{
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err)
		goto out;

	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out;

	err = i915_sw_fence_await_reservation(&w->wait,
					      vma->obj->base.resv, NULL,
					      true, 0, I915_FENCE_GFP);
	if (err)
		goto err_unpin_vma;

	dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);

err_unpin_vma:
	if (err)
		i915_vma_unpin(vma);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

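/*
 * sw fence callback: schedule the worker once every awaited fence has
 * completed, and drop the fence reference when the sw fence itself is freed.
 */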
static int __i915_sw_fence_call
clear_pages_work_notify(struct i915_sw_fence *fence,
			enum i915_sw_fence_notify state)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&w->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&w->dma);
		break;
	}

	return NOTIFY_DONE;
}

static DEFINE_SPINLOCK(fence_lock);

/* XXX: better name please */
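/*
 * Schedule an asynchronous blitter fill of the supplied pages with @value on
 * context @ce. The fill runs from a worker once prior users of @obj have
 * completed; a negative return value reports a setup failure only.
 */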
int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
				     struct intel_context *ce,
				     struct sg_table *pages,
				     struct i915_page_sizes *page_sizes,
				     u32 value)
{
	struct clear_pages_work *work;
	struct i915_sleeve *sleeve;
	int err;

	sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
	if (IS_ERR(sleeve))
		return PTR_ERR(sleeve);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		destroy_sleeve(sleeve);
		return -ENOMEM;
	}

	work->value = value;
	work->sleeve = sleeve;
	work->ce = ce;

	INIT_WORK(&work->work, clear_pages_worker);

	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);

	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
	i915_sw_fence_init(&work->wait, clear_pages_work_notify);

	err = pin_wait_clear_pages_work(work, ce);
	if (err < 0)
		dma_fence_set_error(&work->dma, err);

	dma_fence_get(&work->dma);
	i915_sw_fence_commit(&work->wait);

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_client_blt.c"
#endif