• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2013 Red Hat
3  * Author: Rob Clark <robdclark@gmail.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include "msm_drv.h"
19 #include "msm_gpu.h"
20 #include "msm_gem.h"
21 
22 /*
23  * Cmdstream submission:
24  */
25 
26 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
27 #define BO_VALID    0x8000
28 #define BO_LOCKED   0x4000
29 #define BO_PINNED   0x2000
30 
to_user_ptr(u64 address)31 static inline void __user *to_user_ptr(u64 address)
32 {
33 	return (void __user *)(uintptr_t)address;
34 }
35 
/* Allocate a submit object with space for 'nr' bo entries.  Returns NULL
 * on allocation failure or if the size computation would overflow size_t.
 */
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, uint32_t nr)
{
	struct msm_gem_submit *submit;
	uint64_t sz;

	/* compute in 64-bit so a huge 'nr' cannot wrap before the check: */
	sz = sizeof(*submit) + ((u64)nr * sizeof(submit->bos[0]));
	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->gpu = gpu;

	/* initially, until copy_from_user() and bo lookup succeeds: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->bo_list);
	ww_acquire_init(&submit->ticket, &reservation_ww_class);

	return submit;
}
60 
61 static inline unsigned long __must_check
copy_from_user_inatomic(void * to,const void __user * from,unsigned long n)62 copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
63 {
64 	if (access_ok(VERIFY_READ, from, n))
65 		return __copy_from_user_inatomic(to, from, n);
66 	return -EFAULT;
67 }
68 
submit_lookup_objects(struct msm_gem_submit * submit,struct drm_msm_gem_submit * args,struct drm_file * file)69 static int submit_lookup_objects(struct msm_gem_submit *submit,
70 		struct drm_msm_gem_submit *args, struct drm_file *file)
71 {
72 	unsigned i;
73 	int ret = 0;
74 
75 	spin_lock(&file->table_lock);
76 	pagefault_disable();
77 
78 	for (i = 0; i < args->nr_bos; i++) {
79 		struct drm_msm_gem_submit_bo submit_bo;
80 		struct drm_gem_object *obj;
81 		struct msm_gem_object *msm_obj;
82 		void __user *userptr =
83 			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
84 
85 		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
86 		if (unlikely(ret)) {
87 			pagefault_enable();
88 			spin_unlock(&file->table_lock);
89 			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
90 			if (ret)
91 				goto out;
92 			spin_lock(&file->table_lock);
93 			pagefault_disable();
94 		}
95 
96 		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
97 			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
98 			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
99 			ret = -EINVAL;
100 			goto out_unlock;
101 		}
102 
103 		submit->bos[i].flags = submit_bo.flags;
104 		/* in validate_objects() we figure out if this is true: */
105 		submit->bos[i].iova  = submit_bo.presumed;
106 
107 		/* normally use drm_gem_object_lookup(), but for bulk lookup
108 		 * all under single table_lock just hit object_idr directly:
109 		 */
110 		obj = idr_find(&file->object_idr, submit_bo.handle);
111 		if (!obj) {
112 			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
113 			ret = -EINVAL;
114 			goto out_unlock;
115 		}
116 
117 		msm_obj = to_msm_bo(obj);
118 
119 		if (!list_empty(&msm_obj->submit_entry)) {
120 			DRM_ERROR("handle %u at index %u already on submit list\n",
121 					submit_bo.handle, i);
122 			ret = -EINVAL;
123 			goto out_unlock;
124 		}
125 
126 		drm_gem_object_reference(obj);
127 
128 		submit->bos[i].obj = msm_obj;
129 
130 		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
131 	}
132 
133 out_unlock:
134 	pagefault_enable();
135 	spin_unlock(&file->table_lock);
136 
137 out:
138 	submit->nr_bos = i;
139 
140 	return ret;
141 }
142 
submit_unlock_unpin_bo(struct msm_gem_submit * submit,int i)143 static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
144 {
145 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
146 
147 	if (submit->bos[i].flags & BO_PINNED)
148 		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
149 
150 	if (submit->bos[i].flags & BO_LOCKED)
151 		ww_mutex_unlock(&msm_obj->resv->lock);
152 
153 	if (!(submit->bos[i].flags & BO_VALID))
154 		submit->bos[i].iova = 0;
155 
156 	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
157 }
158 
/* This is where we make sure all the bo's are reserved and pin'd:
 *
 * Uses the wound/wait mutex protocol: on -EDEADLK we back off (unlock
 * everything), slow-lock the contended bo, and retry the whole loop.
 * 'contended' tracks the index that last failed to lock; 'slow_locked'
 * tracks a bo locked outside the loop that the unwind path must also
 * release if the loop hasn't reached it yet.
 */
static int submit_validate_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint32_t iova;

		/* once the loop reaches the slow-locked bo, the normal
		 * unwind below covers it:
		 */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}


		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		/* this would break the logic in the fail path.. there is no
		 * reason for this to happen, but just to be on the safe side
		 * let's notice if this starts happening in the future:
		 */
		WARN_ON(ret == -EDEADLK);

		if (ret)
			goto fail;

		submit->bos[i].flags |= BO_PINNED;

		/* a bo is "valid" when its actual GPU address matches the
		 * iova userspace presumed, so its relocs can be skipped:
		 */
		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	/* unwind everything locked/pinned so far (including index i,
	 * which may be partially set up):
	 */
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	/* ..plus a slow-locked bo the loop never reached: */
	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
234 
/* Fetch the looked-up object, current iova, and "presumed address was
 * correct" flag for bo index 'idx'.  Any of the out-params may be NULL
 * if the caller doesn't need that value.  Returns -EINVAL on a bad index.
 */
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj != NULL)
		*obj = submit->bos[idx].obj;
	if (iova != NULL)
		*iova = submit->bos[idx].iova;
	if (valid != NULL)
		*valid = (submit->bos[idx].flags & BO_VALID) != 0;

	return 0;
}
253 
/* process the reloc's and patch up the cmdstream as needed:
 *
 * Each reloc names a bo (reloc_idx), a dword in this cmdstream buffer
 * (submit_offset) and how to transform the bo's iova (reloc_offset,
 * shift, or-bits) before writing it into that dword.  Relocs whose bo
 * already sits at the userspace-presumed address are skipped.
 */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t iova, off;
		bool valid;

		/* copy_from_user() returns bytes-not-copied; map any
		 * nonzero result to -EFAULT:
		 */
		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
		if (ret)
			return -EFAULT;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		/* relocs must be in ascending submit_offset order (enforced
		 * via last_offset) and must land inside the mapped bo:
		 */
		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			return ret;

		/* presumed address was correct -> cmdstream already holds
		 * the right value, nothing to patch:
		 */
		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		/* negative shift means shift right: */
		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		/* write the patched dword, OR'ing in any static bits: */
		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

	return 0;
}
325 
/* Tear down a submit: unlock/unpin every looked-up bo, drop its
 * reference, take it off the submit list, then free the submit itself.
 * (The 'fail' flag is currently unused but kept for the callers.)
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
	unsigned idx;

	for (idx = 0; idx < submit->nr_bos; idx++) {
		struct msm_gem_object *msm_obj = submit->bos[idx].obj;

		submit_unlock_unpin_bo(submit, idx);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
	kfree(submit);
}
340 
msm_ioctl_gem_submit(struct drm_device * dev,void * data,struct drm_file * file)341 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
342 		struct drm_file *file)
343 {
344 	struct msm_drm_private *priv = dev->dev_private;
345 	struct drm_msm_gem_submit *args = data;
346 	struct msm_file_private *ctx = file->driver_priv;
347 	struct msm_gem_submit *submit;
348 	struct msm_gpu *gpu;
349 	unsigned i;
350 	int ret;
351 
352 	/* for now, we just have 3d pipe.. eventually this would need to
353 	 * be more clever to dispatch to appropriate gpu module:
354 	 */
355 	if (args->pipe != MSM_PIPE_3D0)
356 		return -EINVAL;
357 
358 	gpu = priv->gpu;
359 
360 	if (args->nr_cmds > MAX_CMDS)
361 		return -EINVAL;
362 
363 	mutex_lock(&dev->struct_mutex);
364 
365 	submit = submit_create(dev, gpu, args->nr_bos);
366 	if (!submit) {
367 		ret = -ENOMEM;
368 		goto out;
369 	}
370 
371 	ret = submit_lookup_objects(submit, args, file);
372 	if (ret)
373 		goto out;
374 
375 	ret = submit_validate_objects(submit);
376 	if (ret)
377 		goto out;
378 
379 	for (i = 0; i < args->nr_cmds; i++) {
380 		struct drm_msm_gem_submit_cmd submit_cmd;
381 		void __user *userptr =
382 			to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
383 		struct msm_gem_object *msm_obj;
384 		uint32_t iova;
385 
386 		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
387 		if (ret) {
388 			ret = -EFAULT;
389 			goto out;
390 		}
391 
392 		/* validate input from userspace: */
393 		switch (submit_cmd.type) {
394 		case MSM_SUBMIT_CMD_BUF:
395 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
396 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
397 			break;
398 		default:
399 			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
400 			ret = -EINVAL;
401 			goto out;
402 		}
403 
404 		ret = submit_bo(submit, submit_cmd.submit_idx,
405 				&msm_obj, &iova, NULL);
406 		if (ret)
407 			goto out;
408 
409 		if (submit_cmd.size % 4) {
410 			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
411 					submit_cmd.size);
412 			ret = -EINVAL;
413 			goto out;
414 		}
415 
416 		if ((submit_cmd.size + submit_cmd.submit_offset) >=
417 				msm_obj->base.size) {
418 			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
419 			ret = -EINVAL;
420 			goto out;
421 		}
422 
423 		submit->cmd[i].type = submit_cmd.type;
424 		submit->cmd[i].size = submit_cmd.size / 4;
425 		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
426 		submit->cmd[i].idx  = submit_cmd.submit_idx;
427 
428 		if (submit->valid)
429 			continue;
430 
431 		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
432 				submit_cmd.nr_relocs, submit_cmd.relocs);
433 		if (ret)
434 			goto out;
435 	}
436 
437 	submit->nr_cmds = i;
438 
439 	ret = msm_gpu_submit(gpu, submit, ctx);
440 
441 	args->fence = submit->fence;
442 
443 out:
444 	if (submit)
445 		submit_cleanup(submit, !!ret);
446 	mutex_unlock(&dev->struct_mutex);
447 	return ret;
448 }
449