/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

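/*
 * Allocate a job together with all of its fixed-size arrays (relocs, unpin
 * entries, waitchks, gathers and DMA addresses) in a single allocation; the
 * pointers set up below are carved out of that one block, so job_free()
 * only has to free the job itself.
 *
 * A typical submission (see the Tegra DRM driver) roughly follows
 * host1x_job_alloc() -> host1x_job_add_gather() -> host1x_job_pin() ->
 * host1x_job_submit(), with host1x_job_unpin() and host1x_job_put() run
 * once the job has completed.
 */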
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    u32 num_waitchks)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_waitchks * sizeof(struct host1x_waitchk) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocarray = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->waitchk = num_waitchks ? mem : NULL;
	mem += num_waitchks * sizeof(struct host1x_waitchk);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

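/*
 * Jobs are reference counted via the kref initialized in host1x_job_alloc();
 * host1x_job_get()/host1x_job_put() take and drop that reference, and the
 * final put frees the single allocation backing the job and its arrays.
 */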
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

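/*
 * Append one gather (a command buffer BO, its length in words and the start
 * offset within the BO) to the job. The gathers array was sized for the
 * num_cmdbufs value passed to host1x_job_alloc(), so callers must not add
 * more gathers than that.
 */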
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   u32 words, u32 offset)
{
	struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];

	cur_gather->words = words;
	cur_gather->bo = bo;
	cur_gather->offset = offset;
	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

/*
 * NULL an already satisfied WAIT_SYNCPT host method by patching its
 * arguments in the command stream. The method data is changed to reference
 * the reserved (never given out or incremented) HOST1X_SYNCPT_RESERVED
 * syncpoint with a threshold of 0, so the wait is guaranteed to be popped
 * by the host HW.
 */
static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
				       struct host1x_bo *h, u32 offset)
{
	void *patch_addr = NULL;

	/* patch the wait */
	patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
	if (patch_addr) {
		host1x_syncpt_patch_wait(sp,
					 patch_addr + (offset & ~PAGE_MASK));
		host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
	} else {
		pr_err("Could not map cmdbuf for wait check\n");
	}
}

/*
 * Check driver-supplied waitchk structs for syncpt thresholds that have
 * already been satisfied and NULL the comparison (to avoid a wrap
 * condition in the HW).
 */
static int do_waitchks(struct host1x_job *job, struct host1x *host,
		       struct host1x_job_gather *g)
{
	struct host1x_bo *patch = g->bo;
	int i;

	/* compare syncpt vs wait threshold */
	for (i = 0; i < job->num_waitchk; i++) {
		struct host1x_waitchk *wait = &job->waitchk[i];
		struct host1x_syncpt *sp;

		/* validate syncpt id */
		if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
			continue;

		/* skip all other gathers */
		if (patch != wait->bo)
			continue;

		sp = host1x_syncpt_get(host, wait->syncpt_id);

		trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
					       wait->syncpt_id, wait->thresh,
					       host1x_syncpt_read_min(sp));

		if (host1x_syncpt_is_expired(sp, wait->thresh)) {
			dev_dbg(host->dev,
				"drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id, sp->name, wait->thresh,
				host1x_syncpt_read_min(sp));

			host1x_syncpt_patch_offset(sp, patch,
						   g->offset + wait->offset);
		}

		wait->bo = NULL;
	}

	return 0;
}

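/*
 * Pin every buffer referenced by the job: first the relocation targets,
 * then the gather command buffers. Each pinned BO is recorded in
 * job->unpins so host1x_job_unpin() can undo the work on error or after
 * the job completes. When an IOMMU domain is attached (and the command
 * stream firewall is not in use), gathers are additionally mapped into
 * the host1x IOVA space so the hardware sees a contiguous view of them.
 */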
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(g->bo, &sgt);

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto unpin;
			}

			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto unpin;
			}

			job->addr_phys[job->num_unpins] =
				iova_dma_addr(&host->iova, alloc);
			job->unpins[job->num_unpins].size = gather_size;
		} else {
			job->addr_phys[job->num_unpins] = phys_addr;
		}

		job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

unpin:
	host1x_job_unpin(job);
	return err;
}

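/*
 * Patch every relocation that targets this gather's command buffer with
 * the DMA address of its target BO (as pinned above), shifted as requested
 * by the reloc. With the firewall enabled, the patches are applied to the
 * kernel copy of the gather instead of the userspace-visible buffer.
 * Command buffer pages are mapped one at a time and the mapping is reused
 * while consecutive relocs fall on the same page.
 */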
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	int i = 0;
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
					g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}

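/*
 * Firewall helpers: 'offset' is the current word index into the gather, so
 * it is converted to a byte offset before being compared against the
 * userspace-supplied reloc/waitchk entries. Entries are consumed in order,
 * so they must appear in the same order as the register writes they back.
 */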
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

static bool check_wait(struct host1x_waitchk *wait, struct host1x_bo *cmdbuf,
		       unsigned int offset)
{
	offset *= sizeof(u32);

	if (wait->bo != cmdbuf || wait->offset != offset)
		return false;

	return true;
}

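/*
 * Parser state for the command stream firewall: the current class, the
 * register/mask/count decoded from the last opcode, the word offset into
 * the gather being scanned, and the not-yet-consumed reloc and waitchk
 * tables.
 */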
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	unsigned int num_waitchks;
	struct host1x_waitchk *waitchk;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

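/*
 * Validate one register write: writes to address registers (as reported by
 * the client's is_addr_reg() callback) must be covered by the next pending
 * relocation, and writes to the host1x WAIT_SYNCPT method must be covered
 * by the next pending waitchk.
 */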
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	if (offset == HOST1X_WAIT_SYNCPT_OFFSET) {
		if (fw->class != HOST1X_CLASS_HOST1X)
			return -EINVAL;

		if (!fw->num_waitchks)
			return -EINVAL;

		if (!check_wait(fw->waitchk, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_waitchks--;
		fw->waitchk++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

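/*
 * The three helpers below walk the data words that follow an opcode with a
 * mask-style, incrementing or non-incrementing payload, checking every
 * register write with check_register() and consuming the corresponding
 * words from the gather.
 */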
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

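/*
 * Walk one gather (using the kernel copy made by copy_gathers()) and decode
 * each command word. The opcode lives in the top four bits; the set-class
 * (0), incrementing (1), non-incrementing (2) and mask (3) payloads are
 * checked for illegal class switches and for address-register writes that
 * are not backed by a relocation, opcodes 4 and 14 are passed through
 * without register checks, and any other opcode fails the job.
 */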
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

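/*
 * Copy all gathers into one contiguous, write-combined DMA buffer and run
 * the firewall over the copy. Subsequent patching and submission use the
 * copy, so userspace cannot modify the commands after they have been
 * validated.
 */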
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocarray;
	fw.num_relocs = job->num_relocs;
	fw.waitchk = job->waitchk;
	fw.num_waitchks = job->num_waitchk;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from a higher-priority pool first,
	 * since waiting for the allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the non-blocking allocation failed, fall back to a blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs or waitchks should remain at this point */
	if (fw.num_relocs || fw.num_waitchks)
		return -EINVAL;

	return 0;
}

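/*
 * Prepare a job for submission: snapshot the current value of every
 * syncpoint referenced by a waitchk, pin all buffers, optionally copy and
 * validate the gathers through the firewall, then patch relocations and
 * drop waits that are already satisfied. On failure everything pinned so
 * far is released again.
 */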
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);
	DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));

	bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
	for (i = 0; i < job->num_waitchk; i++) {
		u32 syncpt_id = job->waitchk[i].syncpt_id;

		if (syncpt_id < host1x_syncpt_nb_pts(host))
			set_bit(syncpt_id, waitchk_mask);
	}

	/* get current syncpt values for waitchk */
	for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
		host1x_syncpt_load(host->syncpt + i);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets the gather base if the firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;

		err = do_waitchks(job, host, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

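/*
 * Undo pin_job(): tear down any IOVA mappings created for the gathers,
 * unpin and release every buffer recorded in job->unpins, and free the
 * firewall's gather copy if one was allocated.
 */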
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		host1x_bo_unpin(unpin->bo, unpin->sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id);
	dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end);
	dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get);
	dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
	dev_dbg(dev, " NUM_SLOTS %d\n", job->num_slots);
	dev_dbg(dev, " NUM_HANDLES %d\n", job->num_unpins);
}