// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"

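/*
 * host1x exposes several register apertures: the hypervisor aperture (on
 * SoCs that have one), the sync aperture located at a per-SoC offset within
 * the main aperture, and one register window per channel. The accessors
 * below wrap readl()/writel() for each of them.
 */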
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

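/*
 * Per-SoC configuration data: channel, syncpoint, mlock and wait-base
 * counts, the sync aperture offset, the supported DMA mask and the stream
 * ID table used to program client stream IDs on SoCs that have a
 * hypervisor aperture.
 */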
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

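/*
 * Program the stream ID table through the hypervisor aperture: each entry
 * writes its stream ID offset and limit into a pair of adjacent 32-bit
 * registers at the entry's base address.
 */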
static void host1x_setup_sid_table(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}

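/*
 * Returns the IOMMU domain that host1x ends up attached to: the domain
 * already provided by the DMA API if there is one, a domain allocated and
 * attached here, NULL if no IOMMU is used, or an ERR_PTR() on failure.
 */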
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
			to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
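			/*
			 * -ENODEV means the IOMMU driver refused this
			 * device; treat it as "no IOMMU" by returning
			 * ERR_PTR(0), i.e. NULL, from the error path.
			 */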
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

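/*
 * Attach to the IOMMU (if one is wanted and available) and derive the DMA
 * mask: the full per-SoC mask when behind an IOMMU or when wide GATHER
 * opcodes are supported, a 32-bit mask otherwise.
 */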
static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}

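/* Undo host1x_iommu_attach(): detach and free the domain we allocated. */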
static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}

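/*
 * Bring up the host1x instance: map the register apertures, request the
 * syncpoint interrupt, set up the IOMMU, clock and reset, then initialize
 * channels, syncpoints and interrupts before registering the host1x bus
 * and populating child devices. The error path unwinds these steps in
 * reverse order.
 */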
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	struct resource *regs, *hv_regs = NULL;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
		if (!regs) {
			dev_err(&pdev->dev, "failed to get vm registers\n");
			return -ENXIO;
		}

		hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "hypervisor");
		if (!hv_regs) {
			dev_err(&pdev->dev,
				"failed to get hypervisor registers\n");
			return -ENXIO;
		}
	} else {
		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs) {
			dev_err(&pdev->dev, "failed to get registers\n");
			return -ENXIO;
		}
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0)
		return syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	if (host->info->has_hypervisor) {
		host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);
	}

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		return err;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto free_channels;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	host1x_debug_init(host);

	if (host->info->has_hypervisor)
		host1x_setup_sid_table(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
reset_assert:
	reset_control_assert(host->rst);
unprepare_disable:
	clk_disable_unprepare(host->clk);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);

	return err;
}

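/* Tear down in the reverse order of host1x_probe(). */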
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);
	host1x_iommu_exit(host);

	return 0;
}

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
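
/*
 * Example (illustrative only, not part of this driver): a client could use
 * the reported mask to decide whether its buffers may live above the
 * 4 GiB boundary:
 *
 *	if (host1x_get_dma_mask(host1x) > DMA_BIT_MASK(32))
 *		wide_addressing = true;
 */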

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");