// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

/*
 * DRM operations:
 */

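/*
 * Initialize all GPU cores bound to the master device; a core whose init
 * fails is dropped from the pipe array so later lookups skip it.
 */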
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

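/*
 * Per-open state: each DRM file gets its own MMU context and one scheduler
 * entity per present GPU pipe.
 */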
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx;
	int ret, i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
					      priv->cmdbuf_suballoc);
	if (!ctx->mmu) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];
		struct drm_gpu_scheduler *sched;

		if (gpu) {
			sched = &gpu->sched;
			drm_sched_entity_init(&ctx->sched_entity[i],
					      DRM_SCHED_PRIORITY_NORMAL, &sched,
					      1, NULL);
		}
	}

	file->driver_priv = ctx;

	return 0;

out_free:
	kfree(ctx);
	return ret;
}

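/* Tear down the per-file scheduler entities and drop the MMU context. */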
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu)
			drm_sched_entity_destroy(&ctx->sched_entity[i]);
	}

	etnaviv_iommu_context_put(ctx->mmu);

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
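/* debugfs: describe all GEM objects known to the driver. */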
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

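/* debugfs: dump the mmap offset (VMA) address space of the device. */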
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct etnaviv_iommu_context *mmu_context;

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/*
	 * Lock the GPU to prevent an MMU context switch while we look at the
	 * current context, and take a reference on it so it cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&gpu->lock);
	mmu_context = gpu->mmu_context;
	if (mmu_context)
		etnaviv_iommu_context_get(mmu_context);
	mutex_unlock(&gpu->lock);

	if (!mmu_context)
		return 0;

	mutex_lock(&mmu_context->lock);
	drm_mm_print(&mmu_context->mm, &p);
	mutex_unlock(&mmu_context->lock);

	etnaviv_iommu_context_put(mmu_context);

	return 0;
}

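/*
 * Hex-dump the kernel ring buffer: its virtual and physical addresses, the
 * remaining free space, and the buffer contents, four words per line.
 */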
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = &gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
			size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

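/* Invoke a per-device debugfs show callback stored in the info entry. */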
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

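/* Invoke a per-GPU debugfs show callback for every present pipe. */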
static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

/*
 * DRM ioctls:
 */

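/* Query a single parameter of the selected GPU pipe. */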
static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

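/*
 * Validate a userptr request: the flags must select at least read or write,
 * the pointer and size must be page aligned and representable in the
 * kernel's pointer/size types, and the range must lie within the user
 * address space.
 */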
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(fops);

static const struct drm_driver etnaviv_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER,
	.open = etnaviv_open,
	.postclose = etnaviv_postclose,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_mmap = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = etnaviv_debugfs_init,
#endif
	.ioctls = etnaviv_ioctls,
	.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
	.fops = &fops,
	.name = "etnaviv",
	.desc = "etnaviv DRM",
	.date = "20151214",
	.major = 1,
	.minor = 3,
};

/*
 * Platform driver:
 */
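/*
 * Master bind callback: create the drm_device, set up the driver-private
 * state and the cmdbuf suballocator, bind all GPU core components and
 * register the DRM device.
 */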
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_put;
	}
	drm->dev_private = priv;

	dma_set_max_seg_size(dev, SZ_2G);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}

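/* Undo etnaviv_bind(): unregister, unbind all components and free state. */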
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

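/*
 * Build the component match list, either from all available "vivante,gc"
 * nodes in the DT or from a platform-data list of device names, configure
 * the DMA masks and register the component master.
 */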
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *first_node = NULL;
	struct component_match *match = NULL;

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			if (!first_node)
				first_node = core_node;

			drm_of_component_match_add(&pdev->dev, &match,
						   component_compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, component_compare_dev_name, names[i]);
	}

	/*
	 * The PTA and MTLB can have 40 bit base addresses, but
	 * unfortunately, an entry in the MTLB can only point to a
	 * 32 bit base address of an STLB. Moreover, to initialize the
	 * MMU we need a command buffer with a 32 bit address, because
	 * without an MMU there is only an identity mapping between
	 * the internal 32 bit addresses and the bus addresses.
	 *
	 * To make things easy, we set the dma_coherent_mask to 32
	 * bit to make sure we are allocating the command buffers and
	 * TLBs in the lower 4 GiB address space.
	 */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_dbg(&pdev->dev, "No suitable DMA available\n");
		return -ENODEV;
	}

	/*
	 * Apply the same DMA configuration to the virtual etnaviv
	 * device as the GPU we found. This assumes that all Vivante
	 * GPUs in the system share the same DMA constraints.
	 */
	if (first_node)
		of_dma_configure(&pdev->dev, first_node, true);

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
	},
};

static struct platform_device *etnaviv_drm;

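/*
 * Module init: register the GPU core and master platform drivers, then
 * instantiate the virtual "etnaviv" device if the DT provides a usable GPU.
 */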
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
		if (!pdev) {
			ret = -ENOMEM;
			of_node_put(np);
			goto unregister_platform_driver;
		}

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		etnaviv_drm = pdev;
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	platform_device_unregister(etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");