1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
3 /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
4 /* Copyright 2019 Collabora ltd. */
5
6 #include <linux/module.h>
7 #include <linux/of_platform.h>
8 #include <linux/pagemap.h>
9 #include <linux/pm_runtime.h>
10 #include <drm/panfrost_drm.h>
11 #include <drm/drm_drv.h>
12 #include <drm/drm_ioctl.h>
13 #include <drm/drm_syncobj.h>
14 #include <drm/drm_utils.h>
15
16 #include "panfrost_device.h"
17 #include "panfrost_gem.h"
18 #include "panfrost_mmu.h"
19 #include "panfrost_job.h"
20 #include "panfrost_gpu.h"
21 #include "panfrost_perfcnt.h"
22
#define DRM_PANFROST_PARAM_AFBC_FEATURES (DRM_PANFROST_PARAM_THREAD_TLS_ALLOC + 1) /* TODO: this macro may be wrong — it must stay in sync with the UAPI header and could collide with a parameter id added upstream; verify before relying on it */
24
/*
 * Gate for ioctls that are not yet stable UAPI (the perfcnt ioctls);
 * off by default, toggled via an "unsafe" module parameter (setting it
 * taints the kernel). Checked by panfrost_unstable_ioctl_check().
 */
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
27
panfrost_ioctl_get_param(struct drm_device * ddev,void * data,struct drm_file * file)28 static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
29 {
30 struct drm_panfrost_get_param *param = data;
31 struct panfrost_device *pfdev = ddev->dev_private;
32
33 if (param->pad != 0)
34 return -EINVAL;
35
36 #define PANFROST_FEATURE(name, member) \
37 case DRM_PANFROST_PARAM_ ## name: \
38 param->value = pfdev->features.member; \
39 break
40 #define PANFROST_FEATURE_ARRAY(name, member, max) \
41 case DRM_PANFROST_PARAM_ ## name ## 0 ... \
42 DRM_PANFROST_PARAM_ ## name ## max: \
43 param->value = pfdev->features.member[param->param - \
44 DRM_PANFROST_PARAM_ ## name ## 0]; \
45 break
46
47 switch (param->param) {
48 PANFROST_FEATURE(GPU_PROD_ID, id);
49 PANFROST_FEATURE(GPU_REVISION, revision);
50 PANFROST_FEATURE(SHADER_PRESENT, shader_present);
51 PANFROST_FEATURE(TILER_PRESENT, tiler_present);
52 PANFROST_FEATURE(L2_PRESENT, l2_present);
53 PANFROST_FEATURE(STACK_PRESENT, stack_present);
54 PANFROST_FEATURE(AS_PRESENT, as_present);
55 PANFROST_FEATURE(JS_PRESENT, js_present);
56 PANFROST_FEATURE(L2_FEATURES, l2_features);
57 PANFROST_FEATURE(CORE_FEATURES, core_features);
58 PANFROST_FEATURE(TILER_FEATURES, tiler_features);
59 PANFROST_FEATURE(MEM_FEATURES, mem_features);
60 PANFROST_FEATURE(MMU_FEATURES, mmu_features);
61 PANFROST_FEATURE(THREAD_FEATURES, thread_features);
62 PANFROST_FEATURE(MAX_THREADS, max_threads);
63 PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
64 thread_max_workgroup_sz);
65 PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
66 thread_max_barrier_sz);
67 PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
68 PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
69 PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
70 PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
71 PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
72 #if IS_ENABLED(CONFIG_ARCH_SUN50IW9)
73 PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
74 #endif
75 default:
76 return -EINVAL;
77 }
78
79 return 0;
80 }
81
panfrost_ioctl_create_bo(struct drm_device * dev,void * data,struct drm_file * file)82 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
83 struct drm_file *file)
84 {
85 struct panfrost_file_priv *priv = file->driver_priv;
86 struct panfrost_gem_object *bo;
87 struct drm_panfrost_create_bo *args = data;
88 struct panfrost_gem_mapping *mapping;
89
90 if (!args->size || args->pad ||
91 (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
92 return -EINVAL;
93
94 /* Heaps should never be executable */
95 if ((args->flags & PANFROST_BO_HEAP) &&
96 !(args->flags & PANFROST_BO_NOEXEC))
97 return -EINVAL;
98
99 bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
100 &args->handle);
101 if (IS_ERR(bo))
102 return PTR_ERR(bo);
103
104 mapping = panfrost_gem_mapping_get(bo, priv);
105 if (!mapping) {
106 drm_gem_object_put(&bo->base.base);
107 return -EINVAL;
108 }
109
110 args->offset = mapping->mmnode.start << PAGE_SHIFT;
111 panfrost_gem_mapping_put(mapping);
112
113 return 0;
114 }
115
116 /**
117 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
118 * referenced by the job.
119 * @dev: DRM device
120 * @file_priv: DRM file for this fd
121 * @args: IOCTL args
122 * @job: job being set up
123 *
124 * Resolve handles from userspace to BOs and attach them to job.
125 *
126 * Note that this function doesn't need to unreference the BOs on
127 * failure, because that will happen at panfrost_job_cleanup() time.
128 */
129 static int
panfrost_lookup_bos(struct drm_device * dev,struct drm_file * file_priv,struct drm_panfrost_submit * args,struct panfrost_job * job)130 panfrost_lookup_bos(struct drm_device *dev,
131 struct drm_file *file_priv,
132 struct drm_panfrost_submit *args,
133 struct panfrost_job *job)
134 {
135 struct panfrost_file_priv *priv = file_priv->driver_priv;
136 struct panfrost_gem_object *bo;
137 unsigned int i;
138 int ret;
139
140 job->bo_count = args->bo_handle_count;
141
142 if (!job->bo_count)
143 return 0;
144
145 job->implicit_fences = kvmalloc_array(job->bo_count,
146 sizeof(struct dma_fence *),
147 GFP_KERNEL | __GFP_ZERO);
148 if (!job->implicit_fences)
149 return -ENOMEM;
150
151 ret = drm_gem_objects_lookup(file_priv,
152 (void __user *)(uintptr_t)args->bo_handles,
153 job->bo_count, &job->bos);
154 if (ret)
155 return ret;
156
157 job->mappings = kvmalloc_array(job->bo_count,
158 sizeof(struct panfrost_gem_mapping *),
159 GFP_KERNEL | __GFP_ZERO);
160 if (!job->mappings)
161 return -ENOMEM;
162
163 for (i = 0; i < job->bo_count; i++) {
164 struct panfrost_gem_mapping *mapping;
165
166 bo = to_panfrost_bo(job->bos[i]);
167 mapping = panfrost_gem_mapping_get(bo, priv);
168 if (!mapping) {
169 ret = -EINVAL;
170 break;
171 }
172
173 atomic_inc(&bo->gpu_usecount);
174 job->mappings[i] = mapping;
175 }
176
177 return ret;
178 }
179
180 /**
181 * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
182 * referenced by the job.
183 * @dev: DRM device
184 * @file_priv: DRM file for this fd
185 * @args: IOCTL args
186 * @job: job being set up
187 *
188 * Resolve syncobjs from userspace to fences and attach them to job.
189 *
190 * Note that this function doesn't need to unreference the fences on
191 * failure, because that will happen at panfrost_job_cleanup() time.
192 */
193 static int
panfrost_copy_in_sync(struct drm_device * dev,struct drm_file * file_priv,struct drm_panfrost_submit * args,struct panfrost_job * job)194 panfrost_copy_in_sync(struct drm_device *dev,
195 struct drm_file *file_priv,
196 struct drm_panfrost_submit *args,
197 struct panfrost_job *job)
198 {
199 u32 *handles;
200 int ret = 0;
201 int i;
202
203 job->in_fence_count = args->in_sync_count;
204
205 if (!job->in_fence_count)
206 return 0;
207
208 job->in_fences = kvmalloc_array(job->in_fence_count,
209 sizeof(struct dma_fence *),
210 GFP_KERNEL | __GFP_ZERO);
211 if (!job->in_fences) {
212 DRM_DEBUG("Failed to allocate job in fences\n");
213 return -ENOMEM;
214 }
215
216 handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
217 if (!handles) {
218 ret = -ENOMEM;
219 DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
220 goto fail;
221 }
222
223 if (copy_from_user(handles,
224 (void __user *)(uintptr_t)args->in_syncs,
225 job->in_fence_count * sizeof(u32))) {
226 ret = -EFAULT;
227 DRM_DEBUG("Failed to copy in syncobj handles\n");
228 goto fail;
229 }
230
231 for (i = 0; i < job->in_fence_count; i++) {
232 ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
233 &job->in_fences[i]);
234 if (ret == -EINVAL)
235 goto fail;
236 }
237
238 fail:
239 kvfree(handles);
240 return ret;
241 }
242
panfrost_ioctl_submit(struct drm_device * dev,void * data,struct drm_file * file)243 static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
244 struct drm_file *file)
245 {
246 struct panfrost_device *pfdev = dev->dev_private;
247 struct drm_panfrost_submit *args = data;
248 struct drm_syncobj *sync_out = NULL;
249 struct panfrost_job *job;
250 int ret = 0;
251
252 if (!args->jc)
253 return -EINVAL;
254
255 if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
256 return -EINVAL;
257
258 if (args->out_sync > 0) {
259 sync_out = drm_syncobj_find(file, args->out_sync);
260 if (!sync_out)
261 return -ENODEV;
262 }
263
264 job = kzalloc(sizeof(*job), GFP_KERNEL);
265 if (!job) {
266 ret = -ENOMEM;
267 goto fail_out_sync;
268 }
269
270 kref_init(&job->refcount);
271
272 job->pfdev = pfdev;
273 job->jc = args->jc;
274 job->requirements = args->requirements;
275 job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
276 job->file_priv = file->driver_priv;
277
278 ret = panfrost_copy_in_sync(dev, file, args, job);
279 if (ret)
280 goto fail_job;
281
282 ret = panfrost_lookup_bos(dev, file, args, job);
283 if (ret)
284 goto fail_job;
285
286 ret = panfrost_job_push(job);
287 if (ret)
288 goto fail_job;
289
290 /* Update the return sync object for the job */
291 if (sync_out)
292 drm_syncobj_replace_fence(sync_out, job->render_done_fence);
293
294 fail_job:
295 panfrost_job_put(job);
296 fail_out_sync:
297 if (sync_out)
298 drm_syncobj_put(sync_out);
299
300 return ret;
301 }
302
303 static int
panfrost_ioctl_wait_bo(struct drm_device * dev,void * data,struct drm_file * file_priv)304 panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
305 struct drm_file *file_priv)
306 {
307 long ret;
308 struct drm_panfrost_wait_bo *args = data;
309 struct drm_gem_object *gem_obj;
310 unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
311
312 if (args->pad)
313 return -EINVAL;
314
315 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
316 if (!gem_obj)
317 return -ENOENT;
318
319 ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
320 true, timeout);
321 if (!ret)
322 ret = timeout ? -ETIMEDOUT : -EBUSY;
323
324 drm_gem_object_put(gem_obj);
325
326 return ret;
327 }
328
panfrost_ioctl_mmap_bo(struct drm_device * dev,void * data,struct drm_file * file_priv)329 static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
330 struct drm_file *file_priv)
331 {
332 struct drm_panfrost_mmap_bo *args = data;
333 struct drm_gem_object *gem_obj;
334 int ret;
335
336 if (args->flags != 0) {
337 DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
338 return -EINVAL;
339 }
340
341 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
342 if (!gem_obj) {
343 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
344 return -ENOENT;
345 }
346
347 /* Don't allow mmapping of heap objects as pages are not pinned. */
348 if (to_panfrost_bo(gem_obj)->is_heap) {
349 ret = -EINVAL;
350 goto out;
351 }
352
353 ret = drm_gem_create_mmap_offset(gem_obj);
354 if (ret == 0)
355 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
356
357 out:
358 drm_gem_object_put(gem_obj);
359 return ret;
360 }
361
panfrost_ioctl_get_bo_offset(struct drm_device * dev,void * data,struct drm_file * file_priv)362 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
363 struct drm_file *file_priv)
364 {
365 struct panfrost_file_priv *priv = file_priv->driver_priv;
366 struct drm_panfrost_get_bo_offset *args = data;
367 struct panfrost_gem_mapping *mapping;
368 struct drm_gem_object *gem_obj;
369 struct panfrost_gem_object *bo;
370
371 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
372 if (!gem_obj) {
373 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
374 return -ENOENT;
375 }
376 bo = to_panfrost_bo(gem_obj);
377
378 mapping = panfrost_gem_mapping_get(bo, priv);
379 drm_gem_object_put(gem_obj);
380
381 if (!mapping)
382 return -EINVAL;
383
384 args->offset = mapping->mmnode.start << PAGE_SHIFT;
385 panfrost_gem_mapping_put(mapping);
386 return 0;
387 }
388
panfrost_ioctl_madvise(struct drm_device * dev,void * data,struct drm_file * file_priv)389 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
390 struct drm_file *file_priv)
391 {
392 struct panfrost_file_priv *priv = file_priv->driver_priv;
393 struct drm_panfrost_madvise *args = data;
394 struct panfrost_device *pfdev = dev->dev_private;
395 struct drm_gem_object *gem_obj;
396 struct panfrost_gem_object *bo;
397 int ret = 0;
398
399 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
400 if (!gem_obj) {
401 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
402 return -ENOENT;
403 }
404
405 bo = to_panfrost_bo(gem_obj);
406
407 mutex_lock(&pfdev->shrinker_lock);
408 mutex_lock(&bo->mappings.lock);
409 if (args->madv == PANFROST_MADV_DONTNEED) {
410 struct panfrost_gem_mapping *first;
411
412 first = list_first_entry(&bo->mappings.list,
413 struct panfrost_gem_mapping,
414 node);
415
416 /*
417 * If we want to mark the BO purgeable, there must be only one
418 * user: the caller FD.
419 * We could do something smarter and mark the BO purgeable only
420 * when all its users have marked it purgeable, but globally
421 * visible/shared BOs are likely to never be marked purgeable
422 * anyway, so let's not bother.
423 */
424 if (!list_is_singular(&bo->mappings.list) ||
425 WARN_ON_ONCE(first->mmu != priv->mmu)) {
426 ret = -EINVAL;
427 goto out_unlock_mappings;
428 }
429 }
430
431 args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
432
433 if (args->retained) {
434 if (args->madv == PANFROST_MADV_DONTNEED)
435 list_add_tail(&bo->base.madv_list,
436 &pfdev->shrinker_list);
437 else if (args->madv == PANFROST_MADV_WILLNEED)
438 list_del_init(&bo->base.madv_list);
439 }
440
441 out_unlock_mappings:
442 mutex_unlock(&bo->mappings.lock);
443 mutex_unlock(&pfdev->shrinker_lock);
444
445 drm_gem_object_put(gem_obj);
446 return ret;
447 }
448
panfrost_unstable_ioctl_check(void)449 int panfrost_unstable_ioctl_check(void)
450 {
451 if (!unstable_ioctls)
452 return -ENOSYS;
453
454 return 0;
455 }
456
457 static int
panfrost_open(struct drm_device * dev,struct drm_file * file)458 panfrost_open(struct drm_device *dev, struct drm_file *file)
459 {
460 int ret;
461 struct panfrost_device *pfdev = dev->dev_private;
462 struct panfrost_file_priv *panfrost_priv;
463
464 panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
465 if (!panfrost_priv)
466 return -ENOMEM;
467
468 panfrost_priv->pfdev = pfdev;
469 file->driver_priv = panfrost_priv;
470
471 panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
472 if (IS_ERR(panfrost_priv->mmu)) {
473 ret = PTR_ERR(panfrost_priv->mmu);
474 goto err_free;
475 }
476
477 ret = panfrost_job_open(panfrost_priv);
478 if (ret)
479 goto err_job;
480
481 return 0;
482
483 err_job:
484 panfrost_mmu_ctx_put(panfrost_priv->mmu);
485 err_free:
486 kfree(panfrost_priv);
487 return ret;
488 }
489
490 static void
panfrost_postclose(struct drm_device * dev,struct drm_file * file)491 panfrost_postclose(struct drm_device *dev, struct drm_file *file)
492 {
493 struct panfrost_file_priv *panfrost_priv = file->driver_priv;
494
495 panfrost_perfcnt_close(file);
496 panfrost_job_close(panfrost_priv);
497
498 panfrost_mmu_ctx_put(panfrost_priv->mmu);
499 kfree(panfrost_priv);
500 }
501
/*
 * Ioctl dispatch table. NOTE: DRM indexes this array by ioctl number
 * relative to DRM_COMMAND_BASE, so entry order must match the UAPI header.
 */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
	/* Shorthand: ioctl name suffix, handler suffix, DRM access flags. */
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};

/* Default GEM file operations (open/release/mmap/ioctl dispatch). */
DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
518
/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 */
static struct drm_driver panfrost_drm_driver = {
	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open = panfrost_open,
	.postclose = panfrost_postclose,
	.ioctls = panfrost_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops = &panfrost_drm_driver_fops,
	.name = "panfrost",
	.desc = "panfrost DRM",
	.date = "20180908",
	.major = 1,
	.minor = 1,

	/* GEM allocation and PRIME import/export hooks. */
	.gem_create_object = panfrost_gem_create_object,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
	.gem_prime_mmap = drm_gem_prime_mmap,
};
543
panfrost_probe(struct platform_device * pdev)544 static int panfrost_probe(struct platform_device *pdev)
545 {
546 struct panfrost_device *pfdev;
547 struct drm_device *ddev;
548 int err;
549
550 pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
551 if (!pfdev)
552 return -ENOMEM;
553
554 pfdev->pdev = pdev;
555 pfdev->dev = &pdev->dev;
556
557 platform_set_drvdata(pdev, pfdev);
558
559 pfdev->comp = of_device_get_match_data(&pdev->dev);
560 if (!pfdev->comp)
561 return -ENODEV;
562
563 pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
564
565 /* Allocate and initialze the DRM device. */
566 ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
567 if (IS_ERR(ddev))
568 return PTR_ERR(ddev);
569
570 ddev->dev_private = pfdev;
571 pfdev->ddev = ddev;
572
573 mutex_init(&pfdev->shrinker_lock);
574 INIT_LIST_HEAD(&pfdev->shrinker_list);
575
576 err = panfrost_device_init(pfdev);
577 if (err) {
578 if (err != -EPROBE_DEFER)
579 dev_err(&pdev->dev, "Fatal error during GPU init\n");
580 goto err_out0;
581 }
582
583 pm_runtime_set_active(pfdev->dev);
584 pm_runtime_mark_last_busy(pfdev->dev);
585 pm_runtime_enable(pfdev->dev);
586 pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
587 pm_runtime_use_autosuspend(pfdev->dev);
588
589 /*
590 * Register the DRM device with the core and the connectors with
591 * sysfs
592 */
593 err = drm_dev_register(ddev, 0);
594 if (err < 0)
595 goto err_out1;
596
597 panfrost_gem_shrinker_init(ddev);
598
599 return 0;
600
601 err_out1:
602 pm_runtime_disable(pfdev->dev);
603 panfrost_device_fini(pfdev);
604 pm_runtime_set_suspended(pfdev->dev);
605 err_out0:
606 drm_dev_put(ddev);
607 return err;
608 }
609
panfrost_remove(struct platform_device * pdev)610 static int panfrost_remove(struct platform_device *pdev)
611 {
612 struct panfrost_device *pfdev = platform_get_drvdata(pdev);
613 struct drm_device *ddev = pfdev->ddev;
614
615 drm_dev_unregister(ddev);
616 panfrost_gem_shrinker_cleanup(ddev);
617
618 pm_runtime_get_sync(pfdev->dev);
619 pm_runtime_disable(pfdev->dev);
620 panfrost_device_fini(pfdev);
621 pm_runtime_set_suspended(pfdev->dev);
622
623 drm_dev_put(ddev);
624 return 0;
625 }
626
/* Default supply/power-domain description shared by most Mali Midgard GPUs. */
static const char * const default_supplies[] = { "mali" };
static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};

/* Amlogic SoCs additionally need a vendor quirk applied at GPU power-up. */
static const struct panfrost_compatible amlogic_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.vendor_quirk = panfrost_gpu_amlogic_quirk,
};

static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali",
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali",
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
660
/*
 * System sleep is routed through the runtime-PM force helpers; runtime PM
 * itself suspends/resumes the GPU via the panfrost device callbacks.
 */
static const struct dev_pm_ops panfrost_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};
665
/* Platform driver glue: binds via the OF match table above. */
static struct platform_driver panfrost_driver = {
	.probe = panfrost_probe,
	.remove = panfrost_remove,
	.driver = {
		.name = "panfrost",
		.pm = &panfrost_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");
680