1 /*
2  *
3  * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, you can access it online at
17  * http://www.gnu.org/licenses/gpl-2.0.html.
18  *
19  * SPDX-License-Identifier: GPL-2.0
20  *
21  */
22 
23 #include <mali_kbase.h>
24 #include <mali_kbase_config_defaults.h>
25 #include <gpu/mali_kbase_gpu_regmap.h>
26 #include <mali_kbase_gator.h>
27 #include <mali_kbase_mem_linux.h>
28 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
29 #include <linux/devfreq.h>
30 #include <backend/gpu/mali_kbase_devfreq.h>
31 #ifdef CONFIG_DEVFREQ_THERMAL
32 #include <ipa/mali_kbase_ipa_debugfs.h>
33 #endif /* CONFIG_DEVFREQ_THERMAL */
34 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
35 #ifdef CONFIG_MALI_BIFROST_NO_MALI
36 #include "mali_kbase_model_linux.h"
37 #include <backend/gpu/mali_kbase_model_dummy.h>
38 #endif /* CONFIG_MALI_BIFROST_NO_MALI */
39 #include "mali_kbase_mem_profile_debugfs_buf_size.h"
40 #include "mali_kbase_debug_mem_view.h"
41 #include "mali_kbase_mem.h"
42 #include "mali_kbase_mem_pool_debugfs.h"
43 #include "mali_kbase_debugfs_helper.h"
44 #if !MALI_CUSTOMER_RELEASE
45 #include "mali_kbase_regs_dump_debugfs.h"
46 #endif /* !MALI_CUSTOMER_RELEASE */
47 #include "mali_kbase_regs_history_debugfs.h"
48 #include <mali_kbase_hwaccess_backend.h>
49 #include <mali_kbase_hwaccess_time.h>
50 #if !MALI_USE_CSF
51 #include <mali_kbase_hwaccess_jm.h>
52 #endif /* !MALI_USE_CSF */
53 #ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY_VIA_DEBUG_FS
54 #include <mali_kbase_hwaccess_instr.h>
55 #endif
56 #include <mali_kbase_ctx_sched.h>
57 #include <mali_kbase_reset_gpu.h>
58 #include "mali_kbase_ioctl.h"
59 #if !MALI_USE_CSF
60 #include "mali_kbase_kinstr_jm.h"
61 #endif
62 #include "mali_kbase_hwcnt_context.h"
63 #include "mali_kbase_hwcnt_virtualizer.h"
64 #include "mali_kbase_hwcnt_legacy.h"
65 #include "mali_kbase_vinstr.h"
66 #if MALI_USE_CSF
67 #include "csf/mali_kbase_csf_firmware.h"
68 #include "csf/mali_kbase_csf_tiler_heap.h"
69 #include "csf/mali_kbase_csf_kcpu_debugfs.h"
70 #include "csf/mali_kbase_csf_csg_debugfs.h"
71 #endif
72 #ifdef CONFIG_MALI_ARBITER_SUPPORT
73 #include "arbiter/mali_kbase_arbiter_pm.h"
74 #endif
75 
76 #include "mali_kbase_cs_experimental.h"
77 
78 #ifdef CONFIG_MALI_CINSTR_GWT
79 #include "mali_kbase_gwt.h"
80 #endif
81 #include "mali_kbase_pm_internal.h"
82 
83 #include <linux/module.h>
84 #include <linux/init.h>
85 #include <linux/poll.h>
86 #include <linux/kernel.h>
87 #include <linux/errno.h>
88 #include <linux/of.h>
89 #include <linux/platform_device.h>
90 #include <linux/of_platform.h>
91 #include <linux/miscdevice.h>
92 #include <linux/list.h>
93 #include <linux/semaphore.h>
94 #include <linux/fs.h>
95 #include <linux/uaccess.h>
96 #include <linux/interrupt.h>
97 #include <linux/mm.h>
98 #include <linux/compat.h> /* is_compat_task/in_compat_syscall */
99 #include <linux/mman.h>
100 #include <linux/version.h>
101 #include <mali_kbase_hw.h>
102 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
103 #include <mali_kbase_sync.h>
104 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
105 #include <linux/clk.h>
106 #include <linux/clk-provider.h>
107 #include <linux/delay.h>
108 #include <linux/log2.h>
109 
110 #include <mali_kbase_config.h>
111 
112 #if (KERNEL_VERSION(3, 13, 0) <= LINUX_VERSION_CODE)
113 #include <linux/pm_opp.h>
114 #include <soc/rockchip/rockchip_opp_select.h>
115 #else
116 #include <linux/opp.h>
117 #endif
118 
119 #include <linux/pm_runtime.h>
120 
121 #include <tl/mali_kbase_timeline.h>
122 
123 #include <mali_kbase_as_fault_debugfs.h>
124 #include <device/mali_kbase_device.h>
125 #include <context/mali_kbase_context.h>
126 
127 #include <mali_kbase_caps.h>
128 
129 /* GPU IRQ Tags */
130 #define JOB_IRQ_TAG 0
131 #define MMU_IRQ_TAG 1
132 #define GPU_IRQ_TAG 2
133 
134 #define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
135 
136 /**
137  * Kernel min/maj <=> API Version
138  */
139 #define KBASE_API_VERSION(major, minor)                                        \
140     ((((major)&0xFFF) << 20) | (((minor)&0xFFF) << 8) | ((0 & 0xFF) << 0))
141 
142 #define KBASE_API_MIN(api_version) (((api_version) >> 8) & 0xFFF)
143 #define KBASE_API_MAJ(api_version) (((api_version) >> 20) & 0xFFF)
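/*
 * Worked example (for illustration): KBASE_API_VERSION(11, 25) encodes to
 * 0x00B01900 (major 11 in bits 31..20, minor 25 in bits 19..8, low byte 0),
 * and KBASE_API_MAJ()/KBASE_API_MIN() recover 11 and 25 from that value.
 */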
144 
145 /**
146  * mali_kbase_api_version_to_maj_min - convert an api_version to a min/maj pair
147  *
148  * @api_version: API version to convert
149  * @maj:    Major version number (must not exceed 12 bits)
150  * @min:    Minor version number (must not exceed 12 bits)
151  */
152 void mali_kbase_api_version_to_maj_min(unsigned long api_version, u16 *maj,
153                                        u16 *min)
154 {
155     if (WARN_ON(!maj)) {
156         return;
157     }
158 
159     if (WARN_ON(!min)) {
160         return;
161     }
162 
163     *maj = KBASE_API_MAJ(api_version);
164     *min = KBASE_API_MIN(api_version);
165 }
166 
167 /**
168  * kbase capabilities table
169  */
170 typedef struct mali_kbase_capability_def {
171     u16 required_major;
172     u16 required_minor;
173 } mali_kbase_capability_def;
174 
175 /**
176  * This must be kept in-sync with mali_kbase_cap
177  *
178  * The alternative approach would be to embed the cap enum values
179  * in the table. Less efficient but potentially safer.
180  */
181 static mali_kbase_capability_def kbase_caps_table[MALI_KBASE_NUM_CAPS] = {
182 #if MALI_USE_CSF
183     {1, 0}, /* SYSTEM_MONITOR     */
184     {1, 0}, /* JIT_PRESSURE_LIMIT    */
185     {1, 0}, /* MEM_GROW_ON_GPF    */
186     {1, 0}  /* MEM_PROTECTED    */
187 #else
188     {11, 15}, /* SYSTEM_MONITOR     */
189     {11, 25}, /* JIT_PRESSURE_LIMIT    */
190     {11, 2},  /* MEM_GROW_ON_GPF    */
191     {11, 2}   /* MEM_PROTECTED    */
192 #endif
193 };
194 
195 /**
196  * mali_kbase_supports_cap - Query whether a kbase capability is supported
197  *
198  * @api_version:     API version to check the capability against
199  * @cap:        Capability to query for - see mali_kbase_caps.h
200  */
201 bool mali_kbase_supports_cap(unsigned long api_version, mali_kbase_cap cap)
202 {
203     bool supported = false;
204     unsigned long required_ver;
205 
206     mali_kbase_capability_def const *cap_def;
207 
208     if (WARN_ON(cap < 0)) {
209         return false;
210     }
211 
212     if (WARN_ON(cap >= MALI_KBASE_NUM_CAPS)) {
213         return false;
214     }
215 
216     cap_def = &kbase_caps_table[(int)cap];
217     required_ver =
218         KBASE_API_VERSION(cap_def->required_major, cap_def->required_minor);
219     supported = (api_version >= required_ver);
220 
221     return supported;
222 }
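/*
 * Example (illustrative): on a Job Manager (non-CSF) build the table above
 * requires API >= 11.15 for SYSTEM_MONITOR, so a client that negotiated
 * version 11.14 gets false here while an 11.15 (or newer) client gets true.
 */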
223 
224 /**
225  * kbase_file_new - Create an object representing a device file
226  *
227  * @kbdev:  An instance of the GPU platform device, allocated from the probe
228  *          method of the driver.
229  * @filp:   Pointer to the struct file corresponding to device file
230  *          /dev/malixx instance, passed to the file's open method.
231  *
232  * In its initial state, the device file has no context (i.e. no GPU
233  * address space) and no API version number. Both must be assigned before
234  * kbase_file_get_kctx_if_setup_complete() can be used successfully.
235  *
236  * Return: Address of an object representing a simulated device file, or NULL
237  *         on failure.
238  */
239 static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
240                                          struct file *const filp)
241 {
242     struct kbase_file *const kfile = kmalloc(sizeof(*kfile), GFP_KERNEL);
243 
244     if (kfile) {
245         kfile->kbdev = kbdev;
246         kfile->filp = filp;
247         kfile->kctx = NULL;
248         kfile->api_version = 0;
249         atomic_set(&kfile->setup_state, KBASE_FILE_NEED_VSN);
250     }
251     return kfile;
252 }
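/*
 * Note on the setup lifecycle used throughout this file: a kbase_file moves
 * from KBASE_FILE_NEED_VSN -> KBASE_FILE_VSN_IN_PROGRESS (version handshake)
 * -> KBASE_FILE_NEED_CTX -> KBASE_FILE_CTX_IN_PROGRESS (context creation)
 * -> KBASE_FILE_COMPLETE, after which kbase_file_get_kctx_if_setup_complete()
 * returns a usable context.
 */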
253 
254 /**
255  * kbase_file_set_api_version - Set the application programmer interface version
256  *
257  * @kfile:  A device file created by kbase_file_new()
258  * @major:  Major version number (must not exceed 12 bits)
259  * @minor:  Minor version number (must not exceed 12 bits)
260  *
261  * An application programmer interface (API) version must be specified
262  * before calling kbase_file_create_kctx(), otherwise an error is returned.
263  *
264  * If a version number was already set for the given @kfile (or is in the
265  * process of being set by another thread) then an error is returned.
266  *
267  * Return: 0 if successful, otherwise a negative error code.
268  */
269 static int kbase_file_set_api_version(struct kbase_file *const kfile,
270                                       u16 const major, u16 const minor)
271 {
272     if (WARN_ON(!kfile)) {
273         return -EINVAL;
274     }
275 
276     /* Setup is pending: try to claim the setup step for this thread; if
277      * setup was already claimed (in progress) elsewhere, fail this call
278      */
279     if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_VSN,
280                        KBASE_FILE_VSN_IN_PROGRESS) != KBASE_FILE_NEED_VSN) {
281         return -EPERM;
282     }
283 
284     /* save the proposed version number for later use */
285     kfile->api_version = KBASE_API_VERSION(major, minor);
286 
287     atomic_set(&kfile->setup_state, KBASE_FILE_NEED_CTX);
288     return 0;
289 }
290 
291 /**
292  * kbase_file_get_api_version - Get the application programmer interface version
293  *
294  * @kfile:  A device file created by kbase_file_new()
295  *
296  * Return: The version number (encoded with KBASE_API_VERSION) or 0 if none has
297  *         been set.
298  */
299 static unsigned long kbase_file_get_api_version(struct kbase_file *const kfile)
300 {
301     if (WARN_ON(!kfile)) {
302         return 0;
303     }
304 
305     if (atomic_read(&kfile->setup_state) < KBASE_FILE_NEED_CTX) {
306         return 0;
307     }
308 
309     return kfile->api_version;
310 }
311 
312 /**
313  * kbase_file_create_kctx - Create a kernel base context
314  *
315  * @kfile:  A device file created by kbase_file_new()
316  * @flags:  Flags to set, which can be any combination of
317  *          BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
318  *
319  * This creates a new context for the GPU platform device instance that was
320  * specified when kbase_file_new() was called. Each context has its own GPU
321  * address space. If a context was already created for the given @kfile (or is
322  * in the process of being created for it by another thread) then an error is
323  * returned.
324  *
325  * An API version number must have been set by kbase_file_set_api_version()
326  * before calling this function, otherwise an error is returned.
327  *
328  * Return: 0 if a new context was created, otherwise a negative error code.
329  */
330 static int kbase_file_create_kctx(struct kbase_file *const kfile,
331                                   base_context_create_flags const flags);
332 
333 /**
334  * kbase_file_get_kctx_if_setup_complete - Get a kernel base context
335  *                                         pointer from a device file
336  *
337  * @kfile: A device file created by kbase_file_new()
338  *
339  * This function returns NULL if no context has been created for the given
340  * @kfile. This makes it safe to use in
341  * circumstances where the order of initialization cannot be enforced, but
342  * only if the caller checks the return value.
343  *
344  * Return: Address of the kernel base context associated with the @kfile, or
345  *         NULL if no context exists.
346  */
347 static struct kbase_context *
348 kbase_file_get_kctx_if_setup_complete(struct kbase_file *const kfile)
349 {
350     if (WARN_ON(!kfile) ||
351         atomic_read(&kfile->setup_state) != KBASE_FILE_COMPLETE ||
352         WARN_ON(!kfile->kctx)) {
353         return NULL;
354     }
355 
356     return kfile->kctx;
357 }
358 
359 /**
360  * kbase_file_delete - Destroy an object representing a device file
361  *
362  * @kfile: A device file created by kbase_file_new()
363  *
364  * If any context was created for the @kfile then it is destroyed.
365  */
366 static void kbase_file_delete(struct kbase_file *const kfile)
367 {
368     struct kbase_device *kbdev = NULL;
369 
370     if (WARN_ON(!kfile)) {
371         return;
372     }
373 
374     kfile->filp->private_data = NULL;
375     kbdev = kfile->kbdev;
376 
377     if (atomic_read(&kfile->setup_state) == KBASE_FILE_COMPLETE) {
378         struct kbase_context *kctx = kfile->kctx;
379 
380 #ifdef CONFIG_DEBUG_FS
381         kbasep_mem_profile_debugfs_remove(kctx);
382 #endif
383 
384         mutex_lock(&kctx->legacy_hwcnt_lock);
385         /* If this client was performing hardware counter dumping and
386          * did not explicitly detach itself, destroy it now
387          */
388         kbase_hwcnt_legacy_client_destroy(kctx->legacy_hwcnt_cli);
389         kctx->legacy_hwcnt_cli = NULL;
390         mutex_unlock(&kctx->legacy_hwcnt_lock);
391 
392         kbase_context_debugfs_term(kctx);
393 
394         kbase_destroy_context(kctx);
395 
396         dev_dbg(kbdev->dev, "deleted base context\n");
397     }
398 
399     kbase_release_device(kbdev);
400 
401     kfree(kfile);
402 }
403 
404 static int kbase_api_handshake(struct kbase_file *kfile,
405                                struct kbase_ioctl_version_check *version)
406 {
407     int err = 0;
408 
409     switch (version->major) {
410         case BASE_UK_VERSION_MAJOR:
411             /* set minor to be the lowest common */
412             version->minor =
413                 min_t(int, BASE_UK_VERSION_MINOR, (int)version->minor);
414             break;
415         default:
416             /* We return our actual version regardless if it
417              * matches the version returned by userspace -
418              * userspace can bail if it can't handle this
419              * version
420              */
421             version->major = BASE_UK_VERSION_MAJOR;
422             version->minor = BASE_UK_VERSION_MINOR;
423             break;
424     }
425 
426     /* save the proposed version number for later use */
427     err = kbase_file_set_api_version(kfile, version->major, version->minor);
428     if (unlikely(err)) {
429         return err;
430     }
431 
432     /* For backward compatibility, we may need to create the context before
433      * the flags have been set. Originally it was created on file open
434      * (with job submission disabled) but we don't support that usage.
435      */
436     if (!mali_kbase_supports_system_monitor(kbase_file_get_api_version(kfile))) {
437         err = kbase_file_create_kctx(kfile, BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
438     }
439 
440     return err;
441 }
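/*
 * Negotiation sketch with illustrative values: if the kernel exports
 * BASE_UK_VERSION_MAJOR/MINOR as 11/31 and userspace asks for 11.25, the
 * agreed version becomes 11.25 (the lower minor). If userspace asks for a
 * different major, say 10.x, the kernel writes back 11.31 and leaves it to
 * userspace to decide whether it can work with that.
 */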
442 
443 static int kbase_api_handshake_dummy(struct kbase_file *kfile,
444                                      struct kbase_ioctl_version_check *version)
445 {
446     return -EPERM;
447 }
448 
449 /**
450  * enum mali_error - Mali error codes shared with userspace
451  *
452  * This is a subset of the common Mali errors that can be returned to userspace.
453  * Values of matching user and kernel space enumerators MUST be the same.
454  * MALI_ERROR_NONE is guaranteed to be 0.
455  *
456  * @MALI_ERROR_NONE: Success
457  * @MALI_ERROR_OUT_OF_GPU_MEMORY: Not used in the kernel driver
458  * @MALI_ERROR_OUT_OF_MEMORY: Memory allocation failure
459  * @MALI_ERROR_FUNCTION_FAILED: Generic error code
460  */
461 enum mali_error {
462     MALI_ERROR_NONE = 0,
463     MALI_ERROR_OUT_OF_GPU_MEMORY,
464     MALI_ERROR_OUT_OF_MEMORY,
465     MALI_ERROR_FUNCTION_FAILED,
466 };
467 
468 static struct kbase_device *to_kbase_device(struct device *dev)
469 {
470     return dev_get_drvdata(dev);
471 }
472 
473 int assign_irqs(struct kbase_device *kbdev)
474 {
475     struct platform_device *pdev;
476     int i;
477 
478     if (!kbdev) {
479         return -ENODEV;
480     }
481 
482     pdev = to_platform_device(kbdev->dev);
483     /* 3 IRQ resources */
484     for (i = 0; i < 0x3; i++) {
485         struct resource *irq_res;
486         int irqtag;
487 
488         irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
489         if (!irq_res) {
490             dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
491             return -ENOENT;
492         }
493 
494 #ifdef CONFIG_OF
495         if (!strncasecmp(irq_res->name, "JOB", 0x4)) {
496             irqtag = JOB_IRQ_TAG;
497         } else if (!strncasecmp(irq_res->name, "MMU", 0x4)) {
498             irqtag = MMU_IRQ_TAG;
499         } else if (!strncasecmp(irq_res->name, "GPU", 0x4)) {
500             irqtag = GPU_IRQ_TAG;
501         } else {
502             dev_err(&pdev->dev, "Invalid irq res name: '%s'\n", irq_res->name);
503             return -EINVAL;
504         }
505 #else
506         irqtag = i;
507 #endif /* CONFIG_OF */
508         kbdev->irqs[irqtag].irq = irq_res->start;
509         kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
510     }
511 
512     return 0;
513 }
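/*
 * With CONFIG_OF the three IRQ resources are matched by name rather than by
 * index. An illustrative (not binding-exact) device tree fragment:
 *
 *     interrupt-names = "job", "mmu", "gpu";
 *
 * The strncasecmp() calls above make the match case-insensitive, so "JOB"
 * and "job" are both accepted.
 */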
514 
515 /* Find a particular kbase device (as specified by minor number), or find the
516  * "first" device if -1 is specified */
517 struct kbase_device *kbase_find_device(int minor)
518 {
519     struct kbase_device *kbdev = NULL;
520     struct list_head *entry;
521     const struct list_head *dev_list = kbase_device_get_list();
522 
523     list_for_each(entry, dev_list)
524     {
525         struct kbase_device *tmp;
526 
527         tmp = list_entry(entry, struct kbase_device, entry);
528         if (tmp->mdev.minor == minor || minor == -1) {
529             kbdev = tmp;
530             get_device(kbdev->dev);
531             break;
532         }
533     }
534     kbase_device_put_list(dev_list);
535 
536     return kbdev;
537 }
538 EXPORT_SYMBOL(kbase_find_device);
539 
540 void kbase_release_device(struct kbase_device *kbdev)
541 {
542     put_device(kbdev->dev);
543 }
544 EXPORT_SYMBOL(kbase_release_device);
545 
546 #ifdef CONFIG_DEBUG_FS
547 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) &&                            \
548     !(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 28) &&                        \
549       LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
550 /*
551  * Kernel versions before v4.6 don't have kstrtobool_from_user(), except
552  * longterm 4.4.y, which gained it in 4.4.28
553  */
554 static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
555 {
556     char buf[4];
557 
558     count = min(count, sizeof(buf) - 1);
559 
560     if (copy_from_user(buf, s, count)) {
561         return -EFAULT;
562     }
563     buf[count] = '\0';
564 
565     return strtobool(buf, res);
566 }
567 #endif
568 
569 static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf,
570                                         size_t size, loff_t *off)
571 {
572     struct kbase_context *kctx = f->private_data;
573     int err;
574     bool value;
575 
576     err = kstrtobool_from_user(ubuf, size, &value);
577     if (err) {
578         return err;
579     }
580 
581     if (value) {
582         kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
583     } else {
584         kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
585     }
586 
587     return size;
588 }
589 
590 static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf,
591                                        size_t size, loff_t *off)
592 {
593     struct kbase_context *kctx = f->private_data;
594     char buf[32];
595     int count;
596     bool value;
597 
598     value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
599 
600     count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
601 
602     return simple_read_from_buffer(ubuf, size, off, buf, count);
603 }
604 
605 static const struct file_operations kbase_infinite_cache_fops = {
606     .owner = THIS_MODULE,
607     .open = simple_open,
608     .write = write_ctx_infinite_cache,
609     .read = read_ctx_infinite_cache,
610 };
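/*
 * Usage sketch (the debugfs mount point and device directory name are
 * illustrative; the per-context directory is created below in
 * kbase_file_create_kctx() as "<tgid>_<id>"):
 *
 *     echo Y > /sys/kernel/debug/mali0/ctx/<tgid>_<id>/infinite_cache
 *     cat /sys/kernel/debug/mali0/ctx/<tgid>_<id>/infinite_cache   -> "Y"/"N"
 */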
611 
612 static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf,
613                                        size_t size, loff_t *off)
614 {
615     struct kbase_context *kctx = f->private_data;
616     int err;
617     bool value;
618 
619     err = kstrtobool_from_user(ubuf, size, &value);
620     if (err) {
621         return err;
622     }
623 
624     if (value) {
625 #if defined(CONFIG_64BIT)
626         /* 32-bit clients cannot force SAME_VA */
627         if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
628             return -EINVAL;
629         }
630         kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
631 #else  /* defined(CONFIG_64BIT) */
632         /* 32-bit clients cannot force SAME_VA */
633         return -EINVAL;
634 #endif /* defined(CONFIG_64BIT) */
635     } else {
636         kbase_ctx_flag_clear(kctx, KCTX_FORCE_SAME_VA);
637     }
638 
639     return size;
640 }
641 
642 static ssize_t read_ctx_force_same_va(struct file *f, char __user *ubuf,
643                                       size_t size, loff_t *off)
644 {
645     struct kbase_context *kctx = f->private_data;
646     char buf[32];
647     int count;
648     bool value;
649 
650     value = kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA);
651 
652     count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
653 
654     return simple_read_from_buffer(ubuf, size, off, buf, count);
655 }
656 
657 static const struct file_operations kbase_force_same_va_fops = {
658     .owner = THIS_MODULE,
659     .open = simple_open,
660     .write = write_ctx_force_same_va,
661     .read = read_ctx_force_same_va,
662 };
663 #endif /* CONFIG_DEBUG_FS */
664 
665 static int kbase_file_create_kctx(struct kbase_file *const kfile,
666                                   base_context_create_flags const flags)
667 {
668     struct kbase_device *kbdev = NULL;
669     struct kbase_context *kctx = NULL;
670 #ifdef CONFIG_DEBUG_FS
671     char kctx_name[64];
672 #endif
673 
674     if (WARN_ON(!kfile)) {
675         return -EINVAL;
676     }
677 
678     /* Setup is pending: try to claim the setup step for this thread; if
679      * setup was already claimed (in progress) elsewhere, fail this call
680      */
681     if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_CTX,
682                        KBASE_FILE_CTX_IN_PROGRESS) != KBASE_FILE_NEED_CTX) {
683         return -EPERM;
684     }
685 
686     kbdev = kfile->kbdev;
687 
688 #if (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE)
689     kctx = kbase_create_context(kbdev, in_compat_syscall(), flags,
690                                 kfile->api_version, kfile->filp);
691 #else
692     kctx = kbase_create_context(kbdev, is_compat_task(), flags,
693                                 kfile->api_version, kfile->filp);
694 #endif /* (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE) */
695     /* if bad flags, will stay stuck in setup mode */
696     if (!kctx) {
697         return -ENOMEM;
698     }
699 
700     if (kbdev->infinite_cache_active_default) {
701         kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
702     }
703 
704 #ifdef CONFIG_DEBUG_FS
705     (void)snprintf(kctx_name, 0x40, "%d_%d", kctx->tgid, kctx->id);
706 
707     mutex_init(&kctx->mem_profile_lock);
708 
709     kctx->kctx_dentry =
710         debugfs_create_dir(kctx_name, kbdev->debugfs_ctx_directory);
711 
712     if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
713         /* we don't treat this as a fail - just warn about it */
714         dev_warn(kbdev->dev, "couldn't create debugfs dir for kctx\n");
715     } else {
716 #if (KERNEL_VERSION(4, 7, 0) > LINUX_VERSION_CODE)
717         /* prevent unprivileged use of debug file system
718          * in old kernel version
719          */
720         debugfs_create_file("infinite_cache", 0x180, kctx->kctx_dentry, kctx,
721                             &kbase_infinite_cache_fops);
722 #else
723         debugfs_create_file("infinite_cache", 0x1a4, kctx->kctx_dentry, kctx,
724                             &kbase_infinite_cache_fops);
725 #endif
726         debugfs_create_file("force_same_va", 0x180, kctx->kctx_dentry, kctx,
727                             &kbase_force_same_va_fops);
728 
729         kbase_context_debugfs_init(kctx);
730     }
731 #endif /* CONFIG_DEBUG_FS */
732 
733     dev_dbg(kbdev->dev, "created base context\n");
734 
735     kfile->kctx = kctx;
736     atomic_set(&kfile->setup_state, KBASE_FILE_COMPLETE);
737 
738     return 0;
739 }
740 
741 static int kbase_open(struct inode *inode, struct file *filp)
742 {
743     struct kbase_device *kbdev = NULL;
744     struct kbase_file *kfile;
745     int ret = 0;
746 
747     kbdev = kbase_find_device(iminor(inode));
748     if (!kbdev) {
749         return -ENODEV;
750     }
751 
752     kfile = kbase_file_new(kbdev, filp);
753     if (!kfile) {
754         ret = -ENOMEM;
755         goto out;
756     }
757 
758     filp->private_data = kfile;
759     filp->f_mode |= FMODE_UNSIGNED_OFFSET;
760 
761     return 0;
762 
763 out:
764     kbase_release_device(kbdev);
765     return ret;
766 }
767 
768 static int kbase_release(struct inode *inode, struct file *filp)
769 {
770     struct kbase_file *const kfile = filp->private_data;
771 
772     kbase_file_delete(kfile);
773     return 0;
774 }
775 
776 static int kbase_api_set_flags(struct kbase_file *kfile,
777                                struct kbase_ioctl_set_flags *flags)
778 {
779     int err = 0;
780     unsigned long const api_version = kbase_file_get_api_version(kfile);
781     struct kbase_context *kctx = NULL;
782 
783     /* Validate flags */
784     if (flags->create_flags !=
785         (flags->create_flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS)) {
786         return -EINVAL;
787     }
788 
789     /* For backward compatibility, the context may have been created before
790      * the flags were set.
791      */
792     if (mali_kbase_supports_system_monitor(api_version)) {
793         err = kbase_file_create_kctx(kfile, flags->create_flags);
794     } else {
795 #if !MALI_USE_CSF
796         struct kbasep_js_kctx_info *js_kctx_info = NULL;
797         unsigned long irq_flags = 0;
798 #endif
799 
800         /* If setup is incomplete (e.g. because the API version
801          * wasn't set) then we have to give up.
802          */
803         kctx = kbase_file_get_kctx_if_setup_complete(kfile);
804         if (unlikely(!kctx)) {
805             return -EPERM;
806         }
807 
808 #if MALI_USE_CSF
809         /* On CSF GPUs Job Manager interface isn't used to submit jobs
810          * (there are no job slots). So the legacy job manager path to
811          * submit jobs needs to remain disabled for CSF GPUs.
812          */
813 #else
814         js_kctx_info = &kctx->jctx.sched_info;
815         mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
816         spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
817         /* Translate the flags */
818         if ((flags->create_flags &
819              BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0) {
820             kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
821         }
822 
823         spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
824         mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
825 #endif
826     }
827 
828     return err;
829 }
830 
831 #if !MALI_USE_CSF
832 static int kbase_api_job_submit(struct kbase_context *kctx,
833                                 struct kbase_ioctl_job_submit *submit)
834 {
835     return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
836                            submit->nr_atoms, submit->stride, false);
837 }
838 #endif /* !MALI_USE_CSF */
839 
840 static int kbase_api_get_gpuprops(struct kbase_context *kctx,
841                                   struct kbase_ioctl_get_gpuprops *get_props)
842 {
843     struct kbase_gpu_props *kprops = &kctx->kbdev->gpu_props;
844     int err;
845 
846     if (get_props->flags != 0) {
847         dev_err(kctx->kbdev->dev, "Unsupported flags to get_gpuprops");
848         return -EINVAL;
849     }
850 
851     if (get_props->size == 0) {
852         return kprops->prop_buffer_size;
853     }
854     if (get_props->size < kprops->prop_buffer_size) {
855         return -EINVAL;
856     }
857 
858     err = copy_to_user(u64_to_user_ptr(get_props->buffer), kprops->prop_buffer,
859                        kprops->prop_buffer_size);
860     if (err) {
861         return -EFAULT;
862     }
863     return kprops->prop_buffer_size;
864 }
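/*
 * Callers typically drive this in two steps (sketch): first issue the ioctl
 * with size == 0 to learn the required buffer size (returned as a positive
 * value), then allocate that many bytes and call again with the buffer
 * pointer set to receive the serialised property list.
 */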
865 
866 #if !MALI_USE_CSF
867 static int kbase_api_post_term(struct kbase_context *kctx)
868 {
869     kbase_event_close(kctx);
870     return 0;
871 }
872 #endif /* !MALI_USE_CSF */
873 
874 static int kbase_api_mem_alloc(struct kbase_context *kctx,
875                                union kbase_ioctl_mem_alloc *alloc)
876 {
877     struct kbase_va_region *reg;
878     u64 flags = alloc->in.flags;
879     u64 gpu_va;
880 
881     rcu_read_lock();
882     /* Don't allow memory allocation until user space has set up the
883      * tracking page (which sets kctx->process_mm). Also catches when we've
884      * forked.
885      */
886     if (rcu_dereference(kctx->process_mm) != current->mm) {
887         rcu_read_unlock();
888         return -EINVAL;
889     }
890     rcu_read_unlock();
891 
892     if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
893         return -ENOMEM;
894     }
895 
896     /* Force SAME_VA if a 64-bit client.
897      * The only exception is GPU-executable memory if an EXEC_VA zone
898      * has been initialized. In that case, GPU-executable memory may
899      * or may not be SAME_VA.
900      */
901     if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
902         kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
903         if (!(flags & BASE_MEM_PROT_GPU_EX) || !kbase_has_exec_va_zone(kctx)) {
904             flags |= BASE_MEM_SAME_VA;
905         }
906     }
907 
908 #if MALI_USE_CSF
909     /* If CSF event memory allocation, need to force certain flags.
910      * SAME_VA - GPU address needs to be used as a CPU address, explicit
911      * mmap has to be avoided.
912      * CACHED_CPU - Frequent access to the event memory by CPU.
913      * COHERENT_SYSTEM - No explicit cache maintenance around the access
914      * to event memory so need to leverage the coherency support.
915      */
916     if (flags & BASE_MEM_CSF_EVENT) {
917         flags |=
918             (BASE_MEM_SAME_VA | BASE_MEM_CACHED_CPU | BASE_MEM_COHERENT_SYSTEM);
919     }
920 #endif
921 
922     reg = kbase_mem_alloc(kctx, alloc->in.va_pages, alloc->in.commit_pages,
923                           alloc->in.extent, &flags, &gpu_va);
924     if (!reg) {
925         return -ENOMEM;
926     }
927 
928     alloc->out.flags = flags;
929     alloc->out.gpu_va = gpu_va;
930 
931     return 0;
932 }
933 
934 static int kbase_api_mem_query(struct kbase_context *kctx,
935                                union kbase_ioctl_mem_query *query)
936 {
937     return kbase_mem_query(kctx, query->in.gpu_addr, query->in.query,
938                            &query->out.value);
939 }
940 
941 static int kbase_api_mem_free(struct kbase_context *kctx,
942                               struct kbase_ioctl_mem_free *free)
943 {
944     return kbase_mem_free(kctx, free->gpu_addr);
945 }
946 
947 #if !MALI_USE_CSF
948 static int kbase_api_kinstr_jm_fd(struct kbase_context *kctx,
949                                   union kbase_kinstr_jm_fd *arg)
950 {
951     return kbase_kinstr_jm_get_fd(kctx->kinstr_jm, arg);
952 }
953 #endif
954 
955 static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
956                                         struct kbase_ioctl_hwcnt_reader_setup *setup)
957 {
958     return kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, setup);
959 }
960 
961 static int kbase_api_hwcnt_enable(struct kbase_context *kctx,
962                                   struct kbase_ioctl_hwcnt_enable *enable)
963 {
964     int ret;
965 
966     mutex_lock(&kctx->legacy_hwcnt_lock);
967     if (enable->dump_buffer != 0) {
968         /* Non-zero dump buffer, so user wants to create the client */
969         if (kctx->legacy_hwcnt_cli == NULL) {
970             ret = kbase_hwcnt_legacy_client_create(
971                 kctx->kbdev->hwcnt_gpu_virt, enable, &kctx->legacy_hwcnt_cli);
972         } else {
973             /* This context already has a client */
974             ret = -EBUSY;
975         }
976     } else {
977         /* Zero dump buffer, so user wants to destroy the client */
978         if (kctx->legacy_hwcnt_cli != NULL) {
979             kbase_hwcnt_legacy_client_destroy(kctx->legacy_hwcnt_cli);
980             kctx->legacy_hwcnt_cli = NULL;
981             ret = 0;
982         } else {
983             /* This context has no client to destroy */
984             ret = -EINVAL;
985         }
986     }
987     mutex_unlock(&kctx->legacy_hwcnt_lock);
988 
989     return ret;
990 }
991 
992 static int kbase_api_hwcnt_dump(struct kbase_context *kctx)
993 {
994     int ret;
995 
996     mutex_lock(&kctx->legacy_hwcnt_lock);
997     ret = kbase_hwcnt_legacy_client_dump(kctx->legacy_hwcnt_cli);
998     mutex_unlock(&kctx->legacy_hwcnt_lock);
999 
1000     return ret;
1001 }
1002 
1003 static int kbase_api_hwcnt_clear(struct kbase_context *kctx)
1004 {
1005     int ret;
1006 
1007     mutex_lock(&kctx->legacy_hwcnt_lock);
1008     ret = kbase_hwcnt_legacy_client_clear(kctx->legacy_hwcnt_cli);
1009     mutex_unlock(&kctx->legacy_hwcnt_lock);
1010 
1011     return ret;
1012 }
1013 
1014 static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
1015                                           union kbase_ioctl_get_cpu_gpu_timeinfo *timeinfo)
1016 {
1017     u32 flags = timeinfo->in.request_flags;
1018     struct timespec64 ts;
1019     u64 timestamp;
1020     u64 cycle_cnt;
1021 
1022     kbase_pm_context_active(kctx->kbdev);
1023 
1024     kbase_backend_get_gpu_time(
1025         kctx->kbdev,
1026         (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) ? &cycle_cnt : NULL,
1027         (flags & BASE_TIMEINFO_TIMESTAMP_FLAG) ? &timestamp : NULL,
1028         (flags & BASE_TIMEINFO_MONOTONIC_FLAG) ? &ts : NULL);
1029 
1030     if (flags & BASE_TIMEINFO_TIMESTAMP_FLAG) {
1031         timeinfo->out.timestamp = timestamp;
1032     }
1033 
1034     if (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) {
1035         timeinfo->out.cycle_counter = cycle_cnt;
1036     }
1037 
1038     if (flags & BASE_TIMEINFO_MONOTONIC_FLAG) {
1039         timeinfo->out.sec = ts.tv_sec;
1040         timeinfo->out.nsec = ts.tv_nsec;
1041     }
1042 
1043     kbase_pm_context_idle(kctx->kbdev);
1044 
1045     return 0;
1046 }
1047 
1048 #ifdef CONFIG_MALI_BIFROST_NO_MALI
1049 static int kbase_api_hwcnt_set(struct kbase_context *kctx,
1050                                struct kbase_ioctl_hwcnt_values *values)
1051 {
1052     gpu_model_set_dummy_prfcnt_sample((u32 __user *)(uintptr_t)values->data,
1053                                       values->size);
1054 
1055     return 0;
1056 }
1057 #endif
1058 
1059 static int kbase_api_disjoint_query(struct kbase_context *kctx,
1060                                     struct kbase_ioctl_disjoint_query *query)
1061 {
1062     query->counter = kbase_disjoint_event_get(kctx->kbdev);
1063 
1064     return 0;
1065 }
1066 
1067 static int kbase_api_get_ddk_version(struct kbase_context *kctx,
1068                                      struct kbase_ioctl_get_ddk_version *version)
1069 {
1070     int ret;
1071     int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
1072 
1073     if (version->version_buffer == 0) {
1074         return len;
1075     }
1076 
1077     if (version->size < len) {
1078         return -EOVERFLOW;
1079     }
1080 
1081     ret = copy_to_user(u64_to_user_ptr(version->version_buffer),
1082                        KERNEL_SIDE_DDK_VERSION_STRING,
1083                        sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
1084 
1085     if (ret) {
1086         return -EFAULT;
1087     }
1088 
1089     return len;
1090 }
1091 
1092 /* Defaults for legacy just-in-time memory allocator initialization
1093  * kernel calls
1094  */
1095 #define DEFAULT_MAX_JIT_ALLOCATIONS 255
1096 #define JIT_LEGACY_TRIM_LEVEL (0) /* No trimming */
1097 
1098 static int kbase_api_mem_jit_init_10_2(struct kbase_context *kctx,
1099                                        struct kbase_ioctl_mem_jit_init_10_2 *jit_init)
1100 {
1101     kctx->jit_version = 1;
1102 
1103     /* since no phys_pages parameter, use the maximum: va_pages */
1104     return kbase_region_tracker_init_jit(
1105         kctx, jit_init->va_pages, DEFAULT_MAX_JIT_ALLOCATIONS,
1106         JIT_LEGACY_TRIM_LEVEL, BASE_MEM_GROUP_DEFAULT, jit_init->va_pages);
1107 }
1108 
1109 static int kbase_api_mem_jit_init_11_5(struct kbase_context *kctx,
1110                                        struct kbase_ioctl_mem_jit_init_11_5 *jit_init)
1111 {
1112     int i;
1113 
1114     kctx->jit_version = 0x2;
1115 
1116     for (i = 0; i < sizeof(jit_init->padding); i++) {
1117         /* Ensure all padding bytes are 0 for potential future
1118          * extension
1119          */
1120         if (jit_init->padding[i]) {
1121             return -EINVAL;
1122         }
1123     }
1124 
1125     /* since no phys_pages parameter, use the maximum: va_pages */
1126     return kbase_region_tracker_init_jit(
1127         kctx, jit_init->va_pages, jit_init->max_allocations,
1128         jit_init->trim_level, jit_init->group_id, jit_init->va_pages);
1129 }
1130 
1131 static int kbase_api_mem_jit_init(struct kbase_context *kctx,
1132                                   struct kbase_ioctl_mem_jit_init *jit_init)
1133 {
1134     int i;
1135 
1136     kctx->jit_version = 0x3;
1137 
1138     for (i = 0; i < sizeof(jit_init->padding); i++) {
1139         /* Ensure all padding bytes are 0 for potential future
1140          * extension
1141          */
1142         if (jit_init->padding[i]) {
1143             return -EINVAL;
1144         }
1145     }
1146 
1147     return kbase_region_tracker_init_jit(
1148         kctx, jit_init->va_pages, jit_init->max_allocations,
1149         jit_init->trim_level, jit_init->group_id, jit_init->phys_pages);
1150 }
1151 
1152 static int kbase_api_mem_exec_init(struct kbase_context *kctx,
1153                                    struct kbase_ioctl_mem_exec_init *exec_init)
1154 {
1155     return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);
1156 }
1157 
1158 static int kbase_api_mem_sync(struct kbase_context *kctx,
1159                               struct kbase_ioctl_mem_sync *sync)
1160 {
1161     struct basep_syncset sset = {.mem_handle.basep.handle = sync->handle,
1162                                  .user_addr = sync->user_addr,
1163                                  .size = sync->size,
1164                                  .type = sync->type};
1165 
1166     return kbase_sync_now(kctx, &sset);
1167 }
1168 
1169 static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
1170                                          union kbase_ioctl_mem_find_cpu_offset *find)
1171 {
1172     return kbasep_find_enclosing_cpu_mapping_offset(
1173         kctx, find->in.cpu_addr, find->in.size, &find->out.offset);
1174 }
1175 
1176 static int kbase_api_mem_find_gpu_start_and_offset(
1177     struct kbase_context *kctx,
1178     union kbase_ioctl_mem_find_gpu_start_and_offset *find)
1179 {
1180     return kbasep_find_enclosing_gpu_mapping_start_and_offset(
1181         kctx, find->in.gpu_addr, find->in.size, &find->out.start,
1182         &find->out.offset);
1183 }
1184 
1185 static int kbase_api_get_context_id(struct kbase_context *kctx,
1186                                     struct kbase_ioctl_get_context_id *info)
1187 {
1188     info->id = kctx->id;
1189 
1190     return 0;
1191 }
1192 
1193 static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
1194                                       struct kbase_ioctl_tlstream_acquire *acquire)
1195 {
1196     return kbase_timeline_io_acquire(kctx->kbdev, acquire->flags);
1197 }
1198 
1199 static int kbase_api_tlstream_flush(struct kbase_context *kctx)
1200 {
1201     kbase_timeline_streams_flush(kctx->kbdev->timeline);
1202 
1203     return 0;
1204 }
1205 
1206 static int kbase_api_mem_commit(struct kbase_context *kctx,
1207                                 struct kbase_ioctl_mem_commit *commit)
1208 {
1209     return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
1210 }
1211 
1212 static int kbase_api_mem_alias(struct kbase_context *kctx,
1213                                union kbase_ioctl_mem_alias *alias)
1214 {
1215     struct base_mem_aliasing_info *ai;
1216     u64 flags;
1217     int err;
1218 
1219     if (alias->in.nents == 0 || alias->in.nents > 0x800) {
1220         return -EINVAL;
1221     }
1222 
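    /* Reject strides that could make the later stride * nents arithmetic
     * overflow; nents is capped at 0x800 by the check above.
     */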
1223     if (alias->in.stride > (U64_MAX / 0x800)) {
1224         return -EINVAL;
1225     }
1226 
1227     ai = vmalloc(sizeof(*ai) * alias->in.nents);
1228     if (!ai) {
1229         return -ENOMEM;
1230     }
1231 
1232     err = copy_from_user(ai, u64_to_user_ptr(alias->in.aliasing_info),
1233                          sizeof(*ai) * alias->in.nents);
1234     if (err) {
1235         vfree(ai);
1236         return -EFAULT;
1237     }
1238 
1239     flags = alias->in.flags;
1240     if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
1241         vfree(ai);
1242         return -EINVAL;
1243     }
1244 
1245     alias->out.gpu_va =
1246         kbase_mem_alias(kctx, &flags, alias->in.stride, alias->in.nents, ai,
1247                         &alias->out.va_pages);
1248 
1249     alias->out.flags = flags;
1250 
1251     vfree(ai);
1252 
1253     if (alias->out.gpu_va == 0) {
1254         return -ENOMEM;
1255     }
1256 
1257     return 0;
1258 }
1259 
1260 static int kbase_api_mem_import(struct kbase_context *kctx,
1261                                 union kbase_ioctl_mem_import *import)
1262 {
1263     int ret;
1264     u64 flags = import->in.flags;
1265 
1266     if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
1267         return -ENOMEM;
1268     }
1269 
1270     ret = kbase_mem_import(
1271         kctx, import->in.type, u64_to_user_ptr(import->in.phandle),
1272         import->in.padding, &import->out.gpu_va, &import->out.va_pages, &flags);
1273 
1274     import->out.flags = flags;
1275 
1276     return ret;
1277 }
1278 
1279 static int kbase_api_mem_flags_change(struct kbase_context *kctx,
1280                                       struct kbase_ioctl_mem_flags_change *change)
1281 {
1282     if (change->flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
1283         return -ENOMEM;
1284     }
1285 
1286     return kbase_mem_flags_change(kctx, change->gpu_va, change->flags,
1287                                   change->mask);
1288 }
1289 
1290 static int kbase_api_stream_create(struct kbase_context *kctx,
1291                                    struct kbase_ioctl_stream_create *stream)
1292 {
1293 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1294     int fd, ret;
1295 
1296     /* Name must be NULL-terminated and padded with NULLs, so check last
1297      * character is NULL
1298      */
1299     if (stream->name[sizeof(stream->name) - 1] != 0) {
1300         return -EINVAL;
1301     }
1302 
1303     ret = kbase_sync_fence_stream_create(stream->name, &fd);
1304     if (ret) {
1305         return ret;
1306     }
1307     return fd;
1308 #else
1309     return -ENOENT;
1310 #endif
1311 }
1312 
1313 static int kbase_api_fence_validate(struct kbase_context *kctx,
1314                                     struct kbase_ioctl_fence_validate *validate)
1315 {
1316 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1317     return kbase_sync_fence_validate(validate->fd);
1318 #else
1319     return -ENOENT;
1320 #endif
1321 }
1322 
1323 static int kbase_api_mem_profile_add(struct kbase_context *kctx,
1324                                      struct kbase_ioctl_mem_profile_add *data)
1325 {
1326     char *buf;
1327     int err;
1328 
1329     if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
1330         dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big\n");
1331         return -EINVAL;
1332     }
1333 
1334     buf = kmalloc(data->len, GFP_KERNEL);
1335     if (ZERO_OR_NULL_PTR(buf)) {
1336         return -ENOMEM;
1337     }
1338 
1339     err = copy_from_user(buf, u64_to_user_ptr(data->buffer), data->len);
1340     if (err) {
1341         kfree(buf);
1342         return -EFAULT;
1343     }
1344 
1345     return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len);
1346 }
1347 
1348 #if !MALI_USE_CSF
1349 static int kbase_api_soft_event_update(struct kbase_context *kctx,
1350                                        struct kbase_ioctl_soft_event_update *update)
1351 {
1352     if (update->flags != 0) {
1353         return -EINVAL;
1354     }
1355 
1356     return kbase_soft_event_update(kctx, update->event, update->new_status);
1357 }
1358 #endif /* !MALI_USE_CSF */
1359 
1360 static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
1361                                          struct kbase_ioctl_sticky_resource_map *map)
1362 {
1363     int ret;
1364     u64 i;
1365     u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
1366 
1367     if (!map->count || map->count > BASE_EXT_RES_COUNT_MAX) {
1368         return -EOVERFLOW;
1369     }
1370 
1371     ret = copy_from_user(gpu_addr, u64_to_user_ptr(map->address),
1372                          sizeof(u64) * map->count);
1373     if (ret != 0) {
1374         return -EFAULT;
1375     }
1376 
1377     kbase_gpu_vm_lock(kctx);
1378 
1379     for (i = 0; i < map->count; i++) {
1380         if (!kbase_sticky_resource_acquire(kctx, gpu_addr[i])) {
1381             /* Invalid resource */
1382             ret = -EINVAL;
1383             break;
1384         }
1385     }
1386 
1387     if (ret != 0) {
1388         while (i > 0) {
1389             i--;
1390             kbase_sticky_resource_release_force(kctx, NULL, gpu_addr[i]);
1391         }
1392     }
1393 
1394     kbase_gpu_vm_unlock(kctx);
1395 
1396     return ret;
1397 }
1398 
1399 static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
1400                                            struct kbase_ioctl_sticky_resource_unmap *unmap)
1401 {
1402     int ret;
1403     u64 i;
1404     u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
1405 
1406     if (!unmap->count || unmap->count > BASE_EXT_RES_COUNT_MAX) {
1407         return -EOVERFLOW;
1408     }
1409 
1410     ret = copy_from_user(gpu_addr, u64_to_user_ptr(unmap->address),
1411                          sizeof(u64) * unmap->count);
1412     if (ret != 0) {
1413         return -EFAULT;
1414     }
1415 
1416     kbase_gpu_vm_lock(kctx);
1417 
1418     for (i = 0; i < unmap->count; i++) {
1419         if (!kbase_sticky_resource_release_force(kctx, NULL, gpu_addr[i])) {
1420             /* Invalid resource, but we keep going anyway */
1421             ret = -EINVAL;
1422         }
1423     }
1424 
1425     kbase_gpu_vm_unlock(kctx);
1426 
1427     return ret;
1428 }
1429 
1430 #if MALI_UNIT_TEST
1431 static int kbase_api_tlstream_test(struct kbase_context *kctx,
1432                                    struct kbase_ioctl_tlstream_test *test)
1433 {
1434     kbase_timeline_test(kctx->kbdev, test->tpw_count, test->msg_delay,
1435                         test->msg_count, test->aux_msg);
1436 
1437     return 0;
1438 }
1439 
1440 static int kbase_api_tlstream_stats(struct kbase_context *kctx,
1441                                     struct kbase_ioctl_tlstream_stats *stats)
1442 {
1443     kbase_timeline_stats(kctx->kbdev->timeline, &stats->bytes_collected,
1444                          &stats->bytes_generated);
1445 
1446     return 0;
1447 }
1448 #endif /* MALI_UNIT_TEST */
1449 
1450 #if MALI_USE_CSF
1451 static int kbasep_cs_event_signal(struct kbase_context *kctx)
1452 {
1453     kbase_csf_event_signal_notify_gpu(kctx);
1454     return 0;
1455 }
1456 
1457 static int kbasep_cs_queue_register(struct kbase_context *kctx,
1458                                     struct kbase_ioctl_cs_queue_register *reg)
1459 {
1460     kctx->jit_group_id = BASE_MEM_GROUP_DEFAULT;
1461 
1462     return kbase_csf_queue_register(kctx, reg);
1463 }
1464 
1465 static int kbasep_cs_queue_terminate(struct kbase_context *kctx,
1466                                      struct kbase_ioctl_cs_queue_terminate *term)
1467 {
1468     kbase_csf_queue_terminate(kctx, term);
1469 
1470     return 0;
1471 }
1472 
1473 static int kbasep_cs_queue_bind(struct kbase_context *kctx,
1474                                 union kbase_ioctl_cs_queue_bind *bind)
1475 {
1476     return kbase_csf_queue_bind(kctx, bind);
1477 }
1478 
1479 static int kbasep_cs_queue_kick(struct kbase_context *kctx,
1480                                 struct kbase_ioctl_cs_queue_kick *kick)
1481 {
1482     return kbase_csf_queue_kick(kctx, kick);
1483 }
1484 
1485 static int kbasep_cs_queue_group_create(struct kbase_context *kctx,
1486                                         union kbase_ioctl_cs_queue_group_create *create)
1487 {
1488     return kbase_csf_queue_group_create(kctx, create);
1489 }
1490 
1491 static int kbasep_cs_queue_group_terminate(struct kbase_context *kctx,
1492                                            struct kbase_ioctl_cs_queue_group_term *term)
1493 {
1494     kbase_csf_queue_group_terminate(kctx, term->group_handle);
1495 
1496     return 0;
1497 }
1498 
1499 static int kbasep_kcpu_queue_new(struct kbase_context *kctx,
1500                                  struct kbase_ioctl_kcpu_queue_new *new)
1501 {
1502     return kbase_csf_kcpu_queue_new(kctx, new);
1503 }
1504 
1505 static int kbasep_kcpu_queue_delete(struct kbase_context *kctx,
1506                                     struct kbase_ioctl_kcpu_queue_delete *delete)
1507 {
1508     return kbase_csf_kcpu_queue_delete(kctx, delete);
1509 }
1510 
1511 static int kbasep_kcpu_queue_enqueue(struct kbase_context *kctx,
1512                                      struct kbase_ioctl_kcpu_queue_enqueue *enqueue)
1513 {
1514     return kbase_csf_kcpu_queue_enqueue(kctx, enqueue);
1515 }
1516 
1517 static int kbasep_cs_tiler_heap_init(struct kbase_context *kctx,
1518                                      union kbase_ioctl_cs_tiler_heap_init *heap_init)
1519 {
1520     kctx->jit_group_id = heap_init->in.group_id;
1521 
1522     return kbase_csf_tiler_heap_init(
1523         kctx, heap_init->in.chunk_size, heap_init->in.initial_chunks,
1524         heap_init->in.max_chunks, heap_init->in.target_in_flight,
1525         &heap_init->out.gpu_heap_va, &heap_init->out.first_chunk_va);
1526 }
1527 
1528 static int kbasep_cs_tiler_heap_term(struct kbase_context *kctx,
1529                                      struct kbase_ioctl_cs_tiler_heap_term *heap_term)
1530 {
1531     return kbase_csf_tiler_heap_term(kctx, heap_term->gpu_heap_va);
1532 }
1533 
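/* Copy the global CS interface information reported by firmware into the
 * user-supplied group/stream buffers. The requested counts are validated
 * against MAX_SUPPORTED_CSGS and MAX_SUPPORTED_STREAMS_PER_GROUP before any
 * allocation, and only min(requested, reported) entries are copied back to
 * user space.
 */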
1534 static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
1535                                         union kbase_ioctl_cs_get_glb_iface *param)
1536 {
1537     struct basep_cs_stream_control *stream_data = NULL;
1538     struct basep_cs_group_control *group_data = NULL;
1539     void __user *user_groups, *user_streams;
1540     int err = 0;
1541     u32 const max_group_num = param->in.max_group_num;
1542     u32 const max_total_stream_num = param->in.max_total_stream_num;
1543 
1544     if (max_group_num > MAX_SUPPORTED_CSGS) {
1545         return -EINVAL;
1546     }
1547 
1548     if (max_total_stream_num >
1549         MAX_SUPPORTED_CSGS * MAX_SUPPORTED_STREAMS_PER_GROUP) {
1550         return -EINVAL;
1551     }
1552 
1553     user_groups = u64_to_user_ptr(param->in.groups_ptr);
1554     user_streams = u64_to_user_ptr(param->in.streams_ptr);
1555 
1556     if (max_group_num > 0) {
1557         if (!user_groups) {
1558             err = -EINVAL;
1559         } else {
1560             group_data =
1561                 kcalloc(max_group_num, sizeof(*group_data), GFP_KERNEL);
1562             if (!group_data) {
1563                 err = -ENOMEM;
1564             }
1565         }
1566     }
1567 
1568     if (max_total_stream_num > 0) {
1569         if (!user_streams) {
1570             err = -EINVAL;
1571         } else {
1572             stream_data =
1573                 kcalloc(max_total_stream_num, sizeof(*stream_data), GFP_KERNEL);
1574             if (!stream_data) {
1575                 err = -ENOMEM;
1576             }
1577         }
1578     }
1579 
1580     if (!err) {
1581         param->out.total_stream_num = kbase_csf_firmware_get_glb_iface(
1582             kctx->kbdev, group_data, max_group_num, stream_data,
1583             max_total_stream_num, &param->out.glb_version, &param->out.features,
1584             &param->out.group_num, &param->out.prfcnt_size);
1585 
1586         param->out.padding = 0;
1587 
1588         if (copy_to_user(user_groups, group_data,
1589                          MIN(max_group_num, param->out.group_num) *
1590                              sizeof(*group_data))) {
1591             err = -EFAULT;
1592         }
1593     }
1594 
1595     if (!err) {
1596         if (copy_to_user(user_streams, stream_data,
1597             MIN(max_total_stream_num, param->out.total_stream_num) * sizeof(*stream_data))) {
1598             err = -EFAULT;
1599         }
1600     }
1601 
1602     kfree(group_data);
1603     kfree(stream_data);
1604     return err;
1605 }
1606 #endif /* MALI_USE_CSF */
1607 
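/* Helper macros for the ioctl dispatcher below. Each macro checks at compile
 * time (BUILD_BUG_ON) that the command's direction and payload size match the
 * declared argument type, copies the payload from/to user space as required,
 * and returns the handler's result directly to the caller.
 */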
1608 #define KBASE_HANDLE_IOCTL(cmd, function, arg)                                 \
1609     do {                                                                       \
1610         BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE);                              \
1611         return function(arg);                                                  \
1612     } while (0)
1613 
1614 #define KBASE_HANDLE_IOCTL_IN(cmd, function, type, arg)                        \
1615     do {                                                                       \
1616         type param;                                                            \
1617         int err;                                                               \
1618         BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE);                             \
1619         BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));                         \
1620         err = copy_from_user(&param, uarg, sizeof(param));                     \
1621         if (err)                                                               \
1622             return -EFAULT;                                                    \
1623         return function(arg, &param);                                          \
1624     } while (0)
1625 
1626 #define KBASE_HANDLE_IOCTL_OUT(cmd, function, type, arg)                       \
1627     do {                                                                       \
1628         type param;                                                            \
1629         int ret, err;                                                          \
1630         BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ);                              \
1631         BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));                         \
1632         memset(&param, 0, sizeof(param));                                      \
1633         ret = function(arg, &param);                                           \
1634         err = copy_to_user(uarg, &param, sizeof(param));                       \
1635         if (err)                                                               \
1636             return -EFAULT;                                                    \
1637         return ret;                                                            \
1638     } while (0)
1639 
1640 #define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type, arg)                     \
1641     do {                                                                       \
1642         type param;                                                            \
1643         int ret, err;                                                          \
1644         BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE | _IOC_READ));               \
1645         BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));                         \
1646         err = copy_from_user(&param, uarg, sizeof(param));                     \
1647         if (err)                                                               \
1648             return -EFAULT;                                                    \
1649         ret = function(arg, &param);                                           \
1650         err = copy_to_user(uarg, &param, sizeof(param));                       \
1651         if (err)                                                               \
1652             return -EFAULT;                                                    \
1653         return ret;                                                            \
1654     } while (0)
1655 
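/* Main ioctl dispatcher for the kbase device file. Until context setup is
 * complete only VERSION_CHECK, VERSION_CHECK_RESERVED and SET_FLAGS are
 * accepted; every other command requires a fully initialised kbase_context
 * and is otherwise rejected with -EPERM.
 */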
1656 static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1657 {
1658     struct kbase_file *const kfile = filp->private_data;
1659     struct kbase_context *kctx = NULL;
1660     struct kbase_device *kbdev = kfile->kbdev;
1661     void __user *uarg = (void __user *)arg;
1662 
1663     /* Only these ioctls are available until setup is complete */
1664     switch (cmd) {
1665         case KBASE_IOCTL_VERSION_CHECK:
1666             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
1667                                      kbase_api_handshake,
1668                                      struct kbase_ioctl_version_check, kfile);
1669             break;
1670 
1671         case KBASE_IOCTL_VERSION_CHECK_RESERVED:
1672             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK_RESERVED,
1673                                      kbase_api_handshake_dummy,
1674                                      struct kbase_ioctl_version_check, kfile);
1675             break;
1676 
1677         case KBASE_IOCTL_SET_FLAGS:
1678             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS, kbase_api_set_flags,
1679                                   struct kbase_ioctl_set_flags, kfile);
1680             break;
1681         default:
1682             break;
1683     }
1684 
1685     kctx = kbase_file_get_kctx_if_setup_complete(kfile);
1686     if (unlikely(!kctx)) {
1687         return -EPERM;
1688     }
1689 
1690     /* Normal ioctls */
1691     switch (cmd) {
1692 #if !MALI_USE_CSF
1693         case KBASE_IOCTL_JOB_SUBMIT:
1694             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT, kbase_api_job_submit,
1695                                   struct kbase_ioctl_job_submit, kctx);
1696             break;
1697 #endif /* !MALI_USE_CSF */
1698         case KBASE_IOCTL_GET_GPUPROPS:
1699             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
1700                                   kbase_api_get_gpuprops,
1701                                   struct kbase_ioctl_get_gpuprops, kctx);
1702             break;
1703 #if !MALI_USE_CSF
1704         case KBASE_IOCTL_POST_TERM:
1705             KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM, kbase_api_post_term,
1706                                kctx);
1707             break;
1708 #endif /* !MALI_USE_CSF */
1709         case KBASE_IOCTL_MEM_ALLOC:
1710             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC, kbase_api_mem_alloc,
1711                                      union kbase_ioctl_mem_alloc, kctx);
1712             break;
1713         case KBASE_IOCTL_MEM_QUERY:
1714             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY, kbase_api_mem_query,
1715                                      union kbase_ioctl_mem_query, kctx);
1716             break;
1717         case KBASE_IOCTL_MEM_FREE:
1718             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE, kbase_api_mem_free,
1719                                   struct kbase_ioctl_mem_free, kctx);
1720             break;
1721         case KBASE_IOCTL_DISJOINT_QUERY:
1722             KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
1723                                    kbase_api_disjoint_query,
1724                                    struct kbase_ioctl_disjoint_query, kctx);
1725             break;
1726         case KBASE_IOCTL_GET_DDK_VERSION:
1727             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
1728                                   kbase_api_get_ddk_version,
1729                                   struct kbase_ioctl_get_ddk_version, kctx);
1730             break;
1731         case KBASE_IOCTL_MEM_JIT_INIT_10_2:
1732             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT_10_2,
1733                                   kbase_api_mem_jit_init_10_2,
1734                                   struct kbase_ioctl_mem_jit_init_10_2, kctx);
1735             break;
1736         case KBASE_IOCTL_MEM_JIT_INIT_11_5:
1737             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT_11_5,
1738                                   kbase_api_mem_jit_init_11_5,
1739                                   struct kbase_ioctl_mem_jit_init_11_5, kctx);
1740             break;
1741         case KBASE_IOCTL_MEM_JIT_INIT:
1742             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
1743                                   kbase_api_mem_jit_init,
1744                                   struct kbase_ioctl_mem_jit_init, kctx);
1745             break;
1746         case KBASE_IOCTL_MEM_EXEC_INIT:
1747             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT,
1748                                   kbase_api_mem_exec_init,
1749                                   struct kbase_ioctl_mem_exec_init, kctx);
1750             break;
1751         case KBASE_IOCTL_MEM_SYNC:
1752             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC, kbase_api_mem_sync,
1753                                   struct kbase_ioctl_mem_sync, kctx);
1754             break;
1755         case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
1756             KBASE_HANDLE_IOCTL_INOUT(
1757                 KBASE_IOCTL_MEM_FIND_CPU_OFFSET, kbase_api_mem_find_cpu_offset,
1758                 union kbase_ioctl_mem_find_cpu_offset, kctx);
1759             break;
1760         case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
1761             KBASE_HANDLE_IOCTL_INOUT(
1762                 KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
1763                 kbase_api_mem_find_gpu_start_and_offset,
1764                 union kbase_ioctl_mem_find_gpu_start_and_offset, kctx);
1765             break;
1766         case KBASE_IOCTL_GET_CONTEXT_ID:
1767             KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
1768                                    kbase_api_get_context_id,
1769                                    struct kbase_ioctl_get_context_id, kctx);
1770             break;
1771         case KBASE_IOCTL_TLSTREAM_ACQUIRE:
1772             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
1773                                   kbase_api_tlstream_acquire,
1774                                   struct kbase_ioctl_tlstream_acquire, kctx);
1775             break;
1776         case KBASE_IOCTL_TLSTREAM_FLUSH:
1777             KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
1778                                kbase_api_tlstream_flush, kctx);
1779             break;
1780         case KBASE_IOCTL_MEM_COMMIT:
1781             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT, kbase_api_mem_commit,
1782                                   struct kbase_ioctl_mem_commit, kctx);
1783             break;
1784         case KBASE_IOCTL_MEM_ALIAS:
1785             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS, kbase_api_mem_alias,
1786                                      union kbase_ioctl_mem_alias, kctx);
1787             break;
1788         case KBASE_IOCTL_MEM_IMPORT:
1789             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
1790                                      kbase_api_mem_import,
1791                                      union kbase_ioctl_mem_import, kctx);
1792             break;
1793         case KBASE_IOCTL_MEM_FLAGS_CHANGE:
1794             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
1795                                   kbase_api_mem_flags_change,
1796                                   struct kbase_ioctl_mem_flags_change, kctx);
1797             break;
1798         case KBASE_IOCTL_STREAM_CREATE:
1799             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
1800                                   kbase_api_stream_create,
1801                                   struct kbase_ioctl_stream_create, kctx);
1802             break;
1803         case KBASE_IOCTL_FENCE_VALIDATE:
1804             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
1805                                   kbase_api_fence_validate,
1806                                   struct kbase_ioctl_fence_validate, kctx);
1807             break;
1808         case KBASE_IOCTL_MEM_PROFILE_ADD:
1809             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
1810                                   kbase_api_mem_profile_add,
1811                                   struct kbase_ioctl_mem_profile_add, kctx);
1812             break;
1813 
1814 #if !MALI_USE_CSF
1815         case KBASE_IOCTL_SOFT_EVENT_UPDATE:
1816             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
1817                                   kbase_api_soft_event_update,
1818                                   struct kbase_ioctl_soft_event_update, kctx);
1819             break;
1820 #endif /* !MALI_USE_CSF */
1821 
1822         case KBASE_IOCTL_STICKY_RESOURCE_MAP:
1823             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
1824                                   kbase_api_sticky_resource_map,
1825                                   struct kbase_ioctl_sticky_resource_map, kctx);
1826             break;
1827         case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
1828             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
1829                                   kbase_api_sticky_resource_unmap,
1830                                   struct kbase_ioctl_sticky_resource_unmap,
1831                                   kctx);
1832             break;
1833 
1834             /* Instrumentation. */
1835 #if !MALI_USE_CSF
1836         case KBASE_IOCTL_KINSTR_JM_FD:
1837             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_JM_FD,
1838                                      kbase_api_kinstr_jm_fd,
1839                                      union kbase_kinstr_jm_fd, kctx);
1840             break;
1841 #endif
1842         case KBASE_IOCTL_HWCNT_READER_SETUP:
1843             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
1844                                   kbase_api_hwcnt_reader_setup,
1845                                   struct kbase_ioctl_hwcnt_reader_setup, kctx);
1846             break;
1847         case KBASE_IOCTL_HWCNT_ENABLE:
1848             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
1849                                   kbase_api_hwcnt_enable,
1850                                   struct kbase_ioctl_hwcnt_enable, kctx);
1851             break;
1852         case KBASE_IOCTL_HWCNT_DUMP:
1853             KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP, kbase_api_hwcnt_dump,
1854                                kctx);
1855             break;
1856         case KBASE_IOCTL_HWCNT_CLEAR:
1857             KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR, kbase_api_hwcnt_clear,
1858                                kctx);
1859             break;
1860         case KBASE_IOCTL_GET_CPU_GPU_TIMEINFO:
1861             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_GET_CPU_GPU_TIMEINFO,
1862                                      kbase_api_get_cpu_gpu_timeinfo,
1863                                      union kbase_ioctl_get_cpu_gpu_timeinfo,
1864                                      kctx);
1865             break;
1866 #ifdef CONFIG_MALI_BIFROST_NO_MALI
1867         case KBASE_IOCTL_HWCNT_SET:
1868             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET, kbase_api_hwcnt_set,
1869                                   struct kbase_ioctl_hwcnt_values, kctx);
1870             break;
1871 #endif
1872 #ifdef CONFIG_MALI_CINSTR_GWT
1873         case KBASE_IOCTL_CINSTR_GWT_START:
1874             KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
1875                                kbase_gpu_gwt_start, kctx);
1876             break;
1877         case KBASE_IOCTL_CINSTR_GWT_STOP:
1878             KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP, kbase_gpu_gwt_stop,
1879                                kctx);
1880             break;
1881         case KBASE_IOCTL_CINSTR_GWT_DUMP:
1882             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
1883                                      kbase_gpu_gwt_dump,
1884                                      union kbase_ioctl_cinstr_gwt_dump, kctx);
1885             break;
1886 #endif
1887 #if MALI_USE_CSF
1888         case KBASE_IOCTL_CS_EVENT_SIGNAL:
1889             KBASE_HANDLE_IOCTL(KBASE_IOCTL_CS_EVENT_SIGNAL,
1890                                kbasep_cs_event_signal, kctx);
1891             break;
1892         case KBASE_IOCTL_CS_QUEUE_REGISTER:
1893             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER,
1894                                   kbasep_cs_queue_register,
1895                                   struct kbase_ioctl_cs_queue_register, kctx);
1896             break;
1897         case KBASE_IOCTL_CS_QUEUE_TERMINATE:
1898             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_TERMINATE,
1899                                   kbasep_cs_queue_terminate,
1900                                   struct kbase_ioctl_cs_queue_terminate, kctx);
1901             break;
1902         case KBASE_IOCTL_CS_QUEUE_BIND:
1903             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_BIND,
1904                                      kbasep_cs_queue_bind,
1905                                      union kbase_ioctl_cs_queue_bind, kctx);
1906             break;
1907         case KBASE_IOCTL_CS_QUEUE_KICK:
1908             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_KICK,
1909                                   kbasep_cs_queue_kick,
1910                                   struct kbase_ioctl_cs_queue_kick, kctx);
1911             break;
1912         case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE:
1913             KBASE_HANDLE_IOCTL_INOUT(
1914                 KBASE_IOCTL_CS_QUEUE_GROUP_CREATE, kbasep_cs_queue_group_create,
1915                 union kbase_ioctl_cs_queue_group_create, kctx);
1916             break;
1917         case KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE:
1918             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE,
1919                                   kbasep_cs_queue_group_terminate,
1920                                   struct kbase_ioctl_cs_queue_group_term, kctx);
1921             break;
1922         case KBASE_IOCTL_KCPU_QUEUE_CREATE:
1923             KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_KCPU_QUEUE_CREATE,
1924                                    kbasep_kcpu_queue_new,
1925                                    struct kbase_ioctl_kcpu_queue_new, kctx);
1926             break;
1927         case KBASE_IOCTL_KCPU_QUEUE_DELETE:
1928             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_DELETE,
1929                                   kbasep_kcpu_queue_delete,
1930                                   struct kbase_ioctl_kcpu_queue_delete, kctx);
1931             break;
1932         case KBASE_IOCTL_KCPU_QUEUE_ENQUEUE:
1933             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_ENQUEUE,
1934                                   kbasep_kcpu_queue_enqueue,
1935                                   struct kbase_ioctl_kcpu_queue_enqueue, kctx);
1936             break;
1937         case KBASE_IOCTL_CS_TILER_HEAP_INIT:
1938             KBASE_HANDLE_IOCTL_INOUT(
1939                 KBASE_IOCTL_CS_TILER_HEAP_INIT, kbasep_cs_tiler_heap_init,
1940                 union kbase_ioctl_cs_tiler_heap_init, kctx);
1941             break;
1942         case KBASE_IOCTL_CS_TILER_HEAP_TERM:
1943             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_TILER_HEAP_TERM,
1944                                   kbasep_cs_tiler_heap_term,
1945                                   struct kbase_ioctl_cs_tiler_heap_term, kctx);
1946             break;
1947         case KBASE_IOCTL_CS_GET_GLB_IFACE:
1948             KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_GET_GLB_IFACE,
1949                                      kbase_ioctl_cs_get_glb_iface,
1950                                      union kbase_ioctl_cs_get_glb_iface, kctx);
1951             break;
1952 #endif /* MALI_USE_CSF */
1953 #if MALI_UNIT_TEST
1954         case KBASE_IOCTL_TLSTREAM_TEST:
1955             KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST,
1956                                   kbase_api_tlstream_test,
1957                                   struct kbase_ioctl_tlstream_test, kctx);
1958             break;
1959         case KBASE_IOCTL_TLSTREAM_STATS:
1960             KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
1961                                    kbase_api_tlstream_stats,
1962                                    struct kbase_ioctl_tlstream_stats, kctx);
1963             break;
1964 #endif /* MALI_UNIT_TEST */
1965         default:
1966             break;
1967     }
1968 
1969     dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d", cmd, _IOC_NR(cmd));
1970 
1971     return -ENOIOCTLCMD;
1972 }
1973 
1974 #if MALI_USE_CSF
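/* CSF variant of the read() handler: reports a single base_csf_notification
 * to user space, either a pending event notification or, failing that, a
 * queued CS fatal error retrieved via kbase_csf_read_error().
 */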
1975 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count,
1976                           loff_t *f_pos)
1977 {
1978     struct kbase_file *const kfile = filp->private_data;
1979     struct kbase_context *const kctx =
1980         kbase_file_get_kctx_if_setup_complete(kfile);
1981     struct base_csf_notification event_data = {.type =
1982                                                    BASE_CSF_NOTIFICATION_EVENT};
1983     const size_t data_size = sizeof(event_data);
1984     bool read_event = false, read_error = false;
1985 
1986     if (unlikely(!kctx)) {
1987         return -EPERM;
1988     }
1989 
1990     if (atomic_read(&kctx->event_count)) {
1991         read_event = true;
1992     } else {
1993         read_error = kbase_csf_read_error(kctx, &event_data);
1994     }
1995 
1996     if (!read_event && !read_error) {
1997         /* This condition is not treated as an error.
1998          * It is possible that event handling thread was woken up due
1999          * to a fault/error that occurred for a queue group, but before
2000          * the corresponding fault data was read by the thread the
2001          * queue group was already terminated by the userspace.
2002          */
2003         dev_dbg(kctx->kbdev->dev, "Neither event nor error signaled");
2004     }
2005 
2006     if (copy_to_user(buf, &event_data, data_size) != 0) {
2007         dev_warn(kctx->kbdev->dev, "Failed to copy data\n");
2008         return -EFAULT;
2009     }
2010 
2011     if (read_event) {
2012         atomic_set(&kctx->event_count, 0);
2013     }
2014 
2015     return data_size;
2016 }
2017 #else  /* MALI_USE_CSF */
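/* Job-manager variant of the read() handler: drains completed job events as
 * base_jd_event_v2 records, blocking (unless O_NONBLOCK is set) until at
 * least one event is available.
 */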
2018 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count,
2019                           loff_t *f_pos)
2020 {
2021     struct kbase_file *const kfile = filp->private_data;
2022     struct kbase_context *const kctx =
2023         kbase_file_get_kctx_if_setup_complete(kfile);
2024     struct base_jd_event_v2 uevent;
2025     int out_count = 0;
2026 
2027     if (unlikely(!kctx)) {
2028         return -EPERM;
2029     }
2030 
2031     if (count < sizeof(uevent)) {
2032         return -ENOBUFS;
2033     }
2034 
2035     do {
2036         while (kbase_event_dequeue(kctx, &uevent)) {
2037             if (out_count > 0) {
2038                 goto out;
2039             }
2040 
2041             if (filp->f_flags & O_NONBLOCK) {
2042                 return -EAGAIN;
2043             }
2044 
2045             if (wait_event_interruptible(kctx->event_queue,
2046                                          kbase_event_pending(kctx)) != 0) {
2047                 return -ERESTARTSYS;
2048             }
2049         }
2050         if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
2051             if (out_count == 0) {
2052                 return -EPIPE;
2053             }
2054             goto out;
2055         }
2056 
2057         if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0) {
2058             return -EFAULT;
2059         }
2060 
2061         buf += sizeof(uevent);
2062         out_count++;
2063         count -= sizeof(uevent);
2064     } while (count >= sizeof(uevent));
2065 
2066 out:
2067     return out_count * sizeof(uevent);
2068 }
2069 #endif /* MALI_USE_CSF */
2070 
2071 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
2072 {
2073     struct kbase_file *const kfile = filp->private_data;
2074     struct kbase_context *const kctx =
2075         kbase_file_get_kctx_if_setup_complete(kfile);
2076 
2077     if (unlikely(!kctx)) {
2078         return POLLERR;
2079     }
2080 
2081     poll_wait(filp, &kctx->event_queue, wait);
2082     if (kbase_event_pending(kctx)) {
2083         return POLLIN | POLLRDNORM;
2084     }
2085 
2086     return 0;
2087 }
2088 
2089 void kbase_event_wakeup(struct kbase_context *kctx)
2090 {
2091     KBASE_DEBUG_ASSERT(kctx);
2092 
2093     wake_up_interruptible(&kctx->event_queue);
2094 }
2095 
2096 KBASE_EXPORT_TEST_API(kbase_event_wakeup);
2097 
2098 #if MALI_USE_CSF
2099 int kbase_event_pending(struct kbase_context *ctx)
2100 {
2101     WARN_ON_ONCE(!ctx);
2102 
2103     return (atomic_read(&ctx->event_count) != 0) ||
2104            kbase_csf_error_pending(ctx);
2105 }
2106 #else
2107 int kbase_event_pending(struct kbase_context *ctx)
2108 {
2109     KBASE_DEBUG_ASSERT(ctx);
2110 
2111     return (atomic_read(&ctx->event_count) != 0) ||
2112            (atomic_read(&ctx->event_closed) != 0);
2113 }
2114 #endif
2115 
2116 KBASE_EXPORT_TEST_API(kbase_event_pending);
2117 
2118 static int kbase_mmap(struct file *const filp, struct vm_area_struct *const vma)
2119 {
2120     struct kbase_file *const kfile = filp->private_data;
2121     struct kbase_context *const kctx =
2122         kbase_file_get_kctx_if_setup_complete(kfile);
2123 
2124     if (unlikely(!kctx)) {
2125         return -EPERM;
2126     }
2127 
2128     return kbase_context_mmap(kctx, vma);
2129 }
2130 
2131 static int kbase_check_flags(int flags)
2132 {
2133     /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
2134      * closes the file descriptor in a child process.
2135      */
2136     if ((flags & O_CLOEXEC) == 0) {
2137         return -EINVAL;
2138     }
2139 
2140     return 0;
2141 }
2142 
2143 static unsigned long kbase_get_unmapped_area(struct file *const filp,
2144                                              const unsigned long addr,
2145                                              const unsigned long len,
2146                                              const unsigned long pgoff,
2147                                              const unsigned long flags)
2148 {
2149     struct kbase_file *const kfile = filp->private_data;
2150     struct kbase_context *const kctx =
2151         kbase_file_get_kctx_if_setup_complete(kfile);
2152 
2153     if (unlikely(!kctx)) {
2154         return -EPERM;
2155     }
2156 
2157     return kbase_context_get_unmapped_area(kctx, addr, len, pgoff, flags);
2158 }
2159 
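/* File operations for the kbase device node; the same ioctl entry point is
 * used for native and compat (32-bit) callers.
 */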
2160 static const struct file_operations kbase_fops = {
2161     .owner = THIS_MODULE,
2162     .open = kbase_open,
2163     .release = kbase_release,
2164     .read = kbase_read,
2165     .poll = kbase_poll,
2166     .unlocked_ioctl = kbase_ioctl,
2167     .compat_ioctl = kbase_ioctl,
2168     .mmap = kbase_mmap,
2169     .check_flags = kbase_check_flags,
2170     .get_unmapped_area = kbase_get_unmapped_area,
2171 };
2172 
2173 /**
2174  * show_policy - Show callback for the power_policy sysfs file.
2175  *
2176  * This function is called to get the contents of the power_policy sysfs
2177  * file. This is a list of the available policies with the currently active one
2178  * surrounded by square brackets.
2179  *
2180  * @dev:    The device this sysfs file is for
2181  * @attr:    The attributes of the sysfs file
2182  * @buf:    The output buffer for the sysfs file contents
2183  *
2184  * Return: The number of bytes output to @buf.
2185  */
2186 static ssize_t show_policy(struct device *dev, struct device_attribute *attr,
2187                            char *const buf)
2188 {
2189     struct kbase_device *kbdev;
2190     const struct kbase_pm_policy *current_policy;
2191     const struct kbase_pm_policy *const *policy_list;
2192     int policy_count;
2193     int i;
2194     ssize_t ret = 0;
2195 
2196     kbdev = to_kbase_device(dev);
2197     if (!kbdev) {
2198         return -ENODEV;
2199     }
2200 
2201     current_policy = kbase_pm_get_policy(kbdev);
2202 
2203     policy_count = kbase_pm_list_policies(kbdev, &policy_list);
2204 
2205     for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
2206         if (policy_list[i] == current_policy) {
2207             ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ",
2208                              policy_list[i]->name);
2209         } else {
2210             ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ",
2211                              policy_list[i]->name);
2212         }
2213     }
2214 
2215     if (ret < PAGE_SIZE - 1) {
2216         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
2217     } else {
2218         buf[PAGE_SIZE - 2] = '\n';
2219         buf[PAGE_SIZE - 1] = '\0';
2220         ret = PAGE_SIZE - 1;
2221     }
2222 
2223     return ret;
2224 }
2225 
2226 /**
2227  * set_policy - Store callback for the power_policy sysfs file.
2228  *
2229  * This function is called when the power_policy sysfs file is written to.
2230  * It matches the requested policy against the available policies and if a
2231  * matching policy is found calls kbase_pm_set_policy() to change the
2232  * policy.
2233  *
2234  * @dev:    The device this sysfs file is for
2235  * @attr:    The attributes of the sysfs file
2236  * @buf:    The value written to the sysfs file
2237  * @count:    The number of bytes to write to the sysfs file
2238  *
2239  * Return: @count if the function succeeded. An error code on failure.
2240  */
2241 static ssize_t set_policy(struct device *dev, struct device_attribute *attr,
2242                           const char *buf, size_t count)
2243 {
2244     struct kbase_device *kbdev;
2245     const struct kbase_pm_policy *new_policy = NULL;
2246     const struct kbase_pm_policy *const *policy_list;
2247     int policy_count;
2248     int i;
2249 
2250     kbdev = to_kbase_device(dev);
2251     if (!kbdev) {
2252         return -ENODEV;
2253     }
2254 
2255     policy_count = kbase_pm_list_policies(kbdev, &policy_list);
2256 
2257     for (i = 0; i < policy_count; i++) {
2258         if (sysfs_streq(policy_list[i]->name, buf)) {
2259             new_policy = policy_list[i];
2260             break;
2261         }
2262     }
2263 
2264     if (!new_policy) {
2265         dev_err(dev, "power_policy: policy not found\n");
2266         return -EINVAL;
2267     }
2268 
2269     kbase_pm_set_policy(kbdev, new_policy);
2270 
2271     return count;
2272 }
2273 
2274 /*
2275  * The sysfs file power_policy.
2276  *
2277  * This is used for obtaining information about the available policies,
2278  * determining which policy is currently active, and changing the active
2279  * policy.
2280  */
2281 static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
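
/* Illustrative usage from user space (the exact device path varies by
 * platform and is only an example):
 *   cat  /sys/class/misc/mali0/device/power_policy   # e.g. "[coarse_demand] always_on"
 *   echo always_on > /sys/class/misc/mali0/device/power_policy
 */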
2282 
2283 /**
2284  * show_core_mask - Show callback for the core_mask sysfs file.
2285  *
2286  * This function is called to get the contents of the core_mask sysfs file.
2287  *
2288  * @dev:    The device this sysfs file is for
2289  * @attr:    The attributes of the sysfs file
2290  * @buf:    The output buffer for the sysfs file contents
2291  *
2292  * Return: The number of bytes output to @buf.
2293  */
2294 static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr,
2295                               char *const buf)
2296 {
2297     struct kbase_device *kbdev;
2298     ssize_t ret = 0;
2299 
2300     kbdev = to_kbase_device(dev);
2301     if (!kbdev) {
2302         return -ENODEV;
2303     }
2304 
2305     ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2306                      "Current core mask (JS0) : 0x%llX\n",
2307                      kbdev->pm.debug_core_mask[0]);
2308     ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2309                      "Current core mask (JS1) : 0x%llX\n",
2310                      kbdev->pm.debug_core_mask[1]);
2311     ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2312                      "Current core mask (JS2) : 0x%llX\n",
2313                      kbdev->pm.debug_core_mask[2]);
2314     ret +=
2315         scnprintf(buf + ret, PAGE_SIZE - ret, "Available core mask : 0x%llX\n",
2316                   kbdev->gpu_props.props.raw_props.shader_present);
2317 
2318     return ret;
2319 }
2320 
2321 /**
2322  * set_core_mask - Store callback for the core_mask sysfs file.
2323  *
2324  * This function is called when the core_mask sysfs file is written to.
2325  *
2326  * @dev:    The device this sysfs file is for
2327  * @attr:    The attributes of the sysfs file
2328  * @buf:    The value written to the sysfs file
2329  * @count:    The number of bytes to write to the sysfs file
2330  *
2331  * Return: @count if the function succeeded. An error code on failure.
2332  */
2333 static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr,
2334                              const char *buf, size_t count)
2335 {
2336     struct kbase_device *kbdev;
2337     u64 new_core_mask[3];
2338     int items, i;
2339     ssize_t err = count;
2340     unsigned long flags;
2341     u64 shader_present, group0_core_mask;
2342 
2343     kbdev = to_kbase_device(dev);
2344     if (!kbdev) {
2345         return -ENODEV;
2346     }
2347 
2348     items = sscanf(buf, "%llx %llx %llx", &new_core_mask[0],
2349                    &new_core_mask[1], &new_core_mask[2]);
2350     if (items != 1 && items != 3) {
2351         dev_err(kbdev->dev,
2352                 "Couldn't process core mask write operation.\n"
2353                 "Use format <core_mask>\n"
2354                 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
2355         err = -EINVAL;
2356         goto end;
2357     }
2358 
2359     if (items == 1) {
2360         new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
2361     }
2362 
2363     mutex_lock(&kbdev->pm.lock);
2364     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2365 
2366     shader_present = kbdev->gpu_props.props.raw_props.shader_present;
2367     group0_core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
2368 
2369     for (i = 0; i < 3; ++i) {
2370         if ((new_core_mask[i] & shader_present) != new_core_mask[i]) {
2371             dev_err(dev,
2372                     "Invalid core mask 0x%llX for JS %d: Includes non-existent "
2373                     "cores (present = 0x%llX)",
2374                     new_core_mask[i], i, shader_present);
2375             err = -EINVAL;
2376             goto unlock;
2377         } else if (!(new_core_mask[i] & shader_present &
2378                      kbdev->pm.backend.ca_cores_enabled)) {
2379             dev_err(dev,
2380                     "Invalid core mask 0x%llX for JS %d: No intersection with "
2381                     "currently available cores (present = "
2382                     "0x%llX, CA enabled = 0x%llX\n",
2383                     new_core_mask[i], i,
2384                     kbdev->gpu_props.props.raw_props.shader_present,
2385                     kbdev->pm.backend.ca_cores_enabled);
2386             err = -EINVAL;
2387             goto unlock;
2388         } else if (!(new_core_mask[i] & group0_core_mask)) {
2389             dev_err(dev,
2390                     "Invalid core mask 0x%llX for JS %d: No intersection with "
2391                     "group 0 core mask 0x%llX\n",
2392                     new_core_mask[i], i, group0_core_mask);
2393             err = -EINVAL;
2394             goto unlock;
2395         }
2396     }
2397 
2398     if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
2399         kbdev->pm.debug_core_mask[1] != new_core_mask[1] ||
2400         kbdev->pm.debug_core_mask[2] != new_core_mask[2]) {
2401         kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
2402                                      new_core_mask[1], new_core_mask[2]);
2403     }
2404 
2405 unlock:
2406     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2407     mutex_unlock(&kbdev->pm.lock);
2408 end:
2409     return err;
2410 }
2411 
2412 /*
2413  * The sysfs file core_mask.
2414  *
2415  * This is used to restrict shader core availability for debugging purposes.
2416  * Reading it will show the current core mask and the mask of cores available.
2417  * Writing to it will set the current core mask.
2418  */
2419 static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
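
/* Illustrative usage (path and mask values are examples only): writing a
 * single mask applies it to all three job slots, while three values set
 * JS0/JS1/JS2 individually:
 *   echo 0xF > /sys/class/misc/mali0/device/core_mask
 *   echo 0xF 0x3 0x3 > /sys/class/misc/mali0/device/core_mask
 */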
2420 
2421 #if !MALI_USE_CSF
2422 /**
2423  * set_soft_job_timeout - Store callback for the soft_job_timeout sysfs
2424  * file.
2425  *
2426  * @dev: The device this sysfs file is for.
2427  * @attr: The attributes of the sysfs file.
2428  * @buf: The value written to the sysfs file.
2429  * @count: The number of bytes to write to the sysfs file.
2430  *
2431  * This allows setting the timeout for software jobs. Waiting soft event wait
2432  * jobs will be cancelled after this period expires, while soft fence wait jobs
2433  * will print debug information if the fence debug feature is enabled.
2434  *
2435  * This is expressed in milliseconds.
2436  *
2437  * Return: count if the function succeeded. An error code on failure.
2438  */
2439 static ssize_t set_soft_job_timeout(struct device *dev,
2440                                     struct device_attribute *attr,
2441                                     const char *buf, size_t count)
2442 {
2443     struct kbase_device *kbdev;
2444     int soft_job_timeout_ms;
2445 
2446     kbdev = to_kbase_device(dev);
2447     if (!kbdev) {
2448         return -ENODEV;
2449     }
2450 
2451     if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
2452         (soft_job_timeout_ms <= 0)) {
2453         return -EINVAL;
2454     }
2455 
2456     atomic_set(&kbdev->js_data.soft_job_timeout_ms, soft_job_timeout_ms);
2457 
2458     return count;
2459 }
2460 
2461 /**
2462  * show_soft_job_timeout - Show callback for the soft_job_timeout sysfs
2463  * file.
2464  *
2465  * This will return the timeout for the software jobs.
2466  *
2467  * @dev: The device this sysfs file is for.
2468  * @attr: The attributes of the sysfs file.
2469  * @buf: The output buffer for the sysfs file contents.
2470  *
2471  * Return: The number of bytes output to buf.
2472  */
2473 static ssize_t show_soft_job_timeout(struct device *dev,
2474                                      struct device_attribute *attr,
2475                                      char *const buf)
2476 {
2477     struct kbase_device *kbdev;
2478 
2479     kbdev = to_kbase_device(dev);
2480     if (!kbdev) {
2481         return -ENODEV;
2482     }
2483 
2484     return scnprintf(buf, PAGE_SIZE, "%i\n",
2485                      atomic_read(&kbdev->js_data.soft_job_timeout_ms));
2486 }
2487 
2488 static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR, show_soft_job_timeout,
2489                    set_soft_job_timeout);
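
/* Illustrative usage (path is an example only): set a 3 second limit for
 * soft jobs:
 *   echo 3000 > /sys/class/misc/mali0/device/soft_job_timeout
 */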
2490 
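/* Convert a timeout expressed in milliseconds into scheduling ticks. A
 * negative value restores the supplied default and zero keeps the previous
 * tick count. As an illustrative example, with a 100 ms scheduling period
 * (100000000 ns) a 700 ms timeout converts to 700000000 / 100000000 = 7
 * ticks; the actual period is taken from js_data.scheduling_period_ns.
 */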
2491 static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
2492                                int default_ticks, u32 old_ticks)
2493 {
2494     if (timeout_ms > 0) {
2495         u64 ticks = timeout_ms * 1000000ULL;
2496         do_div(ticks, kbdev->js_data.scheduling_period_ns);
2497         if (!ticks) {
2498             return 1;
2499         }
2500         return ticks;
2501     } else if (timeout_ms < 0) {
2502         return default_ticks;
2503     } else {
2504         return old_ticks;
2505     }
2506 }
2507 
2508 /**
2509  * set_js_timeouts - Store callback for the js_timeouts sysfs file.
2510  *
2511  * This function is called when the js_timeouts sysfs file is written to. The
2512  * file contains eight values separated by whitespace, corresponding to the
2513  * %JS_SOFT_STOP_TICKS, %JS_SOFT_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_SS,
2514  * %JS_HARD_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_DUMPING, %JS_RESET_TICKS_SS,
2515  * %JS_RESET_TICKS_CL and %JS_RESET_TICKS_DUMPING configuration values (in
2516  * that order), with the difference that the values are expressed in
2517  * MILLISECONDS.
2518  *
2519  * The js_timeouts sysfs file allows the values currently in use by the job
2520  * scheduler to be overridden; a value must be non-zero to take effect.
2521  *
2522  * @dev:    The device this sysfs file is for
2523  * @attr:    The attributes of the sysfs file
2524  * @buf:    The value written to the sysfs file
2525  * @count:    The number of bytes to write to the sysfs file
2526  *
2527  * Return: @count if the function succeeded. An error code on failure.
2528  */
2529 static ssize_t set_js_timeouts(struct device *dev,
2530                                struct device_attribute *attr, const char *buf,
2531                                size_t count)
2532 {
2533     struct kbase_device *kbdev;
2534     int items;
2535     long js_soft_stop_ms;
2536     long js_soft_stop_ms_cl;
2537     long js_hard_stop_ms_ss;
2538     long js_hard_stop_ms_cl;
2539     long js_hard_stop_ms_dumping;
2540     long js_reset_ms_ss;
2541     long js_reset_ms_cl;
2542     long js_reset_ms_dumping;
2543 
2544     kbdev = to_kbase_device(dev);
2545     if (!kbdev) {
2546         return -ENODEV;
2547     }
2548 
2549     items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld", &js_soft_stop_ms,
2550                    &js_soft_stop_ms_cl, &js_hard_stop_ms_ss,
2551                    &js_hard_stop_ms_cl, &js_hard_stop_ms_dumping,
2552                    &js_reset_ms_ss, &js_reset_ms_cl, &js_reset_ms_dumping);
2553     if (items == 8) {
2554         struct kbasep_js_device_data *js_data = &kbdev->js_data;
2555         unsigned long flags;
2556         spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2557 
2558 #define UPDATE_TIMEOUT(ticks_name, ms_name, default)                           \
2559     do {                                                                       \
2560         js_data->ticks_name =                                                  \
2561             timeout_ms_to_ticks(kbdev, ms_name, default, js_data->ticks_name); \
2562         dev_dbg(kbdev->dev,                                                    \
2563                 "Overriding " #ticks_name " with %lu ticks (%lu ms)\n",        \
2564                 (unsigned long)js_data->ticks_name, ms_name);                  \
2565     } while (0)
2566 
2567         UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
2568                        DEFAULT_JS_SOFT_STOP_TICKS);
2569         UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
2570                        DEFAULT_JS_SOFT_STOP_TICKS_CL);
2571         UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
2572                        DEFAULT_JS_HARD_STOP_TICKS_SS);
2573         UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
2574                        DEFAULT_JS_HARD_STOP_TICKS_CL);
2575         UPDATE_TIMEOUT(hard_stop_ticks_dumping, js_hard_stop_ms_dumping,
2576                        DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
2577         UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
2578                        DEFAULT_JS_RESET_TICKS_SS);
2579         UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
2580                        DEFAULT_JS_RESET_TICKS_CL);
2581         UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
2582                        DEFAULT_JS_RESET_TICKS_DUMPING);
2583 
2584         kbase_js_set_timeouts(kbdev);
2585 
2586         spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2587 
2588         return count;
2589     }
2590 
2591     dev_err(kbdev->dev,
2592             "Couldn't process js_timeouts write operation.\n"
2593             "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> "
2594             "<hard_stop_ms_cl> "
2595             "<hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> "
2596             "<reset_ms_dumping>\n"
2597             "Write 0 for no change, -1 to restore default timeout\n");
2598     return -EINVAL;
2599 }
2600 
2601 static unsigned long get_js_timeout_in_ms(u32 scheduling_period_ns, u32 ticks)
2602 {
2603     u64 ms = (u64)ticks * scheduling_period_ns;
2604 
2605     do_div(ms, 1000000UL);
2606     return ms;
2607 }
2608 
2609 /**
2610  * show_js_timeouts - Show callback for the js_timeouts sysfs file.
2611  *
2612  * This function is called to get the contents of the js_timeouts sysfs
2613  * file. It returns the values last written to the js_timeouts sysfs file.
2614  * If the file has not been written to yet, the values returned are the
2615  * current settings in use.
2616  * @dev:    The device this sysfs file is for
2617  * @attr:    The attributes of the sysfs file
2618  * @buf:    The output buffer for the sysfs file contents
2619  *
2620  * Return: The number of bytes output to @buf.
2621  */
2622 static ssize_t show_js_timeouts(struct device *dev,
2623                                 struct device_attribute *attr, char *const buf)
2624 {
2625     struct kbase_device *kbdev;
2626     ssize_t ret;
2627     unsigned long js_soft_stop_ms;
2628     unsigned long js_soft_stop_ms_cl;
2629     unsigned long js_hard_stop_ms_ss;
2630     unsigned long js_hard_stop_ms_cl;
2631     unsigned long js_hard_stop_ms_dumping;
2632     unsigned long js_reset_ms_ss;
2633     unsigned long js_reset_ms_cl;
2634     unsigned long js_reset_ms_dumping;
2635     u32 scheduling_period_ns;
2636 
2637     kbdev = to_kbase_device(dev);
2638     if (!kbdev) {
2639         return -ENODEV;
2640     }
2641 
2642     scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
2643 
2644 #define GET_TIMEOUT(name)                                                      \
2645     get_js_timeout_in_ms(scheduling_period_ns, kbdev->js_data.name)
2646 
2647     js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
2648     js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
2649     js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
2650     js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
2651     js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
2652     js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
2653     js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
2654     js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
2655 
2656 #undef GET_TIMEOUT
2657 
2658     ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
2659                     js_soft_stop_ms, js_soft_stop_ms_cl, js_hard_stop_ms_ss,
2660                     js_hard_stop_ms_cl, js_hard_stop_ms_dumping, js_reset_ms_ss,
2661                     js_reset_ms_cl, js_reset_ms_dumping);
2662     if (ret >= PAGE_SIZE) {
2663         buf[PAGE_SIZE - 2] = '\n';
2664         buf[PAGE_SIZE - 1] = '\0';
2665         ret = PAGE_SIZE - 1;
2666     }
2667     return ret;
2668 }
2669 
2670 /*
2671  * The sysfs file js_timeouts.
2672  *
2673  * This is used to override the current job scheduler values for
2674  * JS_SOFT_STOP_TICKS
2675  * JS_SOFT_STOP_TICKS_CL
2676  * JS_HARD_STOP_TICKS_SS
2677  * JS_HARD_STOP_TICKS_CL
2678  * JS_HARD_STOP_TICKS_DUMPING
2679  * JS_RESET_TICKS_SS
2680  * JS_RESET_TICKS_CL
2681  * JS_RESET_TICKS_DUMPING.
2682  */
2683 static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts,
2684                    set_js_timeouts);
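
/* Illustrative usage (values and path are examples only): override all eight
 * timeouts in milliseconds, where 0 keeps the current value and -1 restores
 * the compiled-in default:
 *   echo 500 500 1000 1000 2000 3000 3000 4000 > /sys/class/misc/mali0/device/js_timeouts
 */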
2685 
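/* Rescale a tick count so the wall-clock timeout is preserved when the
 * scheduling period changes. For example, 10 ticks at a 100 ms period become
 * 10 * 100000000 / 50000000 = 20 ticks at a 50 ms period (values are
 * illustrative only).
 */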
2686 static u32 get_new_js_timeout(u32 old_period, u32 old_ticks,
2687                               u32 new_scheduling_period_ns)
2688 {
2689     u64 ticks = (u64)old_period * (u64)old_ticks;
2690     do_div(ticks, new_scheduling_period_ns);
2691     return ticks ? ticks : 1;
2692 }
2693 
2694 /**
2695  * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
2696  *                            file
2697  * @dev:   The device the sysfs file is for
2698  * @attr:  The attributes of the sysfs file
2699  * @buf:   The value written to the sysfs file
2700  * @count: The number of bytes to write to the sysfs file
2701  *
2702  * This function is called when the js_scheduling_period sysfs file is written
2703  * to. It checks the data written and, if valid, updates the
2704  * js_scheduling_period value.
2705  *
2706  * Return: @count if the function succeeded. An error code on failure.
2707  */
2708 static ssize_t set_js_scheduling_period(struct device *dev,
2709                                         struct device_attribute *attr,
2710                                         const char *buf, size_t count)
2711 {
2712     struct kbase_device *kbdev;
2713     int ret;
2714     unsigned int js_scheduling_period;
2715     u32 new_scheduling_period_ns;
2716     u32 old_period;
2717     struct kbasep_js_device_data *js_data;
2718     unsigned long flags;
2719 
2720     kbdev = to_kbase_device(dev);
2721     if (!kbdev) {
2722         return -ENODEV;
2723     }
2724 
2725     js_data = &kbdev->js_data;
2726 
2727     ret = kstrtouint(buf, 0, &js_scheduling_period);
2728     if (ret || !js_scheduling_period) {
2729         dev_err(kbdev->dev,
2730                 "Couldn't process js_scheduling_period write operation.\n"
2731                 "Use format <js_scheduling_period_ms>\n");
2732         return -EINVAL;
2733     }
2734 
2735     new_scheduling_period_ns = js_scheduling_period * 1000000;
2736 
2737     /* Update scheduling timeouts */
2738     mutex_lock(&js_data->runpool_mutex);
2739     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2740 
2741     /* If no contexts have been scheduled since js_timeouts was last written
2742      * to, the new timeouts might not have been latched yet. So check if an
2743      * update is pending and use the new values if necessary. */
2744 
2745     /* Use previous 'new' scheduling period as a base if present. */
2746     old_period = js_data->scheduling_period_ns;
2747 
2748 #define SET_TIMEOUT(name)                                                      \
2749     (js_data->name = get_new_js_timeout(old_period, kbdev->js_data.name,       \
2750                                         new_scheduling_period_ns))
2751 
2752     SET_TIMEOUT(soft_stop_ticks);
2753     SET_TIMEOUT(soft_stop_ticks_cl);
2754     SET_TIMEOUT(hard_stop_ticks_ss);
2755     SET_TIMEOUT(hard_stop_ticks_cl);
2756     SET_TIMEOUT(hard_stop_ticks_dumping);
2757     SET_TIMEOUT(gpu_reset_ticks_ss);
2758     SET_TIMEOUT(gpu_reset_ticks_cl);
2759     SET_TIMEOUT(gpu_reset_ticks_dumping);
2760 
2761 #undef SET_TIMEOUT
2762 
2763     js_data->scheduling_period_ns = new_scheduling_period_ns;
2764 
2765     kbase_js_set_timeouts(kbdev);
2766 
2767     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2768     mutex_unlock(&js_data->runpool_mutex);
2769 
2770     dev_dbg(kbdev->dev, "JS scheduling period: %dms\n", js_scheduling_period);
2771 
2772     return count;
2773 }
2774 
2775 /**
2776  * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
2777  *                             entry.
2778  * @dev:  The device this sysfs file is for.
2779  * @attr: The attributes of the sysfs file.
2780  * @buf:  The output buffer to receive the GPU information.
2781  *
2782  * This function is called to get the current period used for the JS scheduling
2783  * period.
2784  *
2785  * Return: The number of bytes output to @buf.
2786  */
2787 static ssize_t show_js_scheduling_period(struct device *dev,
2788                                          struct device_attribute *attr,
2789                                          char *const buf)
2790 {
2791     struct kbase_device *kbdev;
2792     u32 period;
2793     ssize_t ret;
2794 
2795     kbdev = to_kbase_device(dev);
2796     if (!kbdev) {
2797         return -ENODEV;
2798     }
2799 
2800     period = kbdev->js_data.scheduling_period_ns;
2801 
2802     ret = scnprintf(buf, PAGE_SIZE, "%d\n", period / 1000000);
2803 
2804     return ret;
2805 }
2806 
2807 static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
2808                    show_js_scheduling_period, set_js_scheduling_period);
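
/* Illustrative usage (path is an example only): shorten the scheduling
 * period to 50 ms, which also rescales the currently programmed timeouts:
 *   echo 50 > /sys/class/misc/mali0/device/js_scheduling_period
 */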
2809 
2810 #ifdef CONFIG_MALI_BIFROST_DEBUG
2811 static ssize_t set_js_softstop_always(struct device *dev,
2812                                       struct device_attribute *attr,
2813                                       const char *buf, size_t count)
2814 {
2815     struct kbase_device *kbdev;
2816     int ret;
2817     int softstop_always;
2818 
2819     kbdev = to_kbase_device(dev);
2820     if (!kbdev) {
2821         return -ENODEV;
2822     }
2823 
2824     ret = kstrtoint(buf, 0, &softstop_always);
2825     if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
2826         dev_err(kbdev->dev,
2827                 "Couldn't process js_softstop_always write operation.\n"
2828                 "Use format <soft_stop_always>\n");
2829         return -EINVAL;
2830     }
2831 
2832     kbdev->js_data.softstop_always = (bool)softstop_always;
2833     dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
2834             (kbdev->js_data.softstop_always) ? "Enabled" : "Disabled");
2835     return count;
2836 }
2837 
2838 static ssize_t show_js_softstop_always(struct device *dev,
2839                                        struct device_attribute *attr,
2840                                        char *const buf)
2841 {
2842     struct kbase_device *kbdev;
2843     ssize_t ret;
2844 
2845     kbdev = to_kbase_device(dev);
2846     if (!kbdev) {
2847         return -ENODEV;
2848     }
2849 
2850     ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
2851     if (ret >= PAGE_SIZE) {
2852         buf[PAGE_SIZE - 2] = '\n';
2853         buf[PAGE_SIZE - 1] = '\0';
2854         ret = PAGE_SIZE - 1;
2855     }
2856     return ret;
2857 }
2858 
2859 /*
2860  * By default, soft-stops are disabled when only a single context is present.
2861  * The ability to enable soft-stop when only a single context is present can be
2862  * used for debug and unit-testing purposes.
2863  * (see the CL t6xx_stress_1 unit-test for an example where this feature is used.)
2864  */
2865 static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR,
2866                    show_js_softstop_always, set_js_softstop_always);
2867 #endif /* CONFIG_MALI_BIFROST_DEBUG */
2868 #endif /* !MALI_USE_CSF */
2869 
2870 #ifdef CONFIG_MALI_BIFROST_DEBUG
2871 typedef void(kbasep_debug_command_func)(struct kbase_device *);
2872 
2873 enum kbasep_debug_command_code {
2874     KBASEP_DEBUG_COMMAND_DUMPTRACE,
2875 
2876     /* This must be the last enum */
2877     KBASEP_DEBUG_COMMAND_COUNT
2878 };
2879 
2880 struct kbasep_debug_command {
2881     char *str;
2882     kbasep_debug_command_func *func;
2883 };
2884 
2885 void kbasep_ktrace_dump_wrapper(struct kbase_device *kbdev)
2886 {
2887     KBASE_KTRACE_DUMP(kbdev);
2888 }
2889 
2890 /* Debug commands supported by the driver */
2891 static const struct kbasep_debug_command debug_commands[] = {{
2892     .str = "dumptrace",
2893     .func = &kbasep_ktrace_dump_wrapper,
2894 }};
2895 
2896 /**
2897  * show_debug - Show callback for the debug_command sysfs file.
2898  *
2899  * This function is called to get the contents of the debug_command sysfs
2900  * file. This is a list of the available debug commands, separated by newlines.
2901  *
2902  * @dev:    The device this sysfs file is for
2903  * @attr:    The attributes of the sysfs file
2904  * @buf:    The output buffer for the sysfs file contents
2905  *
2906  * Return: The number of bytes output to @buf.
2907  */
2908 static ssize_t show_debug(struct device *dev, struct device_attribute *attr,
2909                           char *const buf)
2910 {
2911     struct kbase_device *kbdev;
2912     int i;
2913     ssize_t ret = 0;
2914 
2915     kbdev = to_kbase_device(dev);
2916     if (!kbdev) {
2917         return -ENODEV;
2918     }
2919 
2920     for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++) {
2921         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
2922                          debug_commands[i].str);
2923     }
2924     if (ret >= PAGE_SIZE) {
2925         buf[PAGE_SIZE - 2] = '\n';
2926         buf[PAGE_SIZE - 1] = '\0';
2927         ret = PAGE_SIZE - 1;
2928     }
2929 
2930     return ret;
2931 }
2932 
2933 /**
2934  * issue_debug - Store callback for the debug_command sysfs file.
2935  *
2936  * This function is called when the debug_command sysfs file is written to.
2937  * It matches the requested command against the available commands, and if
2938  * a matching command is found calls the associated function from
2939  * @debug_commands to issue the command.
2940  *
2941  * @dev:    The device this sysfs file is for
2942  * @attr:    The attributes of the sysfs file
2943  * @buf:    The value written to the sysfs file
2944  * @count:    The number of bytes written to the sysfs file
2945  *
2946  * Return: @count if the function succeeded. An error code on failure.
2947  */
2948 static ssize_t issue_debug(struct device *dev, struct device_attribute *attr,
2949                            const char *buf, size_t count)
2950 {
2951     struct kbase_device *kbdev;
2952     int i;
2953 
2954     kbdev = to_kbase_device(dev);
2955     if (!kbdev) {
2956         return -ENODEV;
2957     }
2958 
2959     for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
2960         if (sysfs_streq(debug_commands[i].str, buf)) {
2961             debug_commands[i].func(kbdev);
2962             return count;
2963         }
2964     }
2965 
2966     /* Debug Command not found */
2967     dev_err(dev, "debug_command: command not known\n");
2968     return -EINVAL;
2969 }
2970 
2971 /* The sysfs file debug_command.
2972  *
2973  * This is used to issue general debug commands to the device driver.
2974  * Reading it will produce a list of debug commands, separated by newlines.
2975  * Writing to it with one of those commands will issue said command.
2976  */
2977 static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
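
/*
 * Example usage of the debug_command sysfs file (debug builds only; the path
 * below assumes the usual "mali0" misc device name):
 *
 *   cat /sys/class/misc/mali0/device/debug_command    # lists "dumptrace"
 *   echo dumptrace > /sys/class/misc/mali0/device/debug_command
 */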
2978 #endif /* CONFIG_MALI_BIFROST_DEBUG */
2979 
2980 /**
2981  * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
2982  * @dev: The device this sysfs file is for.
2983  * @attr: The attributes of the sysfs file.
2984  * @buf: The output buffer to receive the GPU information.
2985  *
2986  * This function is called to get a description of the present Mali
2987  * GPU via the gpuinfo sysfs entry.  This includes the GPU family, the
2988  * number of cores, the hardware version and the raw product id.  The
2989  * output takes the form
2990  *
2991  *    <product name> <core count> cores r<major>p<minor> 0x<product id>
2992  *
2993  * Return: The number of bytes output to @buf.
2994  */
2995 static ssize_t kbase_show_gpuinfo(struct device *dev,
2996                                   struct device_attribute *attr, char *buf)
2997 {
2998     static const struct gpu_product_id_name {
2999         unsigned id;
3000         char *name;
3001     } gpu_product_id_names[] = {
3002         {.id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3003          .name = "Mali-G71"},
3004         {.id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3005          .name = "Mali-G72"},
3006         {.id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3007          .name = "Mali-G51"},
3008         {.id = GPU_ID2_PRODUCT_TNOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3009          .name = "Mali-G76"},
3010         {.id = GPU_ID2_PRODUCT_TDVX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3011          .name = "Mali-G31"},
3012         {.id = GPU_ID2_PRODUCT_TGOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3013          .name = "Mali-G52"},
3014         {.id = GPU_ID2_PRODUCT_TTRX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3015          .name = "Mali-G77"},
3016         {.id = GPU_ID2_PRODUCT_TBEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3017          .name = "Mali-G78"},
3018         {.id = GPU_ID2_PRODUCT_TBAX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3019          .name = "Mali-TBAX"},
3020         {.id = GPU_ID2_PRODUCT_LBEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3021          .name = "Mali-G68"},
3022         {.id = GPU_ID2_PRODUCT_TNAX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3023          .name = "Mali-G57"},
3024         {.id = GPU_ID2_PRODUCT_TODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3025          .name = "Mali-TODX"},
3026         {.id = GPU_ID2_PRODUCT_TGRX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3027          .name = "Mali-TGRX"},
3028         {.id = GPU_ID2_PRODUCT_TVAX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3029          .name = "Mali-TVAX"},
3030         {.id = GPU_ID2_PRODUCT_LODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3031          .name = "Mali-LODX"},
3032         {.id = GPU_ID2_PRODUCT_TTUX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3033          .name = "Mali-TTUX"},
3034         {.id = GPU_ID2_PRODUCT_LTUX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3035          .name = "Mali-LTUX"},
3036         {.id = GPU_ID2_PRODUCT_TE2X >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
3037          .name = "Mali-TE2X"},
3038     };
3039     const char *product_name = "(Unknown Mali GPU)";
3040     struct kbase_device *kbdev;
3041     u32 gpu_id;
3042     unsigned product_id, product_id_mask;
3043     unsigned i;
3044 
3045     kbdev = to_kbase_device(dev);
3046     if (!kbdev) {
3047         return -ENODEV;
3048     }
3049 
3050     gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
3051     product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3052     product_id_mask = GPU_ID2_PRODUCT_MODEL >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3053 
3054     for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
3055         const struct gpu_product_id_name *p = &gpu_product_id_names[i];
3056 
3057         if ((p->id & product_id_mask) == (product_id & product_id_mask)) {
3058             product_name = p->name;
3059             break;
3060         }
3061     }
3062 
3063     return scnprintf(
3064         buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n", product_name,
3065         kbdev->gpu_props.num_cores,
3066         (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
3067         (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
3068         product_id);
3069 }
3070 static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
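
/*
 * Example read of the gpuinfo sysfs file (path is platform dependent):
 *
 *   cat /sys/class/misc/mali0/device/gpuinfo
 *
 * The output follows the format documented above for kbase_show_gpuinfo().
 */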
3071 
3072 /**
3073  * set_dvfs_period - Store callback for the dvfs_period sysfs file.
3074  * @dev:   The device this sysfs file is for
3075  * @attr:  The attributes of the sysfs file
3076  * @buf:   The value written to the sysfs file
3077  * @count: The number of bytes written to the sysfs file
3078  *
3079  * This function is called when the dvfs_period sysfs file is written to. It
3080  * checks the data written, and if valid updates the DVFS period variable,
3081  *
3082  * Return: @count if the function succeeded. An error code on failure.
3083  */
3084 static ssize_t set_dvfs_period(struct device *dev,
3085                                struct device_attribute *attr, const char *buf,
3086                                size_t count)
3087 {
3088     struct kbase_device *kbdev;
3089     int ret;
3090     int dvfs_period;
3091 
3092     kbdev = to_kbase_device(dev);
3093     if (!kbdev) {
3094         return -ENODEV;
3095     }
3096 
3097     ret = kstrtoint(buf, 0, &dvfs_period);
3098     if (ret || dvfs_period <= 0) {
3099         dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
3100                             "Use format <dvfs_period_ms>\n");
3101         return -EINVAL;
3102     }
3103 
3104     kbdev->pm.dvfs_period = dvfs_period;
3105     dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
3106 
3107     return count;
3108 }
3109 
3110 /**
3111  * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
3112  * @dev:  The device this sysfs file is for.
3113  * @attr: The attributes of the sysfs file.
3114  * @buf:  The output buffer to receive the DVFS period.
3115  *
3116  * This function is called to get the current period used for the DVFS sample
3117  * timer.
3118  *
3119  * Return: The number of bytes output to @buf.
3120  */
3121 static ssize_t show_dvfs_period(struct device *dev,
3122                                 struct device_attribute *attr, char *const buf)
3123 {
3124     struct kbase_device *kbdev;
3125     ssize_t ret;
3126 
3127     kbdev = to_kbase_device(dev);
3128     if (!kbdev) {
3129         return -ENODEV;
3130     }
3131 
3132     ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
3133 
3134     return ret;
3135 }
3136 
3137 static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
3138                    set_dvfs_period);
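
/*
 * Example usage of the dvfs_period sysfs file (path is platform dependent):
 *
 *   # sample GPU utilisation for DVFS every 200 ms
 *   echo 200 > /sys/class/misc/mali0/device/dvfs_period
 */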
3139 
3140 /**
3141  * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
3142  * @dev:   The device this sysfs file is for
3143  * @attr:  The attributes of the sysfs file
3144  * @buf:   The value written to the sysfs file
3145  * @count: The number of bytes written to the sysfs file
3146  *
3147  * This function is called when the pm_poweroff sysfs file is written to.
3148  *
3149  * This file contains three values separated by whitespace. The values
3150  * are gpu_poweroff_time (the period of the poweroff timer, in ns),
3151  * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
3152  * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
3153  * ticks before the GPU is powered off), in that order.
3154  *
3155  * Return: @count if the function succeeded. An error code on failure.
3156  */
3157 static ssize_t set_pm_poweroff(struct device *dev,
3158                                struct device_attribute *attr, const char *buf,
3159                                size_t count)
3160 {
3161     struct kbase_device *kbdev;
3162     struct kbasep_pm_tick_timer_state *stt;
3163     int items;
3164     u64 gpu_poweroff_time;
3165     unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
3166     unsigned long flags;
3167 
3168     kbdev = to_kbase_device(dev);
3169     if (!kbdev) {
3170         return -ENODEV;
3171     }
3172 
3173     items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
3174                    &poweroff_shader_ticks, &poweroff_gpu_ticks);
3175     if (items != 3) {
3176         dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
3177                             "Use format <gpu_poweroff_time_ns> "
3178                             "<poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
3179         return -EINVAL;
3180     }
3181 
3182     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3183 
3184     stt = &kbdev->pm.backend.shader_tick_timer;
3185     stt->configured_interval = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
3186     stt->configured_ticks = poweroff_shader_ticks;
3187 
3188     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3189 
3190     if (poweroff_gpu_ticks != 0) {
3191         dev_warn(kbdev->dev,
3192                  "Separate GPU poweroff delay no longer supported.\n");
3193     }
3194 
3195     return count;
3196 }
3197 
3198 /**
3199  * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
3200  * @dev:  The device this sysfs file is for.
3201  * @attr: The attributes of the sysfs file.
3202  * @buf:  The output buffer to receive the power-off parameters.
3203  *
3204  * This function is called to get the current power-off timer period and the
3205  * configured shader power-off tick count.
3206  *
3207  * Return: The number of bytes output to @buf.
3208  */
3209 static ssize_t show_pm_poweroff(struct device *dev,
3210                                 struct device_attribute *attr, char *const buf)
3211 {
3212     struct kbase_device *kbdev;
3213     struct kbasep_pm_tick_timer_state *stt;
3214     ssize_t ret;
3215     unsigned long flags;
3216 
3217     kbdev = to_kbase_device(dev);
3218     if (!kbdev) {
3219         return -ENODEV;
3220     }
3221 
3222     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3223 
3224     stt = &kbdev->pm.backend.shader_tick_timer;
3225     ret =
3226         scnprintf(buf, PAGE_SIZE, "%llu %u 0\n",
3227                   ktime_to_ns(stt->configured_interval), stt->configured_ticks);
3228 
3229     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3230 
3231     return ret;
3232 }
3233 
3234 static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
3235                    set_pm_poweroff);
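
/*
 * Example usage of the pm_poweroff sysfs file (path and values are
 * illustrative). The three fields are the poweroff timer period in ns, the
 * shader poweroff tick count, and a legacy GPU poweroff tick count that must
 * be supplied but is no longer used:
 *
 *   echo "400000 3 0" > /sys/class/misc/mali0/device/pm_poweroff
 */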
3236 
3237 /**
3238  * set_reset_timeout - Store callback for the reset_timeout sysfs file.
3239  * @dev:   The device this sysfs file is for
3240  * @attr:  The attributes of the sysfs file
3241  * @buf:   The value written to the sysfs file
3242  * @count: The number of bytes written to the sysfs file
3243  *
3244  * This function is called when the reset_timeout sysfs file is written to. It
3245  * checks the data written, and if valid updates the reset timeout.
3246  *
3247  * Return: @count if the function succeeded. An error code on failure.
3248  */
3249 static ssize_t set_reset_timeout(struct device *dev,
3250                                  struct device_attribute *attr, const char *buf,
3251                                  size_t count)
3252 {
3253     struct kbase_device *kbdev;
3254     int ret;
3255     int reset_timeout;
3256 
3257     kbdev = to_kbase_device(dev);
3258     if (!kbdev) {
3259         return -ENODEV;
3260     }
3261 
3262     ret = kstrtoint(buf, 0, &reset_timeout);
3263     if (ret || reset_timeout <= 0) {
3264         dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
3265                             "Use format <reset_timeout_ms>\n");
3266         return -EINVAL;
3267     }
3268 
3269     kbdev->reset_timeout_ms = reset_timeout;
3270     dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
3271 
3272     return count;
3273 }
3274 
3275 /**
3276  * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
3277  * @dev:  The device this sysfs file is for.
3278  * @attr: The attributes of the sysfs file.
3279  * @buf:  The output buffer to receive the reset timeout.
3280  *
3281  * This function is called to get the current reset timeout.
3282  *
3283  * Return: The number of bytes output to @buf.
3284  */
3285 static ssize_t show_reset_timeout(struct device *dev,
3286                                   struct device_attribute *attr,
3287                                   char *const buf)
3288 {
3289     struct kbase_device *kbdev;
3290     ssize_t ret;
3291 
3292     kbdev = to_kbase_device(dev);
3293     if (!kbdev) {
3294         return -ENODEV;
3295     }
3296 
3297     ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
3298 
3299     return ret;
3300 }
3301 
3302 static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
3303                    set_reset_timeout);
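
/*
 * Example usage of the reset_timeout sysfs file (path is platform dependent):
 *
 *   # wait up to 500 ms for the GPU to respond before resetting it
 *   echo 500 > /sys/class/misc/mali0/device/reset_timeout
 */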
3304 
3305 static ssize_t show_mem_pool_size(struct device *dev,
3306                                   struct device_attribute *attr,
3307                                   char *const buf)
3308 {
3309     struct kbase_device *const kbdev = to_kbase_device(dev);
3310 
3311     if (!kbdev) {
3312         return -ENODEV;
3313     }
3314 
3315     return kbase_debugfs_helper_get_attr_to_string(
3316         buf, PAGE_SIZE, kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3317         kbase_mem_pool_debugfs_size);
3318 }
3319 
3320 static ssize_t set_mem_pool_size(struct device *dev,
3321                                  struct device_attribute *attr, const char *buf,
3322                                  size_t count)
3323 {
3324     struct kbase_device *const kbdev = to_kbase_device(dev);
3325     int err;
3326 
3327     if (!kbdev) {
3328         return -ENODEV;
3329     }
3330 
3331     err = kbase_debugfs_helper_set_attr_from_string(
3332         buf, kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3333         kbase_mem_pool_debugfs_trim);
3334 
3335     return err ? err : count;
3336 }
3337 
3338 static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
3339                    set_mem_pool_size);
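
/*
 * mem_pool_size and mem_pool_max_size act on the small-page pools of all
 * MEMORY_GROUP_MANAGER_NR_GROUPS memory groups through the debugfs string
 * helper. A minimal usage sketch, assuming the helper applies a single value
 * to every group (path is platform dependent):
 *
 *   echo 2048 > /sys/class/misc/mali0/device/mem_pool_size
 */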
3340 
3341 static ssize_t show_mem_pool_max_size(struct device *dev,
3342                                       struct device_attribute *attr,
3343                                       char *const buf)
3344 {
3345     struct kbase_device *const kbdev = to_kbase_device(dev);
3346 
3347     if (!kbdev) {
3348         return -ENODEV;
3349     }
3350 
3351     return kbase_debugfs_helper_get_attr_to_string(
3352         buf, PAGE_SIZE, kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3353         kbase_mem_pool_debugfs_max_size);
3354 }
3355 
3356 static ssize_t set_mem_pool_max_size(struct device *dev,
3357                                      struct device_attribute *attr,
3358                                      const char *buf, size_t count)
3359 {
3360     struct kbase_device *const kbdev = to_kbase_device(dev);
3361     int err;
3362 
3363     if (!kbdev) {
3364         return -ENODEV;
3365     }
3366 
3367     err = kbase_debugfs_helper_set_attr_from_string(
3368         buf, kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3369         kbase_mem_pool_debugfs_set_max_size);
3370 
3371     return err ? err : count;
3372 }
3373 
3374 static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
3375                    set_mem_pool_max_size);
3376 
3377 /**
3378  * show_lp_mem_pool_size - Show size of the large memory pages pool.
3379  * @dev:  The device this sysfs file is for.
3380  * @attr: The attributes of the sysfs file.
3381  * @buf:  The output buffer to receive the pool size.
3382  *
3383  * This function is called to get the number of large memory pages which
3384  * currently populate the kbdev pool.
3385  *
3386  * Return: The number of bytes output to @buf.
3387  */
3388 static ssize_t show_lp_mem_pool_size(struct device *dev,
3389                                      struct device_attribute *attr,
3390                                      char *const buf)
3391 {
3392     struct kbase_device *const kbdev = to_kbase_device(dev);
3393 
3394     if (!kbdev) {
3395         return -ENODEV;
3396     }
3397 
3398     return kbase_debugfs_helper_get_attr_to_string(
3399         buf, PAGE_SIZE, kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3400         kbase_mem_pool_debugfs_size);
3401 }
3402 
3403 /**
3404  * set_lp_mem_pool_size - Set size of the large memory pages pool.
3405  * @dev:   The device this sysfs file is for.
3406  * @attr:  The attributes of the sysfs file.
3407  * @buf:   The value written to the sysfs file.
3408  * @count: The number of bytes written to the sysfs file.
3409  *
3410  * This function is called to set the number of large memory pages which should
3411  * populate the kbdev pool. This may cause existing pages to be removed from the
3412  * pool, or new pages to be created and then added to the pool.
3413  *
3414  * Return: @count if the function succeeded. An error code on failure.
3415  */
3416 static ssize_t set_lp_mem_pool_size(struct device *dev,
3417                                     struct device_attribute *attr,
3418                                     const char *buf, size_t count)
3419 {
3420     struct kbase_device *const kbdev = to_kbase_device(dev);
3421     int err;
3422 
3423     if (!kbdev) {
3424         return -ENODEV;
3425     }
3426 
3427     err = kbase_debugfs_helper_set_attr_from_string(
3428         buf, kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3429         kbase_mem_pool_debugfs_trim);
3430 
3431     return err ? err : count;
3432 }
3433 
3434 static DEVICE_ATTR(lp_mem_pool_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_size,
3435                    set_lp_mem_pool_size);
3436 
3437 /**
3438  * show_lp_mem_pool_max_size - Show maximum size of the large memory pages pool.
3439  * @dev:  The device this sysfs file is for.
3440  * @attr: The attributes of the sysfs file.
3441  * @buf:  The output buffer to receive the pool size.
3442  *
3443  * This function is called to get the maximum number of large memory pages that
3444  * the kbdev pool can possibly contain.
3445  *
3446  * Return: The number of bytes output to @buf.
3447  */
3448 static ssize_t show_lp_mem_pool_max_size(struct device *dev,
3449                                          struct device_attribute *attr,
3450                                          char *const buf)
3451 {
3452     struct kbase_device *const kbdev = to_kbase_device(dev);
3453 
3454     if (!kbdev) {
3455         return -ENODEV;
3456     }
3457 
3458     return kbase_debugfs_helper_get_attr_to_string(
3459         buf, PAGE_SIZE, kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3460         kbase_mem_pool_debugfs_max_size);
3461 }
3462 
3463 /**
3464  * set_lp_mem_pool_max_size - Set maximum size of the large memory pages pool.
3465  * @dev:   The device this sysfs file is for.
3466  * @attr:  The attributes of the sysfs file.
3467  * @buf:   The value written to the sysfs file.
3468  * @count: The number of bytes written to the sysfs file.
3469  *
3470  * This function is called to set the maximum number of large memory pages that
3471  * the kbdev pool can possibly contain.
3472  *
3473  * Return: @count if the function succeeded. An error code on failure.
3474  */
3475 static ssize_t set_lp_mem_pool_max_size(struct device *dev,
3476                                         struct device_attribute *attr,
3477                                         const char *buf, size_t count)
3478 {
3479     struct kbase_device *const kbdev = to_kbase_device(dev);
3480     int err;
3481 
3482     if (!kbdev) {
3483         return -ENODEV;
3484     }
3485 
3486     err = kbase_debugfs_helper_set_attr_from_string(
3487         buf, kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
3488         kbase_mem_pool_debugfs_set_max_size);
3489 
3490     return err ? err : count;
3491 }
3492 
3493 static DEVICE_ATTR(lp_mem_pool_max_size, S_IRUGO | S_IWUSR,
3494                    show_lp_mem_pool_max_size, set_lp_mem_pool_max_size);
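
/*
 * The lp_mem_pool_size and lp_mem_pool_max_size attributes mirror the
 * small-page pool attributes above but operate on kbdev->mem_pools.large,
 * i.e. the pools of large (typically 2 MB) pages, using the same write
 * format.
 */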
3495 
3496 #if !MALI_USE_CSF
3497 /**
3498  * show_js_ctx_scheduling_mode - Show callback for js_ctx_scheduling_mode sysfs
3499  *                               entry.
3500  * @dev:  The device this sysfs file is for.
3501  * @attr: The attributes of the sysfs file.
3502  * @buf:  The output buffer to receive the context scheduling mode information.
3503  *
3504  * This function is called to get the context scheduling mode being used by JS.
3505  *
3506  * Return: The number of bytes output to @buf.
3507  */
3508 static ssize_t show_js_ctx_scheduling_mode(struct device *dev,
3509                                            struct device_attribute *attr,
3510                                            char *const buf)
3511 {
3512     struct kbase_device *kbdev;
3513 
3514     kbdev = to_kbase_device(dev);
3515     if (!kbdev) {
3516         return -ENODEV;
3517     }
3518 
3519     return scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->js_ctx_scheduling_mode);
3520 }
3521 
3522 /**
3523  * set_js_ctx_scheduling_mode - Set callback for js_ctx_scheduling_mode sysfs
3524  *                              entry.
3525  * @dev:   The device this sysfs file is for.
3526  * @attr:  The attributes of the sysfs file.
3527  * @buf:   The value written to the sysfs file.
3528  * @count: The number of bytes written to the sysfs file.
3529  *
3530  * This function is called when the js_ctx_scheduling_mode sysfs file is written
3531  * to. It checks the data written, and if valid updates the ctx scheduling mode
3532  * being used by JS.
3533  *
3534  * Return: @count if the function succeeded. An error code on failure.
3535  */
3536 static ssize_t set_js_ctx_scheduling_mode(struct device *dev,
3537                                           struct device_attribute *attr,
3538                                           const char *buf, size_t count)
3539 {
3540     struct kbase_context *kctx;
3541     u32 new_js_ctx_scheduling_mode;
3542     struct kbase_device *kbdev;
3543     unsigned long flags;
3544     int ret;
3545 
3546     kbdev = to_kbase_device(dev);
3547     if (!kbdev) {
3548         return -ENODEV;
3549     }
3550 
3551     ret = kstrtouint(buf, 0, &new_js_ctx_scheduling_mode);
3552     if (ret || new_js_ctx_scheduling_mode >= KBASE_JS_PRIORITY_MODE_COUNT) {
3553         dev_err(kbdev->dev, "Couldn't process js_ctx_scheduling_mode"
3554                             " write operation.\n"
3555                             "Use format <js_ctx_scheduling_mode>\n");
3556         return -EINVAL;
3557     }
3558 
3559     if (new_js_ctx_scheduling_mode == kbdev->js_ctx_scheduling_mode) {
3560         return count;
3561     }
3562 
3563     mutex_lock(&kbdev->kctx_list_lock);
3564     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3565 
3566     /* Update the context priority mode */
3567     kbdev->js_ctx_scheduling_mode = new_js_ctx_scheduling_mode;
3568 
3569     /* Adjust priority of all the contexts as per the new mode */
3570     list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link)
3571         kbase_js_update_ctx_priority(kctx);
3572 
3573     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3574     mutex_unlock(&kbdev->kctx_list_lock);
3575 
3576     dev_dbg(kbdev->dev, "JS ctx scheduling mode: %u\n",
3577             new_js_ctx_scheduling_mode);
3578 
3579     return count;
3580 }
3581 
3582 static DEVICE_ATTR(js_ctx_scheduling_mode, S_IRUGO | S_IWUSR,
3583                    show_js_ctx_scheduling_mode, set_js_ctx_scheduling_mode);
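
/*
 * Example usage of the js_ctx_scheduling_mode sysfs file. Valid values are
 * 0 to KBASE_JS_PRIORITY_MODE_COUNT - 1 (path is platform dependent):
 *
 *   echo 1 > /sys/class/misc/mali0/device/js_ctx_scheduling_mode
 */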
3584 
3585 #ifdef MALI_KBASE_BUILD
3586 
3587 /* Number of entries in serialize_jobs_settings[] */
3588 #define NR_SERIALIZE_JOBS_SETTINGS 5
3589 /* Maximum string length in serialize_jobs_settings[].name */
3590 #define MAX_SERIALIZE_JOBS_NAME_LEN 16
3591 
3592 static struct {
3593     char *name;
3594     u8 setting;
3595 } serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
3596     {"none", 0},
3597     {"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
3598     {"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
3599     {"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
3600     {"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
3601                        KBASE_SERIALIZE_RESET}};
3602 
3603 /**
3604  * update_serialize_jobs_setting - Update the serialization setting for the
3605  *                                 submission of GPU jobs.
3606  *
3607  * This function is called when the serialize_jobs sysfs/debugfs file is
3608  * written to. It matches the requested setting against the available settings
3609  * and if a matching setting is found updates kbdev->serialize_jobs.
3610  *
3611  * @kbdev:  An instance of the GPU platform device, allocated from the probe
3612  *          method of the driver.
3613  * @buf:    Buffer containing the value written to the sysfs/debugfs file.
3614  * @count:  The number of bytes written to the sysfs/debugfs file.
3615  *
3616  * Return: @count if the function succeeded. An error code on failure.
3617  */
3618 static ssize_t update_serialize_jobs_setting(struct kbase_device *kbdev,
3619                                              const char *buf, size_t count)
3620 {
3621     int i;
3622     bool valid = false;
3623 
3624     for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
3625         if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
3626             kbdev->serialize_jobs = serialize_jobs_settings[i].setting;
3627             valid = true;
3628             break;
3629         }
3630     }
3631 
3632     if (!valid) {
3633         dev_err(kbdev->dev, "serialize_jobs: invalid setting");
3634         return -EINVAL;
3635     }
3636 
3637     return count;
3638 }
3639 
3640 #ifdef CONFIG_DEBUG_FS
3641 /**
3642  * kbasep_serialize_jobs_seq_debugfs_show - Show callback for the serialize_jobs
3643  *                        debugfs file
3644  * @sfile: seq_file pointer
3645  * @data:  Private callback data
3646  *
3647  * This function is called to get the contents of the serialize_jobs debugfs
3648  * file. This is a list of the available settings with the currently active one
3649  * surrounded by square brackets.
3650  *
3651  * Return: 0 on success, or an error code on error
3652  */
3653 static int kbasep_serialize_jobs_seq_debugfs_show(struct seq_file *sfile,
3654                                                   void *data)
3655 {
3656     struct kbase_device *kbdev = sfile->private;
3657     int i;
3658 
3659     CSTD_UNUSED(data);
3660 
3661     for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
3662         if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting) {
3663             seq_printf(sfile, "[%s] ", serialize_jobs_settings[i].name);
3664         } else {
3665             seq_printf(sfile, "%s ", serialize_jobs_settings[i].name);
3666         }
3667     }
3668 
3669     seq_puts(sfile, "\n");
3670 
3671     return 0;
3672 }
3673 
3674 /**
3675  * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs
3676  *                                       debugfs file.
3677  * @file:  File pointer
3678  * @ubuf:  User buffer containing data to store
3679  * @count: Number of bytes in user buffer
3680  * @ppos:  File position
3681  *
3682  * This function is called when the serialize_jobs debugfs file is written to.
3683  * It matches the requested setting against the available settings and if a
3684  * matching setting is found updates kbdev->serialize_jobs.
3685  *
3686  * Return: @count if the function succeeded. An error code on failure.
3687  */
3688 static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
3689                                                    const char __user *ubuf,
3690                                                    size_t count, loff_t *ppos)
3691 {
3692     struct seq_file *s = file->private_data;
3693     struct kbase_device *kbdev = s->private;
3694     char buf[MAX_SERIALIZE_JOBS_NAME_LEN];
3695 
3696     CSTD_UNUSED(ppos);
3697 
3698     count = min_t(size_t, sizeof(buf) - 1, count);
3699     if (copy_from_user(buf, ubuf, count)) {
3700         return -EFAULT;
3701     }
3702 
3703     buf[count] = 0;
3704 
3705     return update_serialize_jobs_setting(kbdev, buf, count);
3706 }
3707 
3708 /**
3709  * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs
3710  *                                     debugfs file
3711  * @in:   inode pointer
3712  * @file: file pointer
3713  *
3714  * Return: Zero on success, error code on failure
3715  */
3716 static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
3717                                               struct file *file)
3718 {
3719     return single_open(file, kbasep_serialize_jobs_seq_debugfs_show,
3720                        in->i_private);
3721 }
3722 
3723 static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
3724     .owner = THIS_MODULE,
3725     .open = kbasep_serialize_jobs_debugfs_open,
3726     .read = seq_read,
3727     .write = kbasep_serialize_jobs_debugfs_write,
3728     .llseek = seq_lseek,
3729     .release = single_release,
3730 };
3731 
3732 #endif /* CONFIG_DEBUG_FS */
3733 
3734 /**
3735  * show_serialize_jobs_sysfs - Show callback for serialize_jobs sysfs file.
3736  *
3737  * This function is called to get the contents of the serialize_jobs sysfs
3738  * file. This is a list of the available settings with the currently active
3739  * one surrounded by square brackets.
3740  *
3741  * @dev:    The device this sysfs file is for
3742  * @attr:    The attributes of the sysfs file
3743  * @buf:    The output buffer for the sysfs file contents
3744  *
3745  * Return: The number of bytes output to @buf.
3746  */
3747 static ssize_t show_serialize_jobs_sysfs(struct device *dev,
3748                                          struct device_attribute *attr,
3749                                          char *buf)
3750 {
3751     struct kbase_device *kbdev = to_kbase_device(dev);
3752     ssize_t ret = 0;
3753     int i;
3754 
3755     for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
3756         if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting) {
3757             ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ",
3758                              serialize_jobs_settings[i].name);
3759         } else {
3760             ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ",
3761                              serialize_jobs_settings[i].name);
3762         }
3763     }
3764 
3765     if (ret < PAGE_SIZE - 1) {
3766         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
3767     } else {
3768         buf[PAGE_SIZE - 2] = '\n';
3769         buf[PAGE_SIZE - 1] = '\0';
3770         ret = PAGE_SIZE - 1;
3771     }
3772 
3773     return ret;
3774 }
3775 
3776 /**
3777  * store_serialize_jobs_sysfs - Store callback for serialize_jobs sysfs file.
3778  *
3779  * This function is called when the serialize_jobs sysfs file is written to.
3780  * It matches the requested setting against the available settings and if a
3781  * matching setting is found updates kbdev->serialize_jobs.
3782  *
3783  * @dev:    The device this sysfs file is for
3784  * @attr:    The attributes of the sysfs file
3785  * @buf:    The value written to the sysfs file
3786  * @count:    The number of bytes to write to the sysfs file
3787  *
3788  * Return: @count if the function succeeded. An error code on failure.
3789  */
3790 static ssize_t store_serialize_jobs_sysfs(struct device *dev,
3791                                           struct device_attribute *attr,
3792                                           const char *buf, size_t count)
3793 {
3794     return update_serialize_jobs_setting(to_kbase_device(dev), buf, count);
3795 }
3796 
3797 static DEVICE_ATTR(serialize_jobs, 0600, show_serialize_jobs_sysfs,
3798                    store_serialize_jobs_sysfs);
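
/*
 * Example usage of the serialize_jobs sysfs file (path is platform
 * dependent). Reading lists every setting with the active one in square
 * brackets; writing one of the names from serialize_jobs_settings[] selects
 * it:
 *
 *   cat /sys/class/misc/mali0/device/serialize_jobs
 *   echo full > /sys/class/misc/mali0/device/serialize_jobs
 */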
3799 #endif /* MALI_KBASE_BUILD */
3800 #endif /* !MALI_USE_CSF */
3801 
3802 static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
3803 {
3804     struct kbase_device *kbdev = container_of(
3805         data, struct kbase_device, protected_mode_hwcnt_disable_work);
3806     unsigned long flags;
3807 
3808     bool do_disable;
3809 
3810     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3811     do_disable = !kbdev->protected_mode_hwcnt_desired &&
3812                  !kbdev->protected_mode_hwcnt_disabled;
3813     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3814 
3815     if (!do_disable) {
3816         return;
3817     }
3818 
3819     kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
3820 
3821     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3822     do_disable = !kbdev->protected_mode_hwcnt_desired &&
3823                  !kbdev->protected_mode_hwcnt_disabled;
3824 
3825     if (do_disable) {
3826         /* Protected mode state did not change while we were doing the
3827          * disable, so commit the work we just performed and continue
3828          * the state machine.
3829          */
3830         kbdev->protected_mode_hwcnt_disabled = true;
3831 #if !MALI_USE_CSF
3832         kbase_backend_slot_update(kbdev);
3833 #endif /* !MALI_USE_CSF */
3834     } else {
3835         /* Protected mode state was updated while we were doing the
3836          * disable, so we need to undo the disable we just performed.
3837          */
3838         kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
3839     }
3840 
3841     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3842 }
3843 
3844 static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
3845 {
3846     struct kbase_device *kbdev = pdev->data;
3847 
3848     return kbase_pm_protected_mode_enable(kbdev);
3849 }
3850 
3851 static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
3852 {
3853     struct kbase_device *kbdev = pdev->data;
3854 
3855     return kbase_pm_protected_mode_disable(kbdev);
3856 }
3857 
3858 static const struct protected_mode_ops kbasep_native_protected_ops = {
3859     .protected_mode_enable = kbasep_protected_mode_enable,
3860     .protected_mode_disable = kbasep_protected_mode_disable};
3861 
3862 int kbase_protected_mode_init(struct kbase_device *kbdev)
3863 {
3864     /* Use native protected ops */
3865     kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev), GFP_KERNEL);
3866     if (!kbdev->protected_dev) {
3867         return -ENOMEM;
3868     }
3869     kbdev->protected_dev->data = kbdev;
3870     kbdev->protected_ops = &kbasep_native_protected_ops;
3871     INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
3872               kbasep_protected_mode_hwcnt_disable_worker);
3873     kbdev->protected_mode_hwcnt_desired = true;
3874     kbdev->protected_mode_hwcnt_disabled = false;
3875     return 0;
3876 }
3877 
3878 void kbase_protected_mode_term(struct kbase_device *kbdev)
3879 {
3880     cancel_work_sync(&kbdev->protected_mode_hwcnt_disable_work);
3881     kfree(kbdev->protected_dev);
3882 }
3883 
3884 #ifdef CONFIG_MALI_BIFROST_NO_MALI
3885 static int kbase_common_reg_map(struct kbase_device *kbdev)
3886 {
3887     return 0;
3888 }
3889 static void kbase_common_reg_unmap(struct kbase_device *const kbdev)
3890 {
3891 }
3892 #else  /* CONFIG_MALI_BIFROST_NO_MALI */
3893 static int kbase_common_reg_map(struct kbase_device *kbdev)
3894 {
3895     int err = 0;
3896 
3897     if (!request_mem_region(kbdev->reg_start, kbdev->reg_size,
3898                             dev_name(kbdev->dev))) {
3899         dev_err(kbdev->dev, "Register window unavailable\n");
3900         err = -EIO;
3901         goto out_region;
3902     }
3903 
3904     kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
3905     if (!kbdev->reg) {
3906         dev_err(kbdev->dev, "Can't remap register window\n");
3907         err = -EINVAL;
3908         goto out_ioremap;
3909     }
3910 
3911     return err;
3912 
3913 out_ioremap:
3914     release_mem_region(kbdev->reg_start, kbdev->reg_size);
3915 out_region:
3916     return err;
3917 }
3918 
3919 static void kbase_common_reg_unmap(struct kbase_device *const kbdev)
3920 {
3921     if (kbdev->reg) {
3922         iounmap(kbdev->reg);
3923         release_mem_region(kbdev->reg_start, kbdev->reg_size);
3924         kbdev->reg = NULL;
3925         kbdev->reg_start = 0;
3926         kbdev->reg_size = 0;
3927     }
3928 }
3929 #endif /* CONFIG_MALI_BIFROST_NO_MALI */
3930 
3931 int registers_map(struct kbase_device *const kbdev)
3932 {
3933     /* the first memory resource is the physical address of the GPU
3934      * registers.
3935      */
3936     struct platform_device *pdev = to_platform_device(kbdev->dev);
3937     struct resource *reg_res;
3938     int err;
3939 
3940     reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3941     if (!reg_res) {
3942         dev_err(kbdev->dev, "Invalid register resource\n");
3943         return -ENOENT;
3944     }
3945 
3946     kbdev->reg_start = reg_res->start;
3947     kbdev->reg_size = resource_size(reg_res);
3948 
3949 #if MALI_USE_CSF
3950     if (kbdev->reg_size < (CSF_HW_DOORBELL_PAGE_OFFSET +
3951                            CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE)) {
3952         dev_err(kbdev->dev, "Insufficient register space, will override to the "
3953                             "required size\n");
3954         kbdev->reg_size = CSF_HW_DOORBELL_PAGE_OFFSET +
3955                           CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE;
3956     }
3957 #endif
3958 
3959     err = kbase_common_reg_map(kbdev);
3960     if (err) {
3961         dev_err(kbdev->dev, "Failed to map registers\n");
3962         return err;
3963     }
3964 
3965     return 0;
3966 }
3967 
3968 void registers_unmap(struct kbase_device *kbdev)
3969 {
3970     kbase_common_reg_unmap(kbdev);
3971 }
3972 
3973 #if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
3974 
3975 static bool kbase_is_pm_enabled(const struct device_node *gpu_node)
3976 {
3977     const struct device_node *power_model_node;
3978     const void *cooling_cells_node;
3979     const void *operating_point_node;
3980     bool is_pm_enable = false;
3981 
3982     power_model_node = of_get_child_by_name(gpu_node, "power_model");
3983     if (power_model_node) {
3984         is_pm_enable = true;
3985     }
3986 
3987     cooling_cells_node = of_get_property(gpu_node, "#cooling-cells", NULL);
3988     if (cooling_cells_node) {
3989         is_pm_enable = true;
3990     }
3991 
3992     operating_point_node = of_get_property(gpu_node, "operating-points", NULL);
3993     if (operating_point_node) {
3994         is_pm_enable = true;
3995     }
3996 
3997     return is_pm_enable;
3998 }
3999 
4000 static bool kbase_is_pv_enabled(const struct device_node *gpu_node)
4001 {
4002     const void *arbiter_if_node;
4003 
4004     arbiter_if_node = of_get_property(gpu_node, "arbiter_if", NULL);
4005 
4006     return arbiter_if_node ? true : false;
4007 }
4008 
4009 static bool kbase_is_full_coherency_enabled(const struct device_node *gpu_node)
4010 {
4011     const void *coherency_dts;
4012     u32 coherency;
4013 
4014     coherency_dts = of_get_property(gpu_node, "system-coherency", NULL);
4015     if (coherency_dts) {
4016         coherency = be32_to_cpup(coherency_dts);
4017         if (coherency == COHERENCY_ACE) {
4018             return true;
4019         }
4020     }
4021     return false;
4022 }
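
/*
 * Note: kbase_is_pm_enabled(), kbase_is_pv_enabled() and
 * kbase_is_full_coherency_enabled() only inspect the device tree; they do
 * not change any state. kbase_device_pm_init() below uses them to reject the
 * invalid combinations of arbitration with power management and of
 * arbitration with full (ACE) coherency.
 */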
4023 
4024 #endif /* CONFIG_MALI_ARBITER_SUPPORT && CONFIG_OF */
4025 
4026 int kbase_device_pm_init(struct kbase_device *kbdev)
4027 {
4028     int err = 0;
4029 
4030 #if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
4031 
4032     u32 gpu_id;
4033     u32 product_id;
4034     u32 gpu_model_id;
4035 
4036     if (kbase_is_pv_enabled(kbdev->dev->of_node)) {
4037         if (kbase_is_pm_enabled(kbdev->dev->of_node)) {
4038             /* Arbitration AND power management invalid */
4039             dev_err(
4040                 kbdev->dev,
4041                 "Invalid combination of arbitration AND power management\n");
4042             return -EPERM;
4043         }
4044         if (kbase_is_full_coherency_enabled(kbdev->dev->of_node)) {
4045             /* Arbitration AND full coherency invalid */
4046             dev_err(kbdev->dev,
4047                     "Invalid combination of arbitration AND full coherency\n");
4048             return -EPERM;
4049         }
4050         err = kbase_arbiter_pm_early_init(kbdev);
4051         if (err == 0) {
4052             /* Check if Arbitration is running on
4053              * supported GPU platform
4054              */
4055             kbase_pm_register_access_enable(kbdev);
4056             gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
4057             kbase_pm_register_access_disable(kbdev);
4058             product_id =
4059                 KBASE_UBFX32(gpu_id, GPU_ID_VERSION_PRODUCT_ID_SHIFT, 16);
4060             gpu_model_id = GPU_ID2_MODEL_MATCH_VALUE(product_id);
4061             if (gpu_model_id != GPU_ID2_PRODUCT_TGOX &&
4062                 gpu_model_id != GPU_ID2_PRODUCT_TNOX) {
4063                 kbase_arbiter_pm_early_term(kbdev);
4064                 dev_err(kbdev->dev,
4065                         "GPU platform not suitable for arbitration\n");
4066                 return -EPERM;
4067             }
4068         }
4069     } else {
4070         err = power_control_init(kbdev);
4071     }
4072 #else
4073     err = power_control_init(kbdev);
4074 #endif /* CONFIG_MALI_ARBITER_SUPPORT && CONFIG_OF */
4075     return err;
4076 }
4077 
4078 void kbase_device_pm_term(struct kbase_device *kbdev)
4079 {
4080 #ifdef CONFIG_MALI_ARBITER_SUPPORT
4081 #ifdef CONFIG_OF
4082     if (kbase_is_pv_enabled(kbdev->dev->of_node)) {
4083         kbase_arbiter_pm_early_term(kbdev);
4084     } else {
4085         power_control_term(kbdev);
4086     }
4087 #endif /* CONFIG_OF */
4088 #else
4089     power_control_term(kbdev);
4090 #endif
4091 }
4092 
4093 int power_control_init(struct kbase_device *kbdev)
4094 {
4095 #if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF)
4096     /* Power control initialization requires at least the capability to get
4097      * regulators and clocks from the device tree, as well as parsing
4098      * arrays of unsigned integer values.
4099      *
4100      * The whole initialization process shall simply be skipped if the
4101      * minimum capability is not available.
4102      */
4103     return 0;
4104 #else
4105     struct platform_device *pdev;
4106     int err = 0;
4107     unsigned int i;
4108 #if defined(CONFIG_REGULATOR)
4109     static const char *regulator_names[] = {"mali", "shadercores"};
4110     BUILD_BUG_ON(ARRAY_SIZE(regulator_names) < BASE_MAX_NR_CLOCKS_REGULATORS);
4111 #endif /* CONFIG_REGULATOR */
4112 
4113     if (!kbdev) {
4114         return -ENODEV;
4115     }
4116 
4117     pdev = to_platform_device(kbdev->dev);
4118 
4119 #if defined(CONFIG_REGULATOR)
4120     /* Since the error code EPROBE_DEFER causes the entire probing
4121      * procedure to be restarted from scratch at a later time,
4122      * all regulators will be released before returning.
4123      *
4124      * Any other error is ignored and the driver will continue
4125      * operating with a partial initialization of regulators.
4126      */
4127     for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4128         kbdev->regulators[i] =
4129             regulator_get_optional(kbdev->dev, regulator_names[i]);
4130         if (IS_ERR_OR_NULL(kbdev->regulators[i])) {
4131             err = PTR_ERR(kbdev->regulators[i]);
4132             kbdev->regulators[i] = NULL;
4133             break;
4134         }
4135     }
4136     if (err == -EPROBE_DEFER) {
4137         while ((i > 0) && (i < BASE_MAX_NR_CLOCKS_REGULATORS)) {
4138             regulator_put(kbdev->regulators[--i]);
4139         }
4140         return err;
4141     }
4142 
4143     kbdev->nr_regulators = i;
4144     dev_dbg(&pdev->dev, "Regulators probed: %u\n", kbdev->nr_regulators);
4145 #endif
4146 
4147     /* Having more clocks than regulators is acceptable, while the
4148      * opposite shall not happen.
4149      *
4150      * Since the error code EPROBE_DEFER causes the entire probing
4151      * procedure to be restarted from scratch at a later time,
4152      * all clocks and regulators will be released before returning.
4153      *
4154      * Any other error is ignored and the driver will continue
4155      * operating with a partial initialization of clocks.
4156      */
4157     for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4158         kbdev->clocks[i] = of_clk_get(kbdev->dev->of_node, i);
4159         if (IS_ERR_OR_NULL(kbdev->clocks[i])) {
4160             err = PTR_ERR(kbdev->clocks[i]);
4161             kbdev->clocks[i] = NULL;
4162             break;
4163         }
4164 
4165         err = clk_prepare(kbdev->clocks[i]);
4166         if (err) {
4167             dev_err(kbdev->dev, "Failed to prepare and enable clock (%d)\n",
4168                     err);
4169             clk_put(kbdev->clocks[i]);
4170             break;
4171         }
4172     }
4173     if (err == -EPROBE_DEFER) {
4174         while ((i > 0) && (i < BASE_MAX_NR_CLOCKS_REGULATORS)) {
4175             clk_unprepare(kbdev->clocks[--i]);
4176             clk_put(kbdev->clocks[i]);
4177         }
4178         goto clocks_probe_defer;
4179     }
4180 
4181     kbdev->nr_clocks = i;
4182     dev_dbg(&pdev->dev, "Clocks probed: %u\n", kbdev->nr_clocks);
4183 
4184 #if defined(CONFIG_PM_OPP)
4185 #if ((KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) &&                       \
4186      defined(CONFIG_REGULATOR))
4187     if (kbdev->nr_regulators > 0) {
4188         kbdev->opp_table = dev_pm_opp_set_regulators(
4189             kbdev->dev, regulator_names, BASE_MAX_NR_CLOCKS_REGULATORS);
4190     }
4191 #endif /* (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE */
4192 #ifdef CONFIG_ARCH_ROCKCHIP
4193     err = kbase_platform_rk_init_opp_table(kbdev);
4194     if (err) {
4195         dev_err(kbdev->dev, "Failed to init_opp_table (%d)\n", err);
4196     }
4197 #else
4198     err = dev_pm_opp_of_add_table(kbdev->dev);
4199     CSTD_UNUSED(err);
4200 #endif
4201 #endif /* CONFIG_PM_OPP */
4202 
4203 #endif /* KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF) */
4204     return 0;
4205 
4206 clocks_probe_defer:
4207 #if defined(CONFIG_REGULATOR)
4208     for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4209         regulator_put(kbdev->regulators[i]);
4210     }
4211 #endif
4212     return err;
4213 }
4214 
4215 void power_control_term(struct kbase_device *kbdev)
4216 {
4217     unsigned int i;
4218 
4219 #if (KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE &&                           \
4220      !defined(LSK_OPPV2_BACKPORT))
4221 #if KERNEL_VERSION(3, 19, 0) <= LINUX_VERSION_CODE
4222     of_free_opp_table(kbdev->dev);
4223 #endif
4224 #else
4225 
4226 #if defined(CONFIG_PM_OPP)
4227     dev_pm_opp_of_remove_table(kbdev->dev);
4228 #if ((KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) &&                       \
4229      defined(CONFIG_REGULATOR))
4230     if (!IS_ERR_OR_NULL(kbdev->opp_table)) {
4231         dev_pm_opp_put_regulators(kbdev->opp_table);
4232     }
4233 #endif /* (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE */
4234 #endif /* CONFIG_PM_OPP */
4235 
4236 #endif /* KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE */
4237 
4238     for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4239         if (kbdev->clocks[i]) {
4240             clk_unprepare(kbdev->clocks[i]);
4241             clk_put(kbdev->clocks[i]);
4242             kbdev->clocks[i] = NULL;
4243         } else {
4244             break;
4245         }
4246     }
4247 
4248 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) &&  \
4249     defined(CONFIG_REGULATOR)
4250     for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4251         if (kbdev->regulators[i]) {
4252             regulator_put(kbdev->regulators[i]);
4253             kbdev->regulators[i] = NULL;
4254         }
4255     }
4256 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
4257 }
4258 
4259 #ifdef MALI_KBASE_BUILD
4260 #ifdef CONFIG_DEBUG_FS
4261 
4262 static void trigger_reset(struct kbase_device *kbdev)
4263 {
4264     kbase_pm_context_active(kbdev);
4265     if (kbase_prepare_to_reset_gpu(kbdev)) {
4266         kbase_reset_gpu(kbdev);
4267     }
4268     kbase_pm_context_idle(kbdev);
4269 }
4270 
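/* MAKE_QUIRK_ACCESSORS() generates a debugfs get/set accessor pair for one of
 * the hw_quirks_* fields and binds it with DEFINE_SIMPLE_ATTRIBUTE(). Writing
 * a new quirk value triggers a GPU reset so that the updated value takes
 * effect. Illustrative use from user space (the directory name depends on the
 * device name; "mali0" is only an assumed example):
 *
 *   echo 0x10 > /sys/kernel/debug/mali0/quirks_sc
 *   cat /sys/kernel/debug/mali0/quirks_sc
 */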
4271 #define MAKE_QUIRK_ACCESSORS(type)                                             \
4272     static int type##_quirks_set(void *data, u64 val)                          \
4273     {                                                                          \
4274         do {                                                                   \
4275             struct kbase_device *kbdev;                                        \
4276             kbdev = (struct kbase_device *)data;                               \
4277             kbdev->hw_quirks_##type = (u32)val;                                \
4278             trigger_reset(kbdev);                                              \
4279         } while (0);                                                           \
4280         return 0;                                                              \
4281     }                                                                          \
4282                                                                                \
4283     static int type##_quirks_get(void *data, u64 *val)                         \
4284     {                                                                          \
4285         do {                                                                   \
4286             struct kbase_device *kbdev;                                        \
4287             kbdev = (struct kbase_device *)data;                               \
4288             *val = kbdev->hw_quirks_##type;                                    \
4289         } while (0);                                                           \
4290         return 0;                                                              \
4291     }                                                                          \
4292     DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,           \
4293                             type##_quirks_set, "%llu\n")
4294 
4295 MAKE_QUIRK_ACCESSORS(sc);
4296 MAKE_QUIRK_ACCESSORS(tiler);
4297 MAKE_QUIRK_ACCESSORS(mmu);
4298 MAKE_QUIRK_ACCESSORS(jm);
4299 
4300 static ssize_t kbase_device_debugfs_reset_write(struct file *file,
4301                                                 const char __user *ubuf,
4302                                                 size_t count, loff_t *ppos)
4303 {
4304     struct kbase_device *kbdev = file->private_data;
4305     CSTD_UNUSED(ubuf);
4306     CSTD_UNUSED(count);
4307     CSTD_UNUSED(ppos);
4308 
4309     trigger_reset(kbdev);
4310 
4311     return count;
4312 }
4313 
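/* File operations for the "reset" debugfs entry: any write to the file
 * requests a GPU reset and the written data itself is ignored. For example
 * (assuming debugfs is mounted and the device is named "mali0"):
 *
 *   echo 1 > /sys/kernel/debug/mali0/reset
 */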
4314 static const struct file_operations fops_trigger_reset = {
4315     .owner = THIS_MODULE,
4316     .open = simple_open,
4317     .write = kbase_device_debugfs_reset_write,
4318     .llseek = default_llseek,
4319 };
4320 
4321 /**
4322  * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
4323  * @file: File object the read is for
4324  * @buf:  User buffer to populate with data
4325  * @len:  Length of user buffer
4326  * @ppos: Offset within file object
4327  *
4328  * Retrieves the current status of protected debug mode
4329  * (0 = disabled, 1 = enabled)
4330  *
4331  * Return: Number of bytes added to user buffer
4332  */
4333 static ssize_t debugfs_protected_debug_mode_read(struct file *file,
4334                                                  char __user *buf, size_t len,
4335                                                  loff_t *ppos)
4336 {
4337     struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
4338     u32 gpu_status;
4339     ssize_t ret_val;
4340 
4341     kbase_pm_context_active(kbdev);
4342     gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS));
4343     kbase_pm_context_idle(kbdev);
4344 
4345     if (gpu_status & GPU_DBGEN) {
4346         ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
4347     } else {
4348         ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
4349     }
4350 
4351     return ret_val;
4352 }
4353 
4354 /*
4355  * fops_protected_debug_mode - "protected_debug_mode" debugfs fops
4356  *
4357  * Contains the file operations for the "protected_debug_mode" debugfs file
4358  */
4359 static const struct file_operations fops_protected_debug_mode = {
4360     .owner = THIS_MODULE,
4361     .open = simple_open,
4362     .read = debugfs_protected_debug_mode_read,
4363     .llseek = default_llseek,
4364 };
4365 
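/* The "mem_pool_max_size" and "lp_mem_pool_max_size" entries below expose the
 * default maximum sizes of the small-page and large-page memory pools that
 * newly created contexts inherit, one value per memory group.
 */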
4366 static int kbase_device_debugfs_mem_pool_max_size_show(struct seq_file *sfile,
4367                                                        void *data)
4368 {
4369     CSTD_UNUSED(data);
4370     return kbase_debugfs_helper_seq_read(
4371         sfile, MEMORY_GROUP_MANAGER_NR_GROUPS,
4372         kbase_mem_pool_config_debugfs_max_size);
4373 }
4374 
4375 static ssize_t kbase_device_debugfs_mem_pool_max_size_write(
4376     struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
4377 {
4378     int err = 0;
4379 
4380     CSTD_UNUSED(ppos);
4381     err = kbase_debugfs_helper_seq_write(
4382         file, ubuf, count, MEMORY_GROUP_MANAGER_NR_GROUPS,
4383         kbase_mem_pool_config_debugfs_set_max_size);
4384 
4385     return err ? err : count;
4386 }
4387 
4388 static int kbase_device_debugfs_mem_pool_max_size_open(struct inode *in,
4389                                                        struct file *file)
4390 {
4391     return single_open(file, kbase_device_debugfs_mem_pool_max_size_show,
4392                        in->i_private);
4393 }
4394 
4395 static const struct file_operations
4396     kbase_device_debugfs_mem_pool_max_size_fops = {
4397         .owner = THIS_MODULE,
4398         .open = kbase_device_debugfs_mem_pool_max_size_open,
4399         .read = seq_read,
4400         .write = kbase_device_debugfs_mem_pool_max_size_write,
4401         .llseek = seq_lseek,
4402         .release = single_release,
4403 };
4404 
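/* Create the per-device debugfs tree. The layout built below is roughly:
 *
 *   <debugfs>/<devname>/                  device-level entries
 *   <debugfs>/<devname>/ctx/              per-context entries
 *   <debugfs>/<devname>/ctx/defaults/     defaults inherited by new contexts
 *   <debugfs>/<devname>/instrumentation/  instrumentation entries
 *
 * On any failure the partially created tree is removed again.
 */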
4405 int kbase_device_debugfs_init(struct kbase_device *kbdev)
4406 {
4407     struct dentry *debugfs_ctx_defaults_directory;
4408     int err;
4409     /* Prevent unprivileged use of the debug file system on older
4410      * kernel versions.
4411      */
4412 #if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE)
4413     /* Only on newer kernels is it safe to expose debugfs more widely. */
4414     const mode_t mode = 0644;
4415 #else
4416     const mode_t mode = 0600;
4417 #endif
4418 
4419     kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname, NULL);
4420     if (!kbdev->mali_debugfs_directory) {
4421         dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
4422         err = -ENOMEM;
4423         goto out;
4424     }
4425 
4426     kbdev->debugfs_ctx_directory =
4427         debugfs_create_dir("ctx", kbdev->mali_debugfs_directory);
4428     if (!kbdev->debugfs_ctx_directory) {
4429         dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
4430         err = -ENOMEM;
4431         goto out;
4432     }
4433 
4434     kbdev->debugfs_instr_directory =
4435         debugfs_create_dir("instrumentation", kbdev->mali_debugfs_directory);
4436     if (!kbdev->debugfs_instr_directory) {
4437         dev_err(kbdev->dev,
4438                 "Couldn't create mali debugfs instrumentation directory\n");
4439         err = -ENOMEM;
4440         goto out;
4441     }
4442 
4443     debugfs_ctx_defaults_directory =
4444         debugfs_create_dir("defaults", kbdev->debugfs_ctx_directory);
4445     if (!debugfs_ctx_defaults_directory) {
4446         dev_err(kbdev->dev,
4447                 "Couldn't create mali debugfs ctx defaults directory\n");
4448         err = -ENOMEM;
4449         goto out;
4450     }
4451 
4452 #if !MALI_CUSTOMER_RELEASE
4453     kbasep_regs_dump_debugfs_init(kbdev);
4454 #endif /* !MALI_CUSTOMER_RELEASE */
4455     kbasep_regs_history_debugfs_init(kbdev);
4456 
4457 #if !MALI_USE_CSF
4458     kbase_debug_job_fault_debugfs_init(kbdev);
4459 #endif /* !MALI_USE_CSF */
4460 
4461     kbasep_gpu_memory_debugfs_init(kbdev);
4462     kbase_as_fault_debugfs_init(kbdev);
4463 #ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY_VIA_DEBUG_FS
4464     kbase_instr_backend_debugfs_init(kbdev);
4465 #endif
4466     /* fops_* variables created by invocations of macro
4467      * MAKE_QUIRK_ACCESSORS() above. */
4468     debugfs_create_file("quirks_sc", 0644, kbdev->mali_debugfs_directory,
4469                         kbdev, &fops_sc_quirks);
4470     debugfs_create_file("quirks_tiler", 0644, kbdev->mali_debugfs_directory,
4471                         kbdev, &fops_tiler_quirks);
4472     debugfs_create_file("quirks_mmu", 0644, kbdev->mali_debugfs_directory,
4473                         kbdev, &fops_mmu_quirks);
4474     debugfs_create_file("quirks_jm", 0644, kbdev->mali_debugfs_directory,
4475                         kbdev, &fops_jm_quirks);
4476 
4477     debugfs_create_bool("infinite_cache", mode, debugfs_ctx_defaults_directory,
4478                         &kbdev->infinite_cache_active_default);
4479 
4480     debugfs_create_file("mem_pool_max_size", mode,
4481                         debugfs_ctx_defaults_directory,
4482                         &kbdev->mem_pool_defaults.small,
4483                         &kbase_device_debugfs_mem_pool_max_size_fops);
4484 
4485     debugfs_create_file("lp_mem_pool_max_size", mode,
4486                         debugfs_ctx_defaults_directory,
4487                         &kbdev->mem_pool_defaults.large,
4488                         &kbase_device_debugfs_mem_pool_max_size_fops);
4489 
4490     if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
4491         debugfs_create_file("protected_debug_mode", S_IRUGO,
4492                             kbdev->mali_debugfs_directory, kbdev,
4493                             &fops_protected_debug_mode);
4494     }
4495 
4496     debugfs_create_file("reset", 0644, kbdev->mali_debugfs_directory, kbdev,
4497                         &fops_trigger_reset);
4498 
4499     kbase_ktrace_debugfs_init(kbdev);
4500 
4501 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
4502 #ifdef CONFIG_DEVFREQ_THERMAL
4503     if (kbdev->devfreq && !kbdev->model_data) {
4504         kbase_ipa_debugfs_init(kbdev);
4505     }
4506 #endif /* CONFIG_DEVFREQ_THERMAL */
4507 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
4508 
4509 #if MALI_USE_CSF
4510     kbase_csf_debugfs_init(kbdev);
4511 #else
4512     debugfs_create_file("serialize_jobs", S_IRUGO | S_IWUSR,
4513                         kbdev->mali_debugfs_directory, kbdev,
4514                         &kbasep_serialize_jobs_debugfs_fops);
4515 #endif
4516 
4517     return 0;
4518 
4519 out:
4520     debugfs_remove_recursive(kbdev->mali_debugfs_directory);
4521     return err;
4522 }
4523 
4524 void kbase_device_debugfs_term(struct kbase_device *kbdev)
4525 {
4526     debugfs_remove_recursive(kbdev->mali_debugfs_directory);
4527 }
4528 #endif /* CONFIG_DEBUG_FS */
4529 #endif /* MALI_KBASE_BUILD */
4530 
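/* The "system-coherency" device-tree property read below selects the
 * coherency mode. The value must correspond to one of the COHERENCY_*
 * identifiers and must be reported as supported by the GPU, otherwise it is
 * ignored with a warning. Illustrative (not authoritative) device-tree
 * snippet, with a placeholder node name and address:
 *
 *   gpu@6e000000 {
 *       compatible = "arm,mali-bifrost";
 *       system-coherency = <0>;    // numeric COHERENCY_* value
 *   };
 */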
4531 int kbase_device_coherency_init(struct kbase_device *kbdev)
4532 {
4533 #ifdef CONFIG_OF
4534     u32 supported_coherency_bitmap =
4535         kbdev->gpu_props.props.raw_props.coherency_mode;
4536     const void *coherency_override_dts;
4537     u32 override_coherency, gpu_id;
4538     unsigned int prod_id;
4539 
4540     gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
4541     gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
4542     prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
4543 
4544     /* Only for tMIx:
4545      * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
4546      * documented for tMIx, so force the correct value here.
4547      */
4548     if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == GPU_ID2_PRODUCT_TMIX) {
4549         if (supported_coherency_bitmap ==
4550             COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
4551             supported_coherency_bitmap |=
4552                 COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
4553         }
4554     }
4555 
4556 #endif /* CONFIG_OF */
4557 
4558     kbdev->system_coherency = COHERENCY_NONE;
4559 
4560     /* device tree may override the coherency */
4561 #ifdef CONFIG_OF
4562     coherency_override_dts =
4563         of_get_property(kbdev->dev->of_node, "system-coherency", NULL);
4564     if (coherency_override_dts) {
4565         override_coherency = be32_to_cpup(coherency_override_dts);
4566         if ((override_coherency <= COHERENCY_NONE) &&
4567             (supported_coherency_bitmap &
4568              COHERENCY_FEATURE_BIT(override_coherency))) {
4569             kbdev->system_coherency = override_coherency;
4570 
4571             dev_info(kbdev->dev, "Using coherency mode %u set from dtb",
4572                      override_coherency);
4573         } else {
4574             dev_warn(kbdev->dev,
4575                      "Ignoring unsupported coherency mode %u set from dtb",
4576                      override_coherency);
4577         }
4578     }
4579 
4580 #endif /* CONFIG_OF */
4581 
4582     kbdev->gpu_props.props.raw_props.coherency_mode = kbdev->system_coherency;
4583 
4584     return 0;
4585 }
4586 
4587 #ifdef CONFIG_MALI_BUSLOG
4588 
4589 /* Callback used by the kbase bus logger client to initiate a GPU reset
4590  * when the bus log is restarted. The GPU reset is used as a reference point
4591  * in HW bus log analyses.
4592  */
4593 static void kbase_logging_started_cb(void *data)
4594 {
4595     struct kbase_device *kbdev = (struct kbase_device *)data;
4596 
4597     if (kbase_prepare_to_reset_gpu(kbdev)) {
4598         kbase_reset_gpu(kbdev);
4599     }
4600     dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
4601 }
4602 
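/* Register this device with the bus logger core so that bus activity can be
 * captured, and set a fixed capture threshold (0x40000000). Only built when
 * CONFIG_MALI_BUSLOG is enabled.
 */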
4603 int buslog_init(struct kbase_device *kbdev)
4604 {
4605     int err = 0;
4606 
4607     err = bl_core_client_register(kbdev->devname, kbase_logging_started_cb,
4608                                   kbdev, &kbdev->buslogger, THIS_MODULE, NULL);
4609     if (err == 0) {
4610         bl_core_set_threshold(kbdev->buslogger, 0x40000000);
4611     }
4612 
4613     return err;
4614 }
4615 
4616 void buslog_term(struct kbase_device *kbdev)
4617 {
4618     bl_core_client_unregister(kbdev->buslogger);
4619 }
4620 #endif
4621 
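/* sysfs attributes exported for the device. The scheduling-related attributes
 * are kept in a separate attribute group so that they appear under a
 * "scheduling" sub-directory of the device's sysfs node.
 */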
4622 static struct attribute *kbase_scheduling_attrs[] = {
4623 #if !MALI_USE_CSF
4624     &dev_attr_serialize_jobs.attr,
4625 #endif /* !MALI_USE_CSF */
4626     NULL};
4627 
4628 static struct attribute *kbase_attrs[] = {
4629 #ifdef CONFIG_MALI_BIFROST_DEBUG
4630     &dev_attr_debug_command.attr,
4631 #if !MALI_USE_CSF
4632     &dev_attr_js_softstop_always.attr,
4633 #endif /* !MALI_USE_CSF */
4634 #endif
4635 #if !MALI_USE_CSF
4636     &dev_attr_js_timeouts.attr,
4637     &dev_attr_soft_job_timeout.attr,
4638 #endif /* !MALI_USE_CSF */
4639     &dev_attr_gpuinfo.attr,
4640     &dev_attr_dvfs_period.attr,
4641     &dev_attr_pm_poweroff.attr,
4642     &dev_attr_reset_timeout.attr,
4643 #if !MALI_USE_CSF
4644     &dev_attr_js_scheduling_period.attr,
4645 #endif /* !MALI_USE_CSF */
4646     &dev_attr_power_policy.attr,
4647     &dev_attr_core_mask.attr,
4648     &dev_attr_mem_pool_size.attr,
4649     &dev_attr_mem_pool_max_size.attr,
4650     &dev_attr_lp_mem_pool_size.attr,
4651     &dev_attr_lp_mem_pool_max_size.attr,
4652 #if !MALI_USE_CSF
4653     &dev_attr_js_ctx_scheduling_mode.attr,
4654 #endif /* !MALI_USE_CSF */
4655     NULL};
4656 
4657 #define SYSFS_SCHEDULING_GROUP "scheduling"
4658 static const struct attribute_group kbase_scheduling_attr_group = {
4659     .name = SYSFS_SCHEDULING_GROUP,
4660     .attrs = kbase_scheduling_attrs,
4661 };
4662 
4663 static const struct attribute_group kbase_attr_group = {
4664     .attrs = kbase_attrs,
4665 };
4666 
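/* Populate the miscdevice descriptor (registered elsewhere during device
 * initialisation) and create the device's sysfs attribute groups. If the
 * "scheduling" group cannot be created, the main group is removed again and
 * the error is returned.
 */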
4667 int kbase_sysfs_init(struct kbase_device *kbdev)
4668 {
4669     int err = 0;
4670 
4671     kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
4672     kbdev->mdev.name = kbdev->devname;
4673     kbdev->mdev.fops = &kbase_fops;
4674     kbdev->mdev.parent = get_device(kbdev->dev);
4675     kbdev->mdev.mode = 0666;
4676 
4677     err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
4678     if (!err) {
4679         err =
4680             sysfs_create_group(&kbdev->dev->kobj, &kbase_scheduling_attr_group);
4681         if (err) {
4682             dev_err(kbdev->dev, "Creation of %s sysfs group failed",
4683                     SYSFS_SCHEDULING_GROUP);
4684             sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
4685         }
4686     }
4687 
4688     return err;
4689 }
4690 
4691 void kbase_sysfs_term(struct kbase_device *kbdev)
4692 {
4693     sysfs_remove_group(&kbdev->dev->kobj, &kbase_scheduling_attr_group);
4694     sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
4695     put_device(kbdev->dev);
4696 }
4697 
4698 static int kbase_platform_device_remove(struct platform_device *pdev)
4699 {
4700     struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
4701 
4702     if (!kbdev) {
4703         return -ENODEV;
4704     }
4705 
4706     kbase_device_term(kbdev);
4707     dev_set_drvdata(kbdev->dev, NULL);
4708     kbase_device_free(kbdev);
4709 
4710     return 0;
4711 }
4712 
4713 void kbase_backend_devfreq_term(struct kbase_device *kbdev)
4714 {
4715 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
4716     if (kbdev->devfreq) {
4717         kbase_devfreq_term(kbdev);
4718     }
4719 #endif
4720 }
4721 
4722 int kbase_backend_devfreq_init(struct kbase_device *kbdev)
4723 {
4724 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
4725     /* Devfreq uses hardware counters, so it must be initialized after them. */
4726     int err = kbase_devfreq_init(kbdev);
4727     if (err) {
4728         dev_err(kbdev->dev, "Continuing without devfreq\n");
4729     }
4730 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
4731     return 0;
4732 }
4733 
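/* Platform driver probe: allocate a kbase_device, attach it to the platform
 * device and run the full device initialisation. An -EPROBE_DEFER from the
 * clock/regulator lookups is propagated so that the core retries the probe
 * later; any other failure frees the device again.
 */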
4734 static int kbase_platform_device_probe(struct platform_device *pdev)
4735 {
4736     struct kbase_device *kbdev;
4737     int err = 0;
4738 
4739     mali_kbase_print_cs_experimental();
4740 
4741     kbdev = kbase_device_alloc();
4742     if (!kbdev) {
4743         dev_err(&pdev->dev, "Allocate device failed\n");
4744         return -ENOMEM;
4745     }
4746 
4747     kbdev->dev = &pdev->dev;
4748     dev_set_drvdata(kbdev->dev, kbdev);
4749 
4750     err = kbase_device_init(kbdev);
4751     if (err) {
4752         if (err == -EPROBE_DEFER) {
4753             dev_err(kbdev->dev, "Device initialization deferred\n");
4754         } else {
4755             dev_err(kbdev->dev, "Device initialization failed\n");
4756         }
4757 
4758         dev_set_drvdata(kbdev->dev, NULL);
4759         kbase_device_free(kbdev);
4760     } else {
4761 #ifdef MALI_KBASE_BUILD
4762         dev_info(kbdev->dev, "Probed as %s\n",
4763                  dev_name(kbdev->mdev.this_device));
4764 #endif /* MALI_KBASE_BUILD */
4765         kbase_increment_device_id();
4766 #ifdef CONFIG_MALI_ARBITER_SUPPORT
4767         mutex_lock(&kbdev->pm.lock);
4768         kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_INITIALIZED_EVT);
4769         mutex_unlock(&kbdev->pm.lock);
4770 #endif
4771     }
4772 
4773     return err;
4774 }
4775 
4776 #undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
4777 
4778 /**
4779  * kbase_device_suspend - Suspend callback from the OS.
4780  *
4781  * This is called by Linux when the device should suspend.
4782  *
4783  * @dev:  The device to suspend
4784  *
4785  * Return: A standard Linux error code
4786  */
4787 static int kbase_device_suspend(struct device *dev)
4788 {
4789     struct kbase_device *kbdev = to_kbase_device(dev);
4790 
4791     if (!kbdev) {
4792         return -ENODEV;
4793     }
4794 
4795     kbase_pm_suspend(kbdev);
4796 
4797 #if defined(CONFIG_MALI_BIFROST_DEVFREQ) &&                                    \
4798     (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4799     dev_dbg(dev, "Callback %s\n", __func__);
4800     if (kbdev->devfreq) {
4801         kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
4802         flush_workqueue(kbdev->devfreq_queue.workq);
4803     }
4804 #endif
4805     return 0;
4806 }
4807 
4808 /**
4809  * kbase_device_resume - Resume callback from the OS.
4810  *
4811  * This is called by Linux when the device should resume from suspension.
4812  *
4813  * @dev:  The device to resume
4814  *
4815  * Return: A standard Linux error code
4816  */
4817 static int kbase_device_resume(struct device *dev)
4818 {
4819     struct kbase_device *kbdev = to_kbase_device(dev);
4820 
4821     if (!kbdev) {
4822         return -ENODEV;
4823     }
4824 
4825     kbase_pm_resume(kbdev);
4826 
4827 #if defined(CONFIG_MALI_BIFROST_DEVFREQ) &&                                    \
4828     (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4829     dev_dbg(dev, "Callback %s\n", __func__);
4830     if (kbdev->devfreq) {
4831         mutex_lock(&kbdev->pm.lock);
4832         if (kbdev->pm.active_count > 0) {
4833             kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
4834         }
4835         mutex_unlock(&kbdev->pm.lock);
4836         flush_workqueue(kbdev->devfreq_queue.workq);
4837     }
4838 #endif
4839     return 0;
4840 }
4841 
4842 /**
4843  * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
4844  *
4845  * This is called by Linux when the device should prepare for a condition in
4846  * which it will not be able to communicate with the CPU(s) and RAM due to
4847  * power management.
4848  *
4849  * @dev:  The device to suspend
4850  *
4851  * Return: A standard Linux error code
4852  */
4853 #ifdef KBASE_PM_RUNTIME
4854 static int kbase_device_runtime_suspend(struct device *dev)
4855 {
4856     struct kbase_device *kbdev = to_kbase_device(dev);
4857 
4858     if (!kbdev) {
4859         return -ENODEV;
4860     }
4861 
4862 #if defined(CONFIG_MALI_BIFROST_DEVFREQ) &&                                    \
4863     (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4864     if (kbdev->devfreq) {
4865         kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
4866     }
4867 #endif
4868 
4869     if (kbdev->pm.backend.callback_power_runtime_off) {
4870         kbdev->pm.backend.callback_power_runtime_off(kbdev);
4871         dev_dbg(dev, "runtime suspend\n");
4872     }
4873     return 0;
4874 }
4875 #endif /* KBASE_PM_RUNTIME */
4876 
4877 /**
4878  * kbase_device_runtime_resume - Runtime resume callback from the OS.
4879  *
4880  * This is called by Linux when the device should go into a fully active state.
4881  *
4882  * @dev:  The device to resume
4883  *
4884  * Return: A standard Linux error code
4885  */
4886 
4887 #ifdef KBASE_PM_RUNTIME
4888 static int kbase_device_runtime_resume(struct device *dev)
4889 {
4890     int ret = 0;
4891     struct kbase_device *kbdev = to_kbase_device(dev);
4892 
4893     if (!kbdev) {
4894         return -ENODEV;
4895     }
4896 
4897     dev_dbg(dev, "Callback %s\n", __func__);
4898     if (kbdev->pm.backend.callback_power_runtime_on) {
4899         ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
4900         dev_dbg(dev, "runtime resume\n");
4901     }
4902 
4903 #if defined(CONFIG_MALI_BIFROST_DEVFREQ) &&                                    \
4904     (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
4905     if (kbdev->devfreq) {
4906         kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
4907     }
4908 #endif
4909 
4910     return ret;
4911 }
4912 #endif /* KBASE_PM_RUNTIME */
4913 
4914 #ifdef KBASE_PM_RUNTIME
4915 /**
4916  * kbase_device_runtime_idle - Runtime idle callback from the OS.
4917  * @dev: The device to suspend
4918  *
4919  * This is called by Linux when the device appears to be inactive and it might
4920  * be placed into a low power state.
4921  *
4922  * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
4923  * otherwise a standard Linux error code
4924  */
4925 static int kbase_device_runtime_idle(struct device *dev)
4926 {
4927     struct kbase_device *kbdev = to_kbase_device(dev);
4928 
4929     if (!kbdev) {
4930         return -ENODEV;
4931     }
4932 
4933     dev_dbg(dev, "Callback %s\n", __func__);
4934     /* Use platform specific implementation if it exists. */
4935     if (kbdev->pm.backend.callback_power_runtime_idle) {
4936         return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
4937     }
4938 
4939     /* Just need to update the device's last busy mark. Kernel will respect
4940      * the autosuspend delay and so won't suspend the device immediately.
4941      */
4942     pm_runtime_mark_last_busy(kbdev->dev);
4943     return 0;
4944 }
4945 #endif /* KBASE_PM_RUNTIME */
4946 
4947 /* The power management operations for the platform driver.
4948  */
4949 static const struct dev_pm_ops kbase_pm_ops = {
4950     .suspend = kbase_device_suspend,
4951     .resume = kbase_device_resume,
4952 #ifdef KBASE_PM_RUNTIME
4953     .runtime_suspend = kbase_device_runtime_suspend,
4954     .runtime_resume = kbase_device_runtime_resume,
4955     .runtime_idle = kbase_device_runtime_idle,
4956 #endif /* KBASE_PM_RUNTIME */
4957 };
4958 
4959 #ifdef CONFIG_OF
4960 static const struct of_device_id kbase_dt_ids[] = {
4961     {.compatible = "arm,mali-bifrost"}, { /* sentinel */ }};
4962 MODULE_DEVICE_TABLE(of, kbase_dt_ids);
4963 #endif
4964 
4965 static struct platform_driver kbase_platform_driver = {
4966     .probe = kbase_platform_device_probe,
4967     .remove = kbase_platform_device_remove,
4968     .driver =
4969         {
4970             .name = kbase_drv_name,
4971             .owner = THIS_MODULE,
4972             .pm = &kbase_pm_ops,
4973             .of_match_table = of_match_ptr(kbase_dt_ids),
4974         },
4975 };
4976 
4977 /*
4978  * When Device Tree is used, the driver no longer provides a shortcut to
4979  * create the Mali platform device itself.
4980  */
4981 #ifdef CONFIG_OF
4982 module_platform_driver(kbase_platform_driver);
4983 #else
4984 
4985 static int __init kbase_driver_init(void)
4986 {
4987     int ret;
4988 
4989     ret = kbase_platform_register();
4990     if (ret) {
4991         return ret;
4992     }
4993 
4994     ret = platform_driver_register(&kbase_platform_driver);
4995 
4996     if (ret) {
4997         kbase_platform_unregister();
4998     }
4999 
5000     return ret;
5001 }
5002 
5003 static void __exit kbase_driver_exit(void)
5004 {
5005     platform_driver_unregister(&kbase_platform_driver);
5006     kbase_platform_unregister();
5007 }
5008 
5009 module_init(kbase_driver_init);
5010 module_exit(kbase_driver_exit);
5011 
5012 #endif /* CONFIG_OF */
5013 
5014 MODULE_LICENSE("GPL");
5015 MODULE_VERSION(MALI_RELEASE_NAME " (UK version " __stringify(
5016     BASE_UK_VERSION_MAJOR) "." __stringify(BASE_UK_VERSION_MINOR) ")");
5017 
5018 #define CREATE_TRACE_POINTS
5019 /* Create the trace points (otherwise we just get code to call a tracepoint) */
5020 #include "mali_linux_trace.h"
5021 
5022 #ifdef CONFIG_MALI_BIFROST_GATOR_SUPPORT
5023 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
5024 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
5025 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
5026 EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
5027 
5028 void kbase_trace_mali_pm_status(u32 dev_id, u32 event, u64 value)
5029 {
5030     trace_mali_pm_status(dev_id, event, value);
5031 }
5032 
5033 void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event,
5034                                       const struct kbase_context *kctx,
5035                                       u8 atom_id)
5036 {
5037     trace_mali_job_slots_event(dev_id, event, (kctx != NULL ? kctx->tgid : 0),
5038                                (kctx != NULL ? kctx->pid : 0), atom_id);
5039 }
5040 
5041 void kbase_trace_mali_page_fault_insert_pages(u32 dev_id, int event, u32 value)
5042 {
5043     trace_mali_page_fault_insert_pages(dev_id, event, value);
5044 }
5045 
5046 void kbase_trace_mali_total_alloc_pages_change(u32 dev_id, long long int event)
5047 {
5048     trace_mali_total_alloc_pages_change(dev_id, event);
5049 }
5050 #endif /* CONFIG_MALI_BIFROST_GATOR_SUPPORT */
5051