/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_MMU_H_
#define _KBASE_MMU_H_

#include <uapi/gpu/arm/bifrost/mali_base_kernel.h>

#define KBASE_MMU_PAGE_ENTRIES 512

struct kbase_context;
struct kbase_mmu_table;

/**
 * enum kbase_caller_mmu_sync_info - MMU-synchronous caller info.
 *
 * A value of this type is passed down from the outer-most callers in the
 * kbase module, where the information resides as to the synchronous /
 * asynchronous nature of the call flow with respect to MMU operations,
 * i.e. does the call flow relate to existing GPU work or does it come from
 * requests (like ioctl) from user-space, power management, etc.
 */
enum kbase_caller_mmu_sync_info {
	/* Default value; must be invalid to avoid the accidental choice of a
	 * 'valid' value.
	 */
	CALLER_MMU_UNSET_SYNCHRONICITY,

	/* Arbitrary value for 'synchronous' that isn't easy to choose by
	 * accident.
	 */
	CALLER_MMU_SYNC = 0x02,

	/* Also hard to choose by accident */
	CALLER_MMU_ASYNC
};
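
/*
 * Illustrative sketch (not taken from the driver): a user-space-triggered
 * mapping request, such as an ioctl handler, is synchronous with respect to
 * its caller and would pass CALLER_MMU_SYNC down to the MMU code, while
 * deferred work (e.g. growing a region on a page fault) would pass
 * CALLER_MMU_ASYNC. The variables kctx, vpfn, phys, nr, flags and group_id
 * below are hypothetical:
 *
 *   const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_SYNC;
 *
 *   err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, vpfn, phys, nr,
 *                                flags, kctx->as_nr, group_id,
 *                                mmu_sync_info);
 */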

/**
 * kbase_mmu_as_init() - Initialise a GPU address space object.
 *
 * This is called from device probe to initialise an address space object
 * of the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer).
 * @i: Array index of address space object.
 *
 * Return: 0 on success and non-zero value on failure.
 */
int kbase_mmu_as_init(struct kbase_device *kbdev, int i);

/**
 * kbase_mmu_as_term() - Terminate address space object.
 *
 * This is called upon device termination to destroy
 * the address space object of the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer).
 * @i: Array index of address space object.
 */
void kbase_mmu_as_term(struct kbase_device *kbdev, int i);

/**
 * kbase_mmu_init - Initialise an object representing GPU page tables
 *
 * The structure should be terminated using kbase_mmu_term()
 *
 * @kbdev: Instance of GPU platform device, allocated from the probe method.
 * @mmut: GPU page tables to be initialized.
 * @kctx: Optional kbase context, may be NULL if this set of MMU tables
 *        is not associated with a context.
 * @group_id: The physical group ID from which to allocate GPU page tables.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 *
 * Return: 0 if successful, otherwise a negative error code.
 */
int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
		struct kbase_context *kctx, int group_id);

/**
 * kbase_mmu_interrupt - Process an MMU interrupt.
 *
 * Process the MMU interrupt that was reported by the &kbase_device.
 *
 * @kbdev: Pointer to the kbase device for which the interrupt happened.
 * @irq_stat: Value of the MMU_IRQ_STATUS register.
 */
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
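
/*
 * Illustrative call site, a sketch only: the device's MMU IRQ handler would
 * read the raw MMU_IRQ_STATUS value and hand it to kbase_mmu_interrupt()
 * unmodified. The accessor read_mmu_irq_status() is hypothetical and stands
 * in for whatever register read the platform glue actually performs:
 *
 *   static irqreturn_t mmu_irq_handler(int irq, void *data)
 *   {
 *           struct kbase_device *kbdev = data;
 *           u32 irq_stat = read_mmu_irq_status(kbdev);
 *
 *           if (!irq_stat)
 *                   return IRQ_NONE;
 *
 *           kbase_mmu_interrupt(kbdev, irq_stat);
 *           return IRQ_HANDLED;
 *   }
 */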

/**
 * kbase_mmu_term - Terminate an object representing GPU page tables
 *
 * This will free any page tables that have been allocated
 *
 * @kbdev: Instance of GPU platform device, allocated from the probe method.
 * @mmut: GPU page tables to be destroyed.
 */
void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
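
/*
 * A minimal lifecycle sketch, assuming a caller that owns a
 * struct kbase_mmu_table (mmut) and holds a valid group_id; error handling
 * is elided except for the init failure path:
 *
 *   err = kbase_mmu_init(kbdev, &mmut, NULL, group_id);
 *   if (err)
 *           return err;
 *
 *   ... map and unmap pages against &mmut ...
 *
 *   kbase_mmu_term(kbdev, &mmut);
 */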

/**
 * kbase_mmu_create_ate - Create an address translation entry
 *
 * @kbdev: Instance of GPU platform device, allocated from the probe method.
 * @phy: Physical address of the page to be mapped for GPU access.
 * @flags: Bitmask of attributes of the GPU memory region being mapped.
 * @level: Page table level for which to build an address translation entry.
 * @group_id: The physical memory group in which the page was allocated.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 *
 * This function creates an address translation entry to encode the physical
 * address of a page to be mapped for access by the GPU, along with any extra
 * attributes required for the GPU memory region.
 *
 * Return: An address translation entry, either in LPAE or AArch64 format
 * (depending on the driver's configuration).
 */
u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
	struct tagged_addr phy, unsigned long flags, int level, int group_id);
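
/*
 * Hedged example: building a bottom-level translation entry for a newly
 * allocated page. as_phys and mem_flags are hypothetical, and
 * MIDGARD_MMU_BOTTOMLEVEL stands for the lowest page table level; the
 * returned 64-bit entry would then be written into the corresponding page
 * table slot by the MMU code:
 *
 *   u64 ate = kbase_mmu_create_ate(kbdev, as_phys, mem_flags,
 *                                  MIDGARD_MMU_BOTTOMLEVEL, group_id);
 */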

/* Map @nr pages at GPU PFN @start_vpfn, without flushing GPU caches. */
int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
				    struct kbase_mmu_table *mmut,
				    const u64 start_vpfn,
				    struct tagged_addr *phys, size_t nr,
				    unsigned long flags, int group_id);

/* Map @nr pages at GPU PFN @vpfn and perform the required flushes. */
int kbase_mmu_insert_pages(struct kbase_device *kbdev,
			   struct kbase_mmu_table *mmut, u64 vpfn,
			   struct tagged_addr *phys, size_t nr,
			   unsigned long flags, int as_nr, int group_id,
			   enum kbase_caller_mmu_sync_info mmu_sync_info);

/* Map the single page @phys at @nr consecutive GPU PFNs from @vpfn. */
int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
				 struct tagged_addr phys, size_t nr,
				 unsigned long flags, int group_id,
				 enum kbase_caller_mmu_sync_info mmu_sync_info);

/* Unmap @nr pages starting at GPU PFN @vpfn. */
int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
			     struct kbase_mmu_table *mmut, u64 vpfn,
			     size_t nr, int as_nr);

/* Update the physical pages and/or attributes of @nr existing mappings. */
int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
			   struct tagged_addr *phys, size_t nr,
			   unsigned long flags, int const group_id);
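
/*
 * Sketch of a map/unmap pairing using the calls above, with hypothetical
 * variables throughout; on success, every kbase_mmu_insert_pages() is
 * eventually balanced by a kbase_mmu_teardown_pages() over the same range:
 *
 *   err = kbase_mmu_insert_pages(kbdev, &kctx->mmu, vpfn, phys, nr, flags,
 *                                kctx->as_nr, group_id, CALLER_MMU_SYNC);
 *   if (err)
 *           return err;
 *
 *   ... GPU uses the mapping ...
 *
 *   err = kbase_mmu_teardown_pages(kbdev, &kctx->mmu, vpfn, nr,
 *                                  kctx->as_nr);
 */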

/**
 * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
 *
 * Process the bus fault interrupt that was reported for a particular GPU
 * address space.
 *
 * @kbdev: Pointer to the kbase device for which bus fault was reported.
 * @status: Value of the GPU_FAULTSTATUS register.
 * @as_nr: GPU address space for which the bus fault occurred.
 *
 * Return: zero if the operation was successful, non-zero otherwise.
 */
int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status,
		u32 as_nr);

/**
 * kbase_mmu_gpu_fault_interrupt() - Report a GPU fault.
 * @kbdev: Kbase device pointer
 * @status: GPU fault status
 * @as_nr: Faulty address space
 * @address: GPU fault address
 * @as_valid: true if address space is valid
 *
 * This function builds GPU fault information and submits a work item
 * for reporting the details of the fault.
 */
void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
		u32 as_nr, u64 address, bool as_valid);

/**
 * kbase_context_mmu_group_id_get - Decode a memory group ID from
 *                                  base_context_create_flags
 *
 * Memory allocated for GPU page tables will come from the returned group.
 *
 * @flags: Bitmask of flags to pass to base_context_init.
 *
 * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
 */
static inline int
kbase_context_mmu_group_id_get(base_context_create_flags const flags)
{
	KBASE_DEBUG_ASSERT(flags ==
			(flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
	return (int)BASE_CONTEXT_MMU_GROUP_ID_GET(flags);
}
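
/*
 * Usage sketch: during context creation, the flags passed from user space
 * are decoded once and the result used for all page table allocations of
 * that context. setup_flags is hypothetical:
 *
 *   const int group_id = kbase_context_mmu_group_id_get(setup_flags);
 *
 *   err = kbase_mmu_init(kbdev, &kctx->mmu, kctx, group_id);
 */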

#endif /* _KBASE_MMU_H_ */