// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include "mali_kbase_csf_heap_context_alloc.h"

/* Size of one heap context structure, in bytes. */
#define HEAP_CTX_SIZE ((size_t)32)

/* Total size of the GPU memory region allocated for heap contexts, in bytes. */
#define HEAP_CTX_REGION_SIZE (MAX_TILER_HEAPS * HEAP_CTX_SIZE)

/**
 * sub_alloc - Sub-allocate a heap context from a GPU memory region
 *
 * @ctx_alloc: Pointer to the heap context allocator.
 *
 * Return: GPU virtual address of the allocated heap context or 0 on failure.
 */
static u64 sub_alloc(struct kbase_csf_heap_context_allocator *const ctx_alloc)
{
	struct kbase_context *const kctx = ctx_alloc->kctx;
	int heap_nr = 0;
	size_t ctx_offset = 0;
	u64 heap_gpu_va = 0;
	struct kbase_vmap_struct mapping;
	void *ctx_ptr = NULL;

	lockdep_assert_held(&ctx_alloc->lock);

	heap_nr = find_first_zero_bit(ctx_alloc->in_use,
		MAX_TILER_HEAPS);

	if (unlikely(heap_nr >= MAX_TILER_HEAPS)) {
		dev_dbg(kctx->kbdev->dev,
			"No free tiler heap contexts in the pool");
		return 0;
	}

	ctx_offset = heap_nr * HEAP_CTX_SIZE;
	heap_gpu_va = ctx_alloc->gpu_va + ctx_offset;
	ctx_ptr = kbase_vmap_prot(kctx, heap_gpu_va,
		HEAP_CTX_SIZE, KBASE_REG_CPU_WR, &mapping);

	if (unlikely(!ctx_ptr)) {
		dev_err(kctx->kbdev->dev,
			"Failed to map tiler heap context %d (0x%llX)\n",
			heap_nr, heap_gpu_va);
		return 0;
	}

	memset(ctx_ptr, 0, HEAP_CTX_SIZE);
	kbase_vunmap(ctx_ptr, &mapping);

	bitmap_set(ctx_alloc->in_use, heap_nr, 1);

	dev_dbg(kctx->kbdev->dev, "Allocated tiler heap context %d (0x%llX)\n",
		heap_nr, heap_gpu_va);

	return heap_gpu_va;
}

/**
 * sub_free - Free a heap context sub-allocated from a GPU memory region
 *
 * @ctx_alloc:   Pointer to the heap context allocator.
 * @heap_gpu_va: The GPU virtual address of a heap context structure to free.
 */
static void sub_free(struct kbase_csf_heap_context_allocator *const ctx_alloc,
	u64 const heap_gpu_va)
{
	struct kbase_context *const kctx = ctx_alloc->kctx;
	u64 ctx_offset = 0;
	unsigned int heap_nr = 0;

	lockdep_assert_held(&ctx_alloc->lock);

	if (WARN_ON(!ctx_alloc->region))
		return;

	if (WARN_ON(heap_gpu_va < ctx_alloc->gpu_va))
		return;

	ctx_offset = heap_gpu_va - ctx_alloc->gpu_va;

	if (WARN_ON(ctx_offset >= HEAP_CTX_REGION_SIZE) ||
		WARN_ON(ctx_offset % HEAP_CTX_SIZE))
		return;

	heap_nr = ctx_offset / HEAP_CTX_SIZE;
	dev_dbg(kctx->kbdev->dev,
		"Freed tiler heap context %d (0x%llX)\n", heap_nr, heap_gpu_va);

	bitmap_clear(ctx_alloc->in_use, heap_nr, 1);
}

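/**
 * kbase_csf_heap_context_allocator_init - Initialize a tiler heap context
 *                                         allocator
 *
 * @ctx_alloc: Pointer to the heap context allocator to initialize.
 * @kctx:      Pointer to the kbase context the allocator belongs to.
 *
 * No GPU memory is allocated here; the pool of heap contexts is allocated
 * lazily on the first call to kbase_csf_heap_context_allocator_alloc().
 *
 * Return: 0 on success.
 */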
int kbase_csf_heap_context_allocator_init(
	struct kbase_csf_heap_context_allocator *const ctx_alloc,
	struct kbase_context *const kctx)
{
	/* We cannot pre-allocate GPU memory here because the
	 * custom VA zone may not have been created yet.
	 */
	ctx_alloc->kctx = kctx;
	ctx_alloc->region = NULL;
	ctx_alloc->gpu_va = 0;

	mutex_init(&ctx_alloc->lock);
	bitmap_zero(ctx_alloc->in_use, MAX_TILER_HEAPS);

	dev_dbg(kctx->kbdev->dev,
		"Initialized a tiler heap context allocator\n");

	return 0;
}

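/**
 * kbase_csf_heap_context_allocator_term - Terminate a tiler heap context
 *                                         allocator
 *
 * @ctx_alloc: Pointer to the heap context allocator to terminate.
 *
 * Frees the GPU memory region backing the pool of heap contexts, if one was
 * allocated, and destroys the allocator's lock.
 */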
void kbase_csf_heap_context_allocator_term(
	struct kbase_csf_heap_context_allocator *const ctx_alloc)
{
	struct kbase_context *const kctx = ctx_alloc->kctx;

	dev_dbg(kctx->kbdev->dev,
		"Terminating tiler heap context allocator\n");

	if (ctx_alloc->region) {
		kbase_gpu_vm_lock(kctx);
		ctx_alloc->region->flags &= ~KBASE_REG_NO_USER_FREE;
		kbase_mem_free_region(kctx, ctx_alloc->region);
		kbase_gpu_vm_unlock(kctx);
	}

	mutex_destroy(&ctx_alloc->lock);
}

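/**
 * kbase_csf_heap_context_allocator_alloc - Allocate a heap context structure
 *
 * @ctx_alloc: Pointer to the heap context allocator.
 *
 * If the pool of heap contexts has not been allocated yet, a GPU memory
 * region large enough for all heap contexts is allocated first, then a heap
 * context is sub-allocated from it.
 *
 * Return: GPU virtual address of the allocated heap context or 0 on failure.
 */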
u64 kbase_csf_heap_context_allocator_alloc(
	struct kbase_csf_heap_context_allocator *const ctx_alloc)
{
	struct kbase_context *const kctx = ctx_alloc->kctx;
	u64 flags = BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
		BASE_MEM_PROT_CPU_WR | BASEP_MEM_NO_USER_FREE;
	u64 nr_pages = PFN_UP(HEAP_CTX_REGION_SIZE);
	u64 heap_gpu_va = 0;

	/* Calls to this function are inherently asynchronous with respect to
	 * MMU operations.
	 */
	const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;

#ifdef CONFIG_MALI_VECTOR_DUMP
	flags |= BASE_MEM_PROT_CPU_RD;
#endif

	mutex_lock(&ctx_alloc->lock);

	/* If the pool of heap contexts wasn't already allocated then
	 * allocate it.
	 */
	if (!ctx_alloc->region) {
		ctx_alloc->region =
			kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags,
					&ctx_alloc->gpu_va, mmu_sync_info);
	}

	/* If the pool still isn't allocated then an error occurred. */
	if (unlikely(!ctx_alloc->region)) {
		dev_dbg(kctx->kbdev->dev, "Failed to allocate a pool of tiler heap contexts");
	} else {
		heap_gpu_va = sub_alloc(ctx_alloc);
	}

	mutex_unlock(&ctx_alloc->lock);

	return heap_gpu_va;
}

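/**
 * kbase_csf_heap_context_allocator_free - Free a heap context structure
 *
 * @ctx_alloc:   Pointer to the heap context allocator.
 * @heap_gpu_va: The GPU virtual address of a heap context structure that was
 *               allocated from this allocator.
 *
 * Marks the heap context as free in the allocator's bitmap so it can be
 * reused; the GPU memory region backing the pool is not freed until the
 * allocator itself is terminated.
 */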
void kbase_csf_heap_context_allocator_free(
	struct kbase_csf_heap_context_allocator *const ctx_alloc,
	u64 const heap_gpu_va)
{
	mutex_lock(&ctx_alloc->lock);
	sub_free(ctx_alloc, heap_gpu_va);
	mutex_unlock(&ctx_alloc->lock);
}