/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what is after last is the oldest bo we allocated and thus
 * the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring
 * and resume as soon as any of those fences completes.
 */
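/*
 * Example usage (a minimal sketch, not taken from a real caller; the
 * surrounding adev, fence handling and error checking are assumed):
 *
 *	struct amdgpu_sa_manager mgr;
 *	struct amdgpu_sa_bo *sa_bo;
 *
 *	amdgpu_sa_bo_manager_init(adev, &mgr, 64 * 1024, 256,
 *				  AMDGPU_GEM_DOMAIN_GTT);
 *	amdgpu_sa_bo_new(&mgr, &sa_bo, 1024, 256);
 *	... access the allocation via mgr.cpu_ptr / mgr.gpu_addr plus
 *	    sa_bo->soffset ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	amdgpu_sa_bo_manager_fini(adev, &mgr);
 */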

#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

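/**
 * amdgpu_sa_bo_manager_init - initialize a sub-allocation manager
 *
 * @adev: amdgpu device
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer object in bytes
 * @align: alignment of the backing buffer object
 * @domain: memory domain to place the backing buffer object in
 *
 * Creates and maps the backing buffer object and resets the hole and
 * fence lists. Returns 0 on success or a negative error code.
 */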
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
				&sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	return r;
}

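/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocation manager
 *
 * @adev: amdgpu device
 * @sa_manager: manager to tear down
 *
 * Frees any remaining sub-allocations (warning if the manager is not
 * empty) and releases the backing buffer object.
 */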
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return;
	}

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}

	amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	sa_manager->size = 0;
}

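/* Unlink a sub-allocation from the offset and fence lists and free it,
 * moving the hole back to the previous entry if it pointed at this bo.
 * Callers are expected to hold the manager's wq.lock (except at teardown).
 */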
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	dma_fence_put(sa_bo->fence);
	kfree(sa_bo);
}

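/* Walk the allocations after the hole and free every one whose fence has
 * already signaled, stopping at the first bo that is still in use.
 */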
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !dma_fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

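/* Start offset of the current hole: the end offset of the last allocated
 * bo, or 0 if the hole is at the beginning of the buffer.
 */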
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

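/* End offset of the current hole: the start offset of the next allocated
 * bo, or the manager size if the hole extends to the end of the buffer.
 */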
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

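/* Try to place @sa_bo in the current hole. Returns true and links the bo
 * in after the hole on success, false if the hole is too small for
 * size plus alignment padding.
 */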
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (!list_empty(&sa_manager->flist[i]))
			return true;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

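/* Advance the hole past completed allocations. Collects the oldest
 * unsignaled fence of each ring in @fences so the caller can wait on
 * them, and removes the closest signaled bo after the current hole.
 * Returns true if the hole moved and allocation should be retried.
 */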
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct dma_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest to the current last
	 */
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		fences[i] = NULL;

		if (list_empty(&sa_manager->flist[i]))
			continue;

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!dma_fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = best_bo->fence->context;

		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

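/**
 * amdgpu_sa_bo_new - allocate a new sub-allocation
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: resulting sub-allocation
 * @size: number of bytes to allocate
 * @align: alignment, must not exceed the manager's alignment
 *
 * Tries to allocate directly, then repeatedly skips over or waits on
 * fenced allocations until space becomes available. Returns 0 on success,
 * -EINVAL for oversized requests, -ENOMEM if the bookkeeping allocation
 * fails, or the error from waiting on the fences.
 */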
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;

	if (WARN_ON_ONCE(align > sa_manager->align))
		return -EINVAL;

	if (WARN_ON_ONCE(size > sa_manager->size))
		return -EINVAL;

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if (!(*sa_bo))
		return -ENOMEM;
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			tries[i] = 0;

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			if (fences[i])
				fences[count++] = dma_fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq.lock);
			t = dma_fence_wait_any_timeout(fences, count, false,
						       MAX_SCHEDULE_TIMEOUT,
						       NULL);
			for (i = 0; i < count; ++i)
				dma_fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for, block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

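/**
 * amdgpu_sa_bo_free - free a sub-allocation
 *
 * @adev: amdgpu device (unused here, kept for symmetry with the allocator)
 * @sa_bo: sub-allocation to free, set to NULL on return
 * @fence: fence protecting the sub-allocation, may be NULL
 *
 * If @fence is not yet signaled the sub-allocation is queued on the
 * matching fence list and reclaimed later, otherwise it is removed
 * immediately. Waiters in amdgpu_sa_bo_new() are woken up.
 */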
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !dma_fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = dma_fence_get(fence);
		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

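/* Dump all sub-allocations to debugfs: offset range, size, the current
 * hole position (marked with '>') and the protecting fence, if any.
 */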
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);

		if (i->fence)
			seq_printf(m, " protected by 0x%016llx on context %llu",
				   i->fence->seqno, i->fence->context);

		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif