/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "freedreno_drmif.h"
#include "freedreno_priv.h"


drm_private void bo_del(struct fd_bo *bo);
drm_private extern pthread_mutex_t table_lock;

static void
add_bucket(struct fd_bo_cache *cache, int size)
{
	unsigned int i = cache->num_buckets;

	assert(i < ARRAY_SIZE(cache->cache_bucket));

	list_inithead(&cache->cache_bucket[i].list);
	cache->cache_bucket[i].size = size;
	cache->num_buckets++;
}

/**
 * @coarse: if true, only power-of-two bucket sizes; otherwise
 *    fill in intermediate sizes for a somewhat smoother size curve.
 */
drm_private void
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
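	/* For example, with coarse=0 this yields bucket sizes (in 4KiB
	 * pages) of 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
	 * 32, ... doubling the step at each power of two, up to 64MiB.
	 */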
	add_bucket(cache, 4096);
	add_bucket(cache, 4096 * 2);
	if (!coarse)
		add_bucket(cache, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(cache, size);
		if (!coarse) {
			add_bucket(cache, size + size * 1 / 4);
			add_bucket(cache, size + size * 2 / 4);
			add_bucket(cache, size + size * 3 / 4);
		}
	}
}
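
/* A minimal usage sketch: the device setup code owns the cache and
 * initializes it once, e.g. (hypothetical caller; the bo_cache field
 * name is assumed):
 *
 *	fd_bo_cache_init(&dev->bo_cache, FALSE);
 */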

/* Frees older cached buffers.  Called under table_lock */
drm_private void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
	int i;

	if (cache->time == time)
		return;

	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			list_del(&bo->list);
			bo_del(bo);
		}
	}

	cache->time = time;
}
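
/* Note that time==0 bypasses the age check above, so passing 0 flushes
 * the entire cache; suitable for device teardown.
 */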

static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..  (see the
	 * sketch below)
	 */
	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
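
/* Per the comment above, the bucket index could also be computed
 * directly.  A sketch (untested; assumes the !coarse layout built in
 * fd_bo_cache_init, and the caller must still bounds-check the result
 * against num_buckets for sizes beyond cache_max_size):
 *
 *	static int bucket_index(uint32_t size)
 *	{
 *		if (size <= 4096)  return 0;
 *		if (size <= 8192)  return 1;
 *		if (size <= 12288) return 2;
 *		if (size <= 16384) return 3;
 *		// 2^p is the largest power of two strictly below size:
 *		int p = 31 - __builtin_clz(size - 1);
 *		// quarter-steps above 2^p, rounded up (1..4):
 *		int k = (size - (1u << p) + (1u << (p - 2)) - 1) >> (p - 2);
 *		return 3 + 4 * (p - 14) + k;
 *	}
 */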
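/* Check, without blocking (NOSYNC), whether the kernel still considers
 * the buffer busy:
 */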
static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}

static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	if (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
		} else {
			bo = NULL;
		}
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* NOTE: size is potentially rounded up to bucket size: */
drm_private struct fd_bo *
fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;

	*size = ALIGN(*size, 4096);
	bucket = get_bucket(cache, *size);

	/* see if we can be green and recycle: */
retry:
	if (bucket) {
		*size = bucket->size;
		bo = find_in_bucket(bucket, flags);
		if (bo) {
			if (bo->funcs->madvise(bo, TRUE) <= 0) {
				/* we've lost the backing pages, delete and try again: */
				pthread_mutex_lock(&table_lock);
				bo_del(bo);
				pthread_mutex_unlock(&table_lock);
				goto retry;
			}
			atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	return NULL;
}
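
/* A sketch of the intended allocation path (hypothetical caller,
 * modeled on fd_bo_new; bo_new_handle is the per-device backend hook):
 *
 *	size = ALIGN(size, 4096);
 *	bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
 *	if (bo)
 *		return bo;
 *	// cache miss, fall back to a fresh kernel allocation:
 *	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
 *	...
 */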

drm_private int
fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
{
	struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);

	/* see if we can be green and recycle: */
	if (bucket) {
		struct timespec time;

		bo->funcs->madvise(bo, FALSE);

		clock_gettime(CLOCK_MONOTONIC, &time);

		bo->free_time = time.tv_sec;
		list_addtail(&bo->list, &bucket->list);
		fd_bo_cache_cleanup(cache, time.tv_sec);

		/* bo's in the bucket cache don't have a ref and
		 * don't hold a ref to the dev:
		 */
		fd_device_del_locked(bo->dev);

		return 0;
	}

	return -1;
}
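
/* The matching destroy path returns the buffer to the cache when
 * possible, and only really deletes it if the cache refuses it
 * (returns -1), e.g. (hypothetical caller, modeled on fd_bo_del;
 * assumes table_lock is held):
 *
 *	if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
 *		goto out;
 *	bo_del(bo);
 */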