/* -*- mode: C; c-file-style: "k&r"; tab-width: 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

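/* shared bo bookkeeping, defined in freedreno_bo.c: */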
drm_private void bo_del(struct fd_bo *bo);
drm_private extern pthread_mutex_t table_lock;

static void
add_bucket(struct fd_bo_cache *cache, int size)
{
	unsigned int i = cache->num_buckets;

	assert(i < ARRAY_SIZE(cache->cache_bucket));

	list_inithead(&cache->cache_bucket[i].list);
	cache->cache_bucket[i].size = size;
	cache->num_buckets++;
}

/**
 * @coarse: if true, use only power-of-two bucket sizes; otherwise
 * fill in intermediate sizes for a smoother size curve.
 */
drm_private void
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
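	/* For example, with coarse=0 the bucket sizes come out to 4k,
	 * 8k, 12k, 16k, 20k, 24k, 28k, 32k, 40k, 48k, 56k, 64k, and
	 * so on up to cache_max_size.
	 */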
	add_bucket(cache, 4096);
	add_bucket(cache, 4096 * 2);
	if (!coarse)
		add_bucket(cache, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(cache, size);
		if (!coarse) {
			add_bucket(cache, size + size * 1 / 4);
			add_bucket(cache, size + size * 2 / 4);
			add_bucket(cache, size + size * 3 / 4);
		}
	}
}

/* Frees older cached buffers.  Called under table_lock */
drm_private void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
	int i;

	if (cache->time == time)
		return;

	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			VG_BO_OBTAIN(bo);
			list_del(&bo->list);
			bo_del(bo);
		}
	}

	cache->time = time;
}

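/* find the smallest bucket that will hold 'size', or NULL if it is
 * larger than the largest bucket:
 */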
static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
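
/* A sketch of the O(1) lookup hinted at above (not wired up; the
 * linear scan is simple and num_buckets is small).  Assumes the
 * !coarse bucket layout from fd_bo_cache_init(), ie. 4k/8k/12k/16k
 * and then four quarter-steps per power of two:
 *
 *	static int bucket_index(uint32_t size)
 *	{
 *		if (size <= 4 * 4096)
 *			return (size - 1) / 4096;
 *		// size falls in (p, 2p] for p = 1 << log2:
 *		int log2 = 31 - __builtin_clz(size - 1);
 *		uint32_t p = 1u << log2;
 *		// quarter-steps above p, in the range 1..4:
 *		int step = (size - p + p/4 - 1) / (p/4);
 *		// index 3 is the 16k bucket (log2 == 14):
 *		return 3 + 4 * (log2 - 14) + step;
 *	}
 *
 * (a real version would still need to clamp against num_buckets)
 */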
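
/* Check whether the kernel considers the bo idle, without blocking
 * (PREP_NOSYNC makes cpu_prep fail rather than stall if the bo is
 * still busy):
 */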
static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}

static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
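	 * (since fd_bo_cache_free() adds with list_addtail(), taking the
	 * MRU bo here would just mean bucket->list.prev instead of .next)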
	 */
	pthread_mutex_lock(&table_lock);
	if (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
		} else {
			bo = NULL;
		}
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* NOTE: size is potentially rounded up to bucket size: */
drm_private struct fd_bo *
fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;

	*size = ALIGN(*size, 4096);
	bucket = get_bucket(cache, *size);

	/* see if we can be green and recycle: */
retry:
	if (bucket) {
		*size = bucket->size;
		bo = find_in_bucket(bucket, flags);
		if (bo) {
			VG_BO_OBTAIN(bo);
			if (bo->funcs->madvise(bo, TRUE) <= 0) {
				/* we've lost the backing pages, delete and try again: */
				pthread_mutex_lock(&table_lock);
				bo_del(bo);
				pthread_mutex_unlock(&table_lock);
				goto retry;
			}
			atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	return NULL;
}

drm_private int
fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
{
	struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);

	/* see if we can be green and recycle: */
	if (bucket) {
		struct timespec time;

		bo->funcs->madvise(bo, FALSE);

		clock_gettime(CLOCK_MONOTONIC, &time);

		bo->free_time = time.tv_sec;
		VG_BO_RELEASE(bo);
		list_addtail(&bo->list, &bucket->list);
		fd_bo_cache_cleanup(cache, time.tv_sec);

		/* bo's in the bucket cache don't have a ref and
		 * don't hold a ref to the dev:
		 */
		fd_device_del_locked(bo->dev);

		return 0;
	}

	return -1;
}
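
/* A sketch of the expected lifecycle, assuming callers in the
 * device/bo code (names like dev->bo_cache are illustrative here):
 *
 *	fd_bo_cache_init(&dev->bo_cache, FALSE);    // at device creation
 *
 *	// in the bo allocation path:
 *	size = req_size;
 *	bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
 *	if (!bo)
 *		bo = <allocate a fresh bo of 'size' bytes>;
 *
 *	// when the last ref is dropped:
 *	if (fd_bo_cache_free(&dev->bo_cache, bo) == 0)
 *		return;    // parked in the cache instead of freed
 */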