/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

/* Destroys a BO for real (defined elsewhere in this library); the cache
 * calls it when it evicts an entry.  NOTE(review): callers below invoke it
 * while holding table_lock — presumably bo_del expects that; confirm against
 * its definition.
 */
drm_private void bo_del(struct etna_bo *bo);
/* Global lock protecting the device's BO tables and these cache lists. */
drm_private extern pthread_mutex_t table_lock;

/* Append one bucket of the given byte size to the cache.
 *
 * Buckets must be added in ascending size order: get_bucket() below relies
 * on that to return the smallest bucket that fits a request.  Asserts if
 * the fixed-size bucket array is full.
 */
static void add_bucket(struct etna_bo_cache *cache, int size)
{
	unsigned i = cache->num_buckets;

	assert(i < ARRAY_SIZE(cache->cache_bucket));

	list_inithead(&cache->cache_bucket[i].list);
	cache->cache_bucket[i].size = size;
	cache->num_buckets++;
}

/* Populate the cache with its fixed set of bucket sizes.
 *
 * Sizes are 4 KiB, 8 KiB, 12 KiB, then for each power of two from 16 KiB up
 * to 64 MiB the power itself plus three evenly spaced intermediate sizes
 * (size + 1/4, + 2/4, + 3/4), giving finer granularity than pure
 * power-of-two buckets.
 */
drm_private void etna_bo_cache_init(struct etna_bo_cache *cache)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(cache, 4096);
	add_bucket(cache, 4096 * 2);
	add_bucket(cache, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(cache, size);
		add_bucket(cache, size + size * 1 / 4);
		add_bucket(cache, size + size * 2 / 4);
		add_bucket(cache, size + size * 3 / 4);
	}
}

/* Frees older cached buffers.  Called under table_lock.
 *
 * Evicts, from the head (oldest end) of every bucket, BOs that have been
 * cached for more than one second.  Passing time == 0 disables the age
 * check and flushes every cached BO.  The cache->time comparison makes
 * repeated calls within the same second a no-op.
 */
drm_private void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time)
{
	unsigned i;

	if (cache->time == time)
		return;

	for (i = 0; i < cache->num_buckets; i++) {
		struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
		struct etna_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			list_del(&bo->list);
			bo_del(bo);
		}
	}

	cache->time = time;
}

/* Return the smallest bucket whose size is >= the requested size, or NULL
 * when the request exceeds the largest bucket (such BOs are not cached).
 * Relies on buckets having been added in ascending size order.
 */
static struct etna_bo_bucket *get_bucket(struct etna_bo_cache *cache, uint32_t size)
{
	unsigned i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
	for (i = 0; i < cache->num_buckets; i++) {
		struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

/* Nonzero when the BO can be prepped for CPU access without waiting, i.e.
 * the GPU is no longer using it (NOSYNC makes the prep fail instead of
 * blocking if the BO is busy).
 */
static int is_idle(struct etna_bo *bo)
{
	return etna_bo_cpu_prep(bo,
			DRM_ETNA_PREP_READ |
			DRM_ETNA_PREP_WRITE |
			DRM_ETNA_PREP_NOSYNC) == 0;
}

/* Try to pop a reusable BO with matching allocation flags from a bucket.
 *
 * Takes table_lock itself.  Scans from the oldest entry: entries with
 * different flags are skipped; the first entry with matching flags is
 * taken only if it is idle.  If that oldest matching BO is still busy,
 * younger ones were freed even more recently, so the search gives up.
 * Returns the unlinked BO, or NULL on no (idle) match.
 */
static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags)
{
	struct etna_bo *bo = NULL, *tmp;

	pthread_mutex_lock(&table_lock);

	if (LIST_IS_EMPTY(&bucket->list))
		goto out_unlock;

	LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket->list, list) {
		/* skip BOs with different flags */
		if (bo->flags != flags)
			continue;

		/* check if the first BO with matching flags is idle */
		if (is_idle(bo)) {
			list_delinit(&bo->list);
			goto out_unlock;
		}

		/* If the oldest BO is still busy, don't try younger ones */
		break;
	}

	/* There was no matching buffer found */
	bo = NULL;

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* allocate a new (un-tiled) buffer object
 *
 * NOTE: size is potentially rounded up to bucket size
 *
 * *size is first page-aligned; if a bucket fits, *size is overwritten with
 * the bucket size (so the caller sees the actual allocation size) and a
 * recycled BO is returned with its refcount reset to 1 and a fresh device
 * reference taken.  Returns NULL when nothing could be recycled — the
 * caller is expected to fall back to a real allocation.
 */
drm_private struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache, uint32_t *size,
    uint32_t flags)
{
	struct etna_bo *bo;
	struct etna_bo_bucket *bucket;

	*size = ALIGN(*size, 4096);
	bucket = get_bucket(cache, *size);

	/* see if we can be green and recycle: */
	if (bucket) {
		*size = bucket->size;
		bo = find_in_bucket(bucket, flags);
		if (bo) {
			atomic_set(&bo->refcnt, 1);
			etna_device_ref(bo->dev);
			return bo;
		}
	}

	return NULL;
}

/* Park a dying BO in the cache instead of destroying it.
 *
 * Returns 0 if the BO was cached (ownership transferred to the cache) or
 * -1 if it fits no bucket, in which case the caller must destroy it.  Also
 * opportunistically evicts stale entries via etna_bo_cache_cleanup().
 * NOTE(review): the call to etna_device_del_locked() and the lock-free list
 * manipulation imply the caller already holds table_lock — confirm against
 * the caller (bo teardown path).
 */
drm_private int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo)
{
	struct etna_bo_bucket *bucket = get_bucket(cache, bo->size);

	/* see if we can be green and recycle: */
	if (bucket) {
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		bo->free_time = time.tv_sec;
		list_addtail(&bo->list, &bucket->list);
		etna_bo_cache_cleanup(cache, time.tv_sec);

		/* bo's in the bucket cache don't have a ref and
		 * don't hold a ref to the dev:
		 */
		etna_device_del_locked(bo->dev);

		return 0;
	}

	return -1;
}