Lines Matching +full:lru +full:- +full:cache
4 * SPDX-License-Identifier: MIT
26 return (bucket_index - MIN_BO_CACHE_BUCKET); in agx_bucket_index()
32 return &dev->bo_cache.buckets[agx_bucket_index(size)]; in agx_bucket()
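
The two helpers above map an allocation size to one of the fixed, power-of-two-sized cache buckets. A minimal sketch of that mapping, assuming illustrative MIN/MAX bucket bounds (the driver's real constants and its log2 helper are not shown in this listing):

   #include <stddef.h>

   /* Illustrative bounds: bucket 0 covers sizes up to 4 KiB, the last bucket
    * everything at or above 64 MiB. The real values live in the driver. */
   #define MIN_BO_CACHE_BUCKET 12
   #define MAX_BO_CACHE_BUCKET 26

   /* Pick the smallest power-of-two size class that fits the request, clamp
    * it into the bucket range, then rebase to 0 so it can index the
    * bo_cache.buckets[] array, matching the "- MIN_BO_CACHE_BUCKET" offset
    * in the listing. */
   static unsigned
   bucket_index(size_t size)
   {
      unsigned bucket = MIN_BO_CACHE_BUCKET;

      while (bucket < MAX_BO_CACHE_BUCKET && ((size_t)1 << bucket) < size)
         bucket++;

      return bucket - MIN_BO_CACHE_BUCKET;
   }
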
38 simple_mtx_assert_locked(&dev->bo_cache.lock); in agx_bo_cache_remove_locked()
39 list_del(&bo->bucket_link); in agx_bo_cache_remove_locked()
40 list_del(&bo->lru_link); in agx_bo_cache_remove_locked()
41 dev->bo_cache.size -= bo->size; in agx_bo_cache_remove_locked()
45 * BO cache. If it succeeds, it returns that BO and removes the BO from the
46 * cache. If it fails, it returns NULL signaling the caller to allocate a new
53 simple_mtx_lock(&dev->bo_cache.lock); in agx_bo_cache_fetch()
59 if (entry->size < size || entry->flags != flags) in agx_bo_cache_fetch()
63 if (entry->size > 2 * size) in agx_bo_cache_fetch()
66 if (align > entry->align) in agx_bo_cache_fetch()
74 simple_mtx_unlock(&dev->bo_cache.lock); in agx_bo_cache_fetch()
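
agx_bo_cache_fetch() scans the bucket for the requested size class and rejects entries that cannot be reused. A hedged sketch of just that acceptance test, with an invented struct standing in for the cached BO:

   #include <stdbool.h>
   #include <stddef.h>
   #include <stdint.h>

   /* Simplified stand-in for a cached BO; the field names follow the listing
    * but the struct itself is invented for this sketch. */
   struct cached_bo {
      size_t size;
      size_t align;
      uint32_t flags;
   };

   /* Acceptance test mirroring agx_bo_cache_fetch(): reuse an entry only if
    * it is big enough, has identical flags, is not wastefully large (more
    * than 2x the request), and is at least as strictly aligned. */
   static bool
   cache_entry_fits(const struct cached_bo *entry, size_t size, size_t align,
                    uint32_t flags)
   {
      if (entry->size < size || entry->flags != flags)
         return false;

      /* Don't burn a BO that is more than twice the requested size. */
      if (entry->size > 2 * size)
         return false;

      /* The cached BO must satisfy the requested alignment. */
      if (align > entry->align)
         return false;

      return true;
   }

The 2x upper bound stops a small request from pinning a cached BO that is much larger than needed.
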
85 list_for_each_entry_safe(struct agx_bo, entry, &dev->bo_cache.lru, in agx_bo_cache_evict_stale_bos()
89 * here to account for the fact that we're only testing ->tv_sec, not in agx_bo_cache_evict_stale_bos()
90 * ->tv_nsec. That means we might keep entries that are between 1 and 2 in agx_bo_cache_evict_stale_bos()
94 if (time.tv_sec - entry->last_used <= 2) in agx_bo_cache_evict_stale_bos()
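
The eviction sweep above compares whole seconds only. A small sketch of the staleness test it implies; the 2-second threshold leaves a one-second slack so that nothing younger than a second is evicted, at the cost of occasionally keeping an entry that is between 1 and 2 seconds old:

   #include <stdbool.h>
   #include <time.h>

   /* Entries are timestamped with tv_sec only, so staleness is judged in
    * whole seconds: evict once more than 2 seconds have passed. */
   static bool
   bo_is_stale(time_t now, time_t last_used)
   {
      return (now - last_used) > 2;
   }
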
105 struct list_head *bucket = agx_bucket(dev, bo->size); in agx_bo_cache_put_locked()
109 list_addtail(&bo->bucket_link, bucket); in agx_bo_cache_put_locked()
111 /* Add us to the LRU list and update the last_used field. */ in agx_bo_cache_put_locked()
112 list_addtail(&bo->lru_link, &dev->bo_cache.lru); in agx_bo_cache_put_locked()
114 bo->last_used = time.tv_sec; in agx_bo_cache_put_locked()
117 dev->bo_cache.size += bo->size; in agx_bo_cache_put_locked()
120 printf("BO cache: %zu KiB (+%zu KiB from %s, hit/miss %" PRIu64 in agx_bo_cache_put_locked()
122 DIV_ROUND_UP(dev->bo_cache.size, 1024), in agx_bo_cache_put_locked()
123 DIV_ROUND_UP(bo->size, 1024), bo->label, in agx_bo_cache_put_locked()
124 p_atomic_read(&dev->bo_cache.hits), in agx_bo_cache_put_locked()
125 p_atomic_read(&dev->bo_cache.misses)); in agx_bo_cache_put_locked()
129 bo->label = "Unused (BO cache)"; in agx_bo_cache_put_locked()
131 /* Let's do some cleanup in the BO cache while we hold the lock. */ in agx_bo_cache_put_locked()
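
agx_bo_cache_put_locked() appends the BO to the tail of its size bucket and of the global LRU list, stamps last_used, and grows bo_cache.size before the optional debug print. A sketch of just that reporting step, with invented names (the real code reads the counters with p_atomic_read()):

   #include <inttypes.h>
   #include <stdio.h>

   #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

   /* Hypothetical helper mirroring the debug print above: report the cache
    * total, the size just added, its label, and the hit/miss counters. */
   static void
   log_cache_put(size_t cache_size_B, size_t bo_size_B, const char *label,
                 uint64_t hits, uint64_t misses)
   {
      printf("BO cache: %zu KiB (+%zu KiB from %s, hit/miss %" PRIu64
             "/%" PRIu64 ")\n",
             DIV_ROUND_UP(cache_size_B, (size_t)1024),
             DIV_ROUND_UP(bo_size_B, (size_t)1024), label, hits, misses);
   }
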
135 /* Tries to add a BO to the cache. Returns whether it was successful. */
139 if (bo->flags & AGX_BO_SHARED) { in agx_bo_cache_put()
142 simple_mtx_lock(&dev->bo_cache.lock); in agx_bo_cache_put()
144 simple_mtx_unlock(&dev->bo_cache.lock); in agx_bo_cache_put()
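
agx_bo_cache_put() only takes the cache lock for BOs that are safe to recycle; a BO with AGX_BO_SHARED set has been exported and may still be referenced elsewhere, so the flag test keeps it out of the cache and the caller presumably destroys it directly. A tiny sketch of that gate, with an illustrative flag value:

   #include <stdbool.h>

   /* Illustrative flag bit; the real AGX_BO_SHARED value is defined by the
    * driver. An exported BO must never be handed back out by the cache. */
   #define BO_FLAG_SHARED (1u << 0)

   static bool
   bo_is_cacheable(unsigned flags)
   {
      return !(flags & BO_FLAG_SHARED);
   }
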
153 simple_mtx_lock(&dev->bo_cache.lock); in agx_bo_cache_evict_all()
154 for (unsigned i = 0; i < ARRAY_SIZE(dev->bo_cache.buckets); ++i) { in agx_bo_cache_evict_all()
155 struct list_head *bucket = &dev->bo_cache.buckets[i]; in agx_bo_cache_evict_all()
162 simple_mtx_unlock(&dev->bo_cache.lock); in agx_bo_cache_evict_all()
169 ASSERTED int count = p_atomic_inc_return(&bo->refcnt); in agx_bo_reference()
184 stat->count++; in account_bo()
185 stat->alloc_B += bo->size; in account_bo()
187 if (bo->_map != NULL) in account_bo()
188 stat->mapped_B += bo->size; in account_bo()
211 fprintf(fp, "%s%s%s: ", BOLD, stat->label, RESET); in print_stat()
212 print_size(fp, stat->alloc_B); in print_stat()
214 if (stat->mapped_B) { in print_stat()
216 print_size(fp, stat->mapped_B); in print_stat()
219 fprintf(fp, ", %u BOs\n", stat->count); in print_stat()
228 size_t a = (*label_a)->alloc_B; in compare_size()
229 size_t b = (*label_b)->alloc_B; in compare_size()
231 return (a > b) ? 1 : (a < b) ? -1 : 0; in compare_size()
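
compare_size() is a three-way comparator over per-label allocation totals, suitable for qsort(). A self-contained example of the same comparison over plain sizes:

   #include <stdio.h>
   #include <stdlib.h>

   /* Same three-way comparison as the listing, over plain size_t values. */
   static int
   compare_sz(const void *pa, const void *pb)
   {
      size_t a = *(const size_t *)pa;
      size_t b = *(const size_t *)pb;

      return (a > b) ? 1 : (a < b) ? -1 : 0;
   }

   int
   main(void)
   {
      size_t sizes[] = {4096, 65536, 16384};

      qsort(sizes, 3, sizeof(sizes[0]), compare_sz);

      for (int i = 0; i < 3; i++)
         printf("%zu\n", sizes[i]);

      return 0;
   }

Returning an explicit 1/-1/0 avoids the classic "a - b" comparator shortcut, which misbehaves when the operands are large or unsigned.
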
239 bool verbose = dev->debug & AGX_DBG_BODUMPVERBOSE; in agx_bo_dump_all()
242 fprintf(stderr, "---\n"); in agx_bo_dump_all()
244 for (uint32_t handle = 0; handle < dev->max_handle; handle++) { in agx_bo_dump_all()
246 if (!bo->size) in agx_bo_dump_all()
250 fprintf(stderr, "%u: %s %zu KiB\n", handle, bo->label, in agx_bo_dump_all()
251 bo->size / 1024); in agx_bo_dump_all()
256 assert(bo->label != NULL && "Everything must be labeled"); in agx_bo_dump_all()
258 struct hash_entry *ent = _mesa_hash_table_search(totals, bo->label); in agx_bo_dump_all()
261 ls = ent->data; in agx_bo_dump_all()
264 ls->label = bo->label; in agx_bo_dump_all()
265 _mesa_hash_table_insert(totals, bo->label, ls); in agx_bo_dump_all()
282 stats[i++] = ent->data; in agx_bo_dump_all()
297 fprintf(stderr, "---\n\n"); in agx_bo_dump_all()
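
agx_bo_dump_all() walks every live handle, accumulates per-label totals in a hash table keyed by bo->label, and later sorts and prints them. A simplified, self-contained sketch of that aggregation, using a linear array scan in place of _mesa_hash_table (all names invented; the caller must size the array for the number of distinct labels):

   #include <stddef.h>
   #include <string.h>

   struct label_stat {
      const char *label;
      size_t alloc_B;
      unsigned count;
   };

   /* Find the accumulator for a label, creating it on first use. A linear
    * scan stands in for the hash-table lookup in the listing. */
   static struct label_stat *
   find_or_add(struct label_stat *stats, unsigned *n, const char *label)
   {
      for (unsigned i = 0; i < *n; i++) {
         if (!strcmp(stats[i].label, label))
            return &stats[i];
      }

      stats[*n] = (struct label_stat){.label = label};
      return &stats[(*n)++];
   }

   /* Per-BO accounting, mirroring account_bo() above. */
   static void
   account(struct label_stat *stats, unsigned *n, const char *label,
           size_t size_B)
   {
      struct label_stat *ls = find_or_add(stats, n, label);

      ls->count++;
      ls->alloc_B += size_B;
   }
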
307 if (likely(!(dev->debug & (AGX_DBG_BODUMP | AGX_DBG_BODUMPVERBOSE)))) in agx_bo_dump_all_periodic()
326 /* Don't return to cache if there are still references */ in agx_bo_unreference()
327 if (p_atomic_dec_return(&bo->refcnt)) in agx_bo_unreference()
330 pthread_mutex_lock(&dev->bo_map_lock); in agx_bo_unreference()
335 if (p_atomic_read(&bo->refcnt) == 0) { in agx_bo_unreference()
336 assert(!p_atomic_read_relaxed(&bo->writer)); in agx_bo_unreference()
338 if (dev->debug & AGX_DBG_TRACE) in agx_bo_unreference()
339 agxdecode_track_free(dev->agxdecode, bo); in agx_bo_unreference()
347 pthread_mutex_unlock(&dev->bo_map_lock); in agx_bo_unreference()
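
agx_bo_unreference() drops a reference outside any lock, then re-checks the count under dev->bo_map_lock before freeing, because another thread can look the BO up by handle and take a new reference in the meantime. A minimal sketch of that pattern with C11 atomics and simplified names:

   #include <pthread.h>
   #include <stdatomic.h>

   /* Simplified BO with just a reference count. */
   struct demo_bo {
      atomic_int refcnt;
   };

   static void
   demo_bo_unreference(struct demo_bo *bo, pthread_mutex_t *map_lock,
                       void (*recycle_or_free)(struct demo_bo *))
   {
      int newcount = atomic_fetch_sub(&bo->refcnt, 1) - 1;

      /* Don't return to cache if there are still references. */
      if (newcount > 0)
         return;

      pthread_mutex_lock(map_lock);

      /* Re-check under the lock: a concurrent handle lookup may have revived
       * the BO after we dropped our reference. */
      if (atomic_load(&bo->refcnt) == 0)
         recycle_or_free(bo);

      pthread_mutex_unlock(map_lock);
   }
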
358 size = ALIGN_POT(size, (size_t)dev->params.vm_page_size); in agx_bo_create()
359 align = MAX2(align, dev->params.vm_page_size); in agx_bo_create()
361 /* See if we have a BO already in the cache */ in agx_bo_create()
366 p_atomic_inc(&dev->bo_cache.hits); in agx_bo_create()
368 p_atomic_inc(&dev->bo_cache.misses); in agx_bo_create()
371 * for something in the cache. But if there's nothing suitable, we should in agx_bo_create()
372 * flush the cache to make space for the new allocation. in agx_bo_create()
375 bo = dev->ops.bo_alloc(dev, size, align, flags); in agx_bo_create()
380 bo = dev->ops.bo_alloc(dev, size, align, flags); in agx_bo_create()
388 bo->label = label; in agx_bo_create()
389 p_atomic_set(&bo->refcnt, 1); in agx_bo_create()
391 if (dev->debug & AGX_DBG_TRACE) { in agx_bo_create()
393 agxdecode_track_alloc(dev->agxdecode, bo); in agx_bo_create()
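
agx_bo_create() rounds the request up to whole VM pages, tries the cache first (counting hits and misses), falls back to a fresh kernel allocation, and, per the comment above, flushes the cache and retries once if that allocation fails. A compilable sketch of that flow under stated assumptions: the 16 KiB page size is only a placeholder for dev->params.vm_page_size, the demo_bo struct and the cache_fetch/kernel_alloc/cache_evict_all helpers are invented stand-ins, and the hit/miss counters are omitted:

   #include <stdio.h>
   #include <stdlib.h>

   #define VM_PAGE_SIZE ((size_t)16384) /* placeholder page size */

   #define ALIGN_POT(x, pot) (((x) + (pot) - 1) & ~((pot) - 1))
   #define MAX2(a, b)        ((a) > (b) ? (a) : (b))

   struct demo_bo {
      size_t size;
      const char *label;
      int refcnt;
   };

   static struct demo_bo *
   cache_fetch(size_t size, size_t align, unsigned flags)
   {
      (void)size; (void)align; (void)flags;
      return NULL; /* this sketch always misses */
   }

   static struct demo_bo *
   kernel_alloc(size_t size, size_t align, unsigned flags)
   {
      (void)align; (void)flags;
      struct demo_bo *bo = calloc(1, sizeof(*bo));
      if (bo)
         bo->size = size;
      return bo;
   }

   static void
   cache_evict_all(void)
   {
      /* Would free every cached BO to give the kernel allocation headroom. */
   }

   static struct demo_bo *
   demo_bo_create(size_t size, size_t align, unsigned flags, const char *label)
   {
      /* Round the request up to whole VM pages, as the listing does. */
      size = ALIGN_POT(size, VM_PAGE_SIZE);
      align = MAX2(align, VM_PAGE_SIZE);

      /* Fast path: recycle a compatible BO from the cache. */
      struct demo_bo *bo = cache_fetch(size, align, flags);

      /* Miss: ask the kernel for a fresh BO. */
      if (!bo)
         bo = kernel_alloc(size, align, flags);

      /* If that failed, the cache may be hoarding memory: flush it and retry
       * once before giving up. */
      if (!bo) {
         cache_evict_all();
         bo = kernel_alloc(size, align, flags);
      }

      if (!bo)
         return NULL;

      bo->label = label;
      bo->refcnt = 1;
      return bo;
   }

   int
   main(void)
   {
      struct demo_bo *bo = demo_bo_create(1000, 64, 0, "demo");

      if (bo)
         printf("created %zu-byte BO '%s'\n", bo->size, bo->label);

      free(bo);
      return 0;
   }
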