• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**************************************************************************
2  *
3  * Copyright 2008 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 /**
29  * @file
30  * Improved cache implementation.
31  *
32  * Fixed size array with linear probing on collision and LRU eviction
33  * on full.
34  *
35  * @author Jose Fonseca <jfonseca@vmware.com>
36  */
37 
38 
39 #include "pipe/p_compiler.h"
40 #include "util/u_debug.h"
41 
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_cache.h"
45 #include "util/list.h"
46 
/**
 * One slot of the fixed-size hash table, doubling as a node of the
 * cache-wide LRU list.
 */
struct util_cache_entry
{
   /* Slot lifecycle.  The EMPTY/DELETED distinction matters for linear
    * probing: a probe may stop at an EMPTY slot (never occupied), but
    * must continue past a DELETED one, since an earlier insertion of
    * the sought key could have probed beyond it (see
    * util_cache_entry_get).
    */
   enum { EMPTY = 0, FILLED, DELETED } state;
   /* Cached hash of 'key', compared before calling the (potentially
    * expensive) user compare callback. */
   uint32_t hash;

   /* Node in the cache's LRU list; only linked while state == FILLED. */
   struct list_head list;

   void *key;
   void *value;

#ifdef DEBUG
   /* Number of times this slot has been (re)filled, used by
    * util_cache_destroy's hash-quality check. */
   unsigned count;
#endif
};
61 
62 
struct util_cache
{
   /** Hash function applied to keys */
   uint32_t (*hash)(const void *key);

   /** Compare two keys; returns 0 on equality */
   int (*compare)(const void *key1, const void *key2);

   /** Destroy a (key, value) pair; may be NULL for no-op destruction */
   void (*destroy)(void *key, void *value);

   /** Capacity of the entries array (requested size times
    *  CACHE_DEFAULT_ALPHA, to keep the load factor low) */
   uint32_t size;

   /** Array [size] of entries */
   struct util_cache_entry *entries;

   /** Number of FILLED entries currently in the cache */
   unsigned count;

   /** Sentinel entry whose 'list' member heads the LRU list, sorted
    *  from Least-recently used to Most-recently used; never FILLED
    *  itself */
   struct util_cache_entry lru;
};
86 
87 
/* Forward declaration: debug-only invariant checker (no-op unless DEBUG). */
static void
ensure_sanity(const struct util_cache *cache);

/* Over-allocation factor: the table holds ALPHA times the requested
 * number of entries, so linear probing runs at a low load factor. */
#define CACHE_DEFAULT_ALPHA 2
92 
93 /**
94  * Create a new cache with 'size' entries.  Also provide functions for
95  * hashing keys, comparing keys and destroying (key,value) pairs.
96  */
97 struct util_cache *
util_cache_create(uint32_t (* hash)(const void * key),int (* compare)(const void * key1,const void * key2),void (* destroy)(void * key,void * value),uint32_t size)98 util_cache_create(uint32_t (*hash)(const void *key),
99                   int (*compare)(const void *key1, const void *key2),
100                   void (*destroy)(void *key, void *value),
101                   uint32_t size)
102 {
103    struct util_cache *cache;
104 
105    cache = CALLOC_STRUCT(util_cache);
106    if (!cache)
107       return NULL;
108 
109    cache->hash = hash;
110    cache->compare = compare;
111    cache->destroy = destroy;
112 
113    list_inithead(&cache->lru.list);
114 
115    size *= CACHE_DEFAULT_ALPHA;
116    cache->size = size;
117 
118    cache->entries = CALLOC(size, sizeof(struct util_cache_entry));
119    if (!cache->entries) {
120       FREE(cache);
121       return NULL;
122    }
123 
124    ensure_sanity(cache);
125    return cache;
126 }
127 
128 
129 /**
130  * Try to find a cache entry, given the key and hash of the key.
131  */
132 static struct util_cache_entry *
util_cache_entry_get(struct util_cache * cache,uint32_t hash,const void * key)133 util_cache_entry_get(struct util_cache *cache,
134                      uint32_t hash,
135                      const void *key)
136 {
137    struct util_cache_entry *first_unfilled = NULL;
138    uint32_t index = hash % cache->size;
139    uint32_t probe;
140 
141    /* Probe until we find either a matching FILLED entry or an EMPTY
142     * slot (which has never been occupied).
143     *
144     * Deleted or non-matching slots are not indicative of completion
145     * as a previous linear probe for the same key could have continued
146     * past this point.
147     */
148    for (probe = 0; probe < cache->size; probe++) {
149       uint32_t i = (index + probe) % cache->size;
150       struct util_cache_entry *current = &cache->entries[i];
151 
152       if (current->state == FILLED) {
153          if (current->hash == hash &&
154              cache->compare(key, current->key) == 0)
155             return current;
156       }
157       else {
158          if (!first_unfilled)
159             first_unfilled = current;
160 
161          if (current->state == EMPTY)
162             return first_unfilled;
163       }
164    }
165 
166    return NULL;
167 }
168 
169 static inline void
util_cache_entry_destroy(struct util_cache * cache,struct util_cache_entry * entry)170 util_cache_entry_destroy(struct util_cache *cache,
171                          struct util_cache_entry *entry)
172 {
173    void *key = entry->key;
174    void *value = entry->value;
175 
176    entry->key = NULL;
177    entry->value = NULL;
178 
179    if (entry->state == FILLED) {
180       list_del(&entry->list);
181       cache->count--;
182 
183       if (cache->destroy)
184          cache->destroy(key, value);
185 
186       entry->state = DELETED;
187    }
188 }
189 
190 
/**
 * Insert an entry in the cache, given the key and value.
 *
 * Takes ownership of 'key' and 'value' (they are passed to the
 * 'destroy' callback when the entry is later evicted or removed).
 */
void
util_cache_set(struct util_cache *cache,
               void *key,
               void *value)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return;
   hash = cache->hash(key);
   /* Either the existing entry for this key, or the first reusable
    * slot along its probe sequence. */
   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      /* Table saturated: reuse the slot at the tail of the LRU list. */
      entry = list_last_entry(&cache->lru.list, struct util_cache_entry, list);

   /* Evict the LRU tail once the load factor reaches 1/ALPHA, keeping
    * probe sequences short.  If 'entry' IS that tail, the destroy
    * below becomes a no-op (the state is no longer FILLED), so the
    * double call is harmless. */
   if (cache->count >= cache->size / CACHE_DEFAULT_ALPHA)
      util_cache_entry_destroy(cache, list_last_entry(&cache->lru.list, struct util_cache_entry, list));

   /* Release whatever pair the chosen slot previously held. */
   util_cache_entry_destroy(cache, entry);

#ifdef DEBUG
   ++entry->count;
#endif

   /* Fill the slot and link it into the LRU list. */
   entry->key = key;
   entry->hash = hash;
   entry->value = value;
   entry->state = FILLED;
   list_add(&cache->lru.list, &entry->list);
   cache->count++;

   ensure_sanity(cache);
}
228 
229 
230 /**
231  * Search the cache for an entry with the given key.  Return the corresponding
232  * value or NULL if not found.
233  */
234 void *
util_cache_get(struct util_cache * cache,const void * key)235 util_cache_get(struct util_cache *cache,
236                const void *key)
237 {
238    struct util_cache_entry *entry;
239    uint32_t hash;
240 
241    assert(cache);
242    if (!cache)
243       return NULL;
244    hash = cache->hash(key);
245    entry = util_cache_entry_get(cache, hash, key);
246    if (!entry)
247       return NULL;
248 
249    if (entry->state == FILLED) {
250       list_move_to(&cache->lru.list, &entry->list);
251    }
252 
253    return entry->value;
254 }
255 
256 
257 /**
258  * Remove all entries from the cache.  The 'destroy' function will be called
259  * for each entry's (key, value).
260  */
261 void
util_cache_clear(struct util_cache * cache)262 util_cache_clear(struct util_cache *cache)
263 {
264    uint32_t i;
265 
266    assert(cache);
267    if (!cache)
268       return;
269 
270    for (i = 0; i < cache->size; ++i) {
271       util_cache_entry_destroy(cache, &cache->entries[i]);
272       cache->entries[i].state = EMPTY;
273    }
274 
275    assert(cache->count == 0);
276    assert(list_is_empty(&cache->lru.list));
277    ensure_sanity(cache);
278 }
279 
280 
/**
 * Destroy the cache and all entries.
 */
void
util_cache_destroy(struct util_cache *cache)
{
   assert(cache);
   if (!cache)
      return;

#ifdef DEBUG
   /* Sanity-check the distribution of per-slot fill counts against a
    * normal approximation of the expected Poisson distribution; a big
    * outlier suggests a poor hash function.
    *
    * NOTE(review): cache->count is the number of *currently* FILLED
    * entries, which never exceeds cache->size (see ensure_sanity), so
    * this condition can never be true and the check is dead code.  A
    * total-insertions counter was presumably intended -- verify. */
   if (cache->count >= 20*cache->size) {
      /* Normal approximation of the Poisson distribution */
      double mean = (double)cache->count/(double)cache->size;
      double stddev = sqrt(mean);
      unsigned i;
      for (i = 0; i < cache->size; ++i) {
         double z = fabs(cache->entries[i].count - mean)/stddev;
         /* This assert should not fail 99.9999998027% of the times, unless
          * the hash function is a poor one */
         assert(z <= 6.0);
      }
   }
#endif

   /* Destroys all pairs via the user callback before freeing storage. */
   util_cache_clear(cache);

   FREE(cache->entries);
   FREE(cache);
}
311 
312 
313 /**
314  * Remove the cache entry which matches the given key.
315  */
316 void
util_cache_remove(struct util_cache * cache,const void * key)317 util_cache_remove(struct util_cache *cache,
318                   const void *key)
319 {
320    struct util_cache_entry *entry;
321    uint32_t hash;
322 
323    assert(cache);
324    if (!cache)
325       return;
326 
327    hash = cache->hash(key);
328 
329    entry = util_cache_entry_get(cache, hash, key);
330    if (!entry)
331       return;
332 
333    if (entry->state == FILLED)
334       util_cache_entry_destroy(cache, entry);
335 
336    ensure_sanity(cache);
337 }
338 
339 
/**
 * Debug-only consistency check: verifies every slot is in a legal
 * state, that cached hashes match, that the FILLED count agrees with
 * cache->count, and that the LRU list links exactly cache->count
 * nodes back to its sentinel.  Compiles to a no-op without DEBUG.
 */
static void
ensure_sanity(const struct util_cache *cache)
{
#ifdef DEBUG
   unsigned i, cnt = 0;

   assert(cache);
   for (i = 0; i < cache->size; i++) {
      struct util_cache_entry *header = &cache->entries[i];

      assert(header);
      assert(header->state == FILLED ||
             header->state == EMPTY ||
             header->state == DELETED);
      if (header->state == FILLED) {
         cnt++;
         assert(header->hash == cache->hash(header->key));
      }
   }

   assert(cnt == cache->count);
   assert(cache->size >= cnt);

   if (cache->count == 0) {
      assert(list_is_empty(&cache->lru.list));
   }
   else {
      const struct list_head *node = &cache->lru.list;

      assert(!list_is_empty(&cache->lru.list));

      /* Walk exactly cache->count 'next' links from the sentinel; a
       * well-formed LRU list visits each FILLED entry once and the
       * final link returns to the sentinel.
       *
       * (Bug fix: the previous code did
       *    header = list_entry(&header, ...);
       * -- taking the address of a *local pointer variable*, so it
       * never advanced through the list, and its initial
       * list_entry(&cache->lru, ...) treated the sentinel entry
       * itself as a list_head, making the final comparison
       * meaningless.) */
      for (i = 0; i < cache->count; i++) {
         node = node->next;
         /* List shorter than cache->count would wrap to the sentinel
          * early. */
         assert(node != &cache->lru.list);
      }
      /* List longer than cache->count would not close here. */
      assert(node->next == &cache->lru.list);
   }
#endif

   (void)cache;
}
382