• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 
#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"

#include <assert.h>

#if defined(write)
#undef write
#endif
33 
34 void
lws_cache_clear_matches(lws_dll2_owner_t * results_owner)35 lws_cache_clear_matches(lws_dll2_owner_t *results_owner)
36 {
37 	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, results_owner->head) {
38 		lws_cache_match_t *item = lws_container_of(d, lws_cache_match_t,
39 							   list);
40 		lws_dll2_remove(d);
41 		lws_free(item);
42 	} lws_end_foreach_dll_safe(d, d1);
43 }
44 
/*
 * Arrange for cb to be called on this cache's sul at absolute time e
 * (usecs); lws_sul_schedule() takes a relative delay, so we convert by
 * subtracting the current time.
 */
void
lws_cache_schedule(struct lws_cache_ttl_lru *cache, sul_cb_t cb, lws_usec_t e)
{
	lwsl_cache("%s: %s schedule %llu\n", __func__, cache->info.name,
			(unsigned long long)e);

	/* note: a rescheduling call replaces any pending event on this sul */
	lws_sul_schedule(cache->info.cx, cache->info.tsi, &cache->sul, cb,
			 e - lws_now_usecs());
}
54 
55 int
lws_cache_write_through(struct lws_cache_ttl_lru * cache,const char * specific_key,const uint8_t * source,size_t size,lws_usec_t expiry,void ** ppay)56 lws_cache_write_through(struct lws_cache_ttl_lru *cache,
57 			const char *specific_key, const uint8_t *source,
58 			size_t size, lws_usec_t expiry, void **ppay)
59 {
60 	struct lws_cache_ttl_lru *levels[LWS_CACHE_MAX_LEVELS], *c = cache;
61 	int n = 0, r = 0;
62 
63 	lws_cache_item_remove(cache, specific_key);
64 
65 	/* starting from L1 */
66 
67 	do {
68 		levels[n++] = c;
69 		c = c->info.parent;
70 	} while (c && n < (int)LWS_ARRAY_SIZE(levels));
71 
72 	/* starting from outermost cache level */
73 
74 	while (n) {
75 		n--;
76 		r = levels[n]->info.ops->write(levels[n], specific_key,
77 						source, size, expiry, ppay);
78 	}
79 
80 	return r;
81 }
82 
83 /*
84  * We want to make a list of unique keys that exist at any cache level
85  * matching a wildcard search key.
86  *
87  * If L1 has a cached version though, we will just use that.
88  */
89 
90 int
lws_cache_lookup(struct lws_cache_ttl_lru * cache,const char * wildcard_key,const void ** pdata,size_t * psize)91 lws_cache_lookup(struct lws_cache_ttl_lru *cache, const char *wildcard_key,
92 		 const void **pdata, size_t *psize)
93 {
94 	struct lws_cache_ttl_lru *l1 = cache;
95 	lws_dll2_owner_t results_owner;
96 	lws_usec_t expiry = 0;
97 	char meta_key[128];
98 	uint8_t *p, *temp;
99 	size_t sum = 0;
100 	int n;
101 
102 	memset(&results_owner, 0, sizeof(results_owner));
103 	meta_key[0] = META_ITEM_LEADING;
104 	lws_strncpy(&meta_key[1], wildcard_key, sizeof(meta_key) - 2);
105 
106 	/*
107 	 * If we have a cached result set in L1 already, return that
108 	 */
109 
110 	if (!l1->info.ops->get(l1, meta_key, pdata, psize))
111 		return 0;
112 
113 	/*
114 	 * No, we have to do the actual lookup work in the backing store layer
115 	 * to get results for this...
116 	 */
117 
118 	while (cache->info.parent)
119 		cache = cache->info.parent;
120 
121 	if (cache->info.ops->lookup(cache, wildcard_key, &results_owner)) {
122 		/* eg, OOM */
123 
124 		lwsl_cache("%s: bs lookup fail\n", __func__);
125 
126 		lws_cache_clear_matches(&results_owner);
127 		return 1;
128 	}
129 
130 	/*
131 	 * Scan the results, we want to know how big a payload it needs in
132 	 * the cache, and we want to know the earliest expiry of any of the
133 	 * component parts, so the meta cache entry for these results can be
134 	 * expired when any of the results would expire.
135 	 */
136 
137 	lws_start_foreach_dll(struct lws_dll2 *, d, results_owner.head) {
138 		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
139 							list);
140 		sum += 8; /* payload size, name length */
141 		sum += m->tag_size + 1;
142 
143 		if (m->expiry && (!expiry || expiry < m->expiry))
144 			expiry = m->expiry;
145 
146 	} lws_end_foreach_dll(d);
147 
148 	lwsl_cache("%s: results %d, size %d\n", __func__,
149 		    (int)results_owner.count, (int)sum);
150 
151 	temp = lws_malloc(sum, __func__);
152 	if (!temp) {
153 		lws_cache_clear_matches(&results_owner);
154 		return 1;
155 	}
156 
157 	/*
158 	 * Fill temp with the serialized results
159 	 */
160 
161 	p = temp;
162 	lws_start_foreach_dll(struct lws_dll2 *, d, results_owner.head) {
163 		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
164 							list);
165 
166 		/* we don't copy the payload in, but take note of its size */
167 		lws_ser_wu32be(p, (uint32_t)m->payload_size);
168 		p += 4;
169 		/* length of the tag name (there is an uncounted NUL after) */
170 		lws_ser_wu32be(p, (uint32_t)m->tag_size);
171 		p += 4;
172 
173 		/* then the tag name, plus the extra NUL */
174 		memcpy(p, &m[1], m->tag_size + 1);
175 		p += m->tag_size + 1;
176 
177 	} lws_end_foreach_dll(d);
178 
179 	lws_cache_clear_matches(&results_owner);
180 
181 	/*
182 	 * Create the right amount of space for an L1 record of these results,
183 	 * with its expiry set to the earliest of the results, and copy it in
184 	 * from temp
185 	 */
186 
187 	n = l1->info.ops->write(l1, meta_key, temp, sum, expiry, (void **)&p);
188 	/* done with temp */
189 	lws_free(temp);
190 
191 	if (n)
192 		return 1;
193 
194 	/* point to the results in L1 */
195 
196 	*pdata = p;
197 	*psize = sum;
198 
199 	return 0;
200 }
201 
202 int
lws_cache_item_get(struct lws_cache_ttl_lru * cache,const char * specific_key,const void ** pdata,size_t * psize)203 lws_cache_item_get(struct lws_cache_ttl_lru *cache, const char *specific_key,
204 		   const void **pdata, size_t *psize)
205 {
206 	while (cache) {
207 		if (!cache->info.ops->get(cache, specific_key, pdata, psize)) {
208 			lwsl_cache("%s: hit\n", __func__);
209 			return 0;
210 		}
211 
212 		cache = cache->info.parent;
213 	}
214 
215 	return 1;
216 }
217 
218 int
lws_cache_expunge(struct lws_cache_ttl_lru * cache)219 lws_cache_expunge(struct lws_cache_ttl_lru *cache)
220 {
221 	int ret = 0;
222 
223 	while (cache) {
224 		ret |= cache->info.ops->expunge(cache);
225 
226 		cache = cache->info.parent;
227 	}
228 
229 	return ret;
230 }
231 
232 int
lws_cache_item_remove(struct lws_cache_ttl_lru * cache,const char * wildcard_key)233 lws_cache_item_remove(struct lws_cache_ttl_lru *cache, const char *wildcard_key)
234 {
235 	while (cache) {
236 		if (cache->info.ops->invalidate(cache, wildcard_key))
237 			return 1;
238 
239 		cache = cache->info.parent;
240 	}
241 
242 	return 0;
243 }
244 
/*
 * Report the memory footprint currently accounted to this cache level.
 */
uint64_t
lws_cache_footprint(struct lws_cache_ttl_lru *cache)
{
	return cache->current_footprint;
}
250 
/*
 * Dump this cache level's contents for debugging.  Only does anything in
 * _DEBUG builds, and only if this level's ops implement debug_dump.
 */
void
lws_cache_debug_dump(struct lws_cache_ttl_lru *cache)
{
#if defined(_DEBUG)
	if (cache->info.ops->debug_dump)
		cache->info.ops->debug_dump(cache);
#endif
}
259 
260 int
lws_cache_results_walk(lws_cache_results_t * walk_ctx)261 lws_cache_results_walk(lws_cache_results_t *walk_ctx)
262 {
263 	if (!walk_ctx->size)
264 		return 1;
265 
266 	walk_ctx->payload_len = lws_ser_ru32be(walk_ctx->ptr);
267 	walk_ctx->tag_len = lws_ser_ru32be(walk_ctx->ptr + 4);
268 	walk_ctx->tag = walk_ctx->ptr + 8;
269 
270 	walk_ctx->ptr += walk_ctx->tag_len + 1 + 8;
271 	walk_ctx->size -= walk_ctx->tag_len + 1 + 8;
272 
273 	return 0;
274 }
275 
/*
 * Instantiate a cache level from its creation info.  The ops, name and
 * ops->create members are mandatory; the asserts are ordered so each
 * dereference is only made after the pointer it needs has been checked.
 * Returns the new cache object, or NULL on failure from the ops layer.
 */
struct lws_cache_ttl_lru *
lws_cache_create(const struct lws_cache_creation_info *info)
{
	assert(info);
	assert(info->ops);
	assert(info->name);
	assert(info->ops->create);

	return info->ops->create(info);
}
286 
287 void
lws_cache_destroy(struct lws_cache_ttl_lru ** _cache)288 lws_cache_destroy(struct lws_cache_ttl_lru **_cache)
289 {
290 	lws_cache_ttl_lru_t *cache = *_cache;
291 
292 	if (!cache)
293 		return;
294 
295 	assert(cache->info.ops->destroy);
296 
297 	lws_sul_cancel(&cache->sul);
298 
299 	cache->info.ops->destroy(_cache);
300 }
301