// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <xnnpack/cache.h>

#include <algorithm>  // For std::rotate.
#include <cstdint>    // For uintptr_t.
#include <cstring>    // For std::memcpy.
#include <string>     // For std::string and std::to_string.
#include <thread>     // For std::thread.
#include <vector>     // For std::vector.

#include <xnnpack.h>
#include <xnnpack/common.h>

#include <gtest/gtest.h>

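// Returns a pointer one past the last byte of weights committed to the cache,
// i.e. the position where the next reservation will write.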
static void* cache_end(const xnn_weights_cache* cache) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(cache->cache.weights.start) + cache->cache.weights.size);
}

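// Reserves space in the cache's weights buffer and copies str into it. The
// bytes (and weights.size) are only committed by a subsequent call to
// xnn_get_or_insert_weights_cache.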
static void write_weights(xnn_weights_cache* cache, const std::string& str) {
  ASSERT_NE(nullptr, xnn_reserve_space_in_weights_cache(cache, str.length()));
  std::memcpy(cache_end(cache), str.data(), str.length());
}

TEST(WEIGHTS_CACHE, init_and_release)
{
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  EXPECT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));
  EXPECT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

TEST(WEIGHTS_CACHE, init_with_size_and_release)
{
  constexpr size_t four_mb = 4194304;
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  EXPECT_EQ(xnn_status_success, xnn_init_weights_cache_with_size(&cache, four_mb));
  // Allocation can be rounded up to alignment, so check GE instead of EQ.
  ASSERT_GE(cache.cache.weights.capacity, four_mb);
  EXPECT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

TEST(WEIGHTS_CACHE, release_null)
{
  EXPECT_EQ(xnn_status_success, xnn_release_weights_cache(nullptr));
}

TEST(WEIGHTS_CACHE, get_or_insert)
{
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  EXPECT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));

  write_weights(&cache, "1234");
  ASSERT_EQ(0, xnn_get_or_insert_weights_cache(&cache, cache.cache.weights.start, 4));
  ASSERT_EQ(0, cache.cache.hits);
  ASSERT_EQ(1, cache.cache.misses);
  ASSERT_EQ(4, cache.cache.weights.size);

  void* span2_weights = cache_end(&cache);
  // Simulate a cache hit.
  write_weights(&cache, "1234");
  ASSERT_EQ(0, xnn_get_or_insert_weights_cache(&cache, span2_weights, 4));
  ASSERT_EQ(1, cache.cache.hits);
  ASSERT_EQ(1, cache.cache.misses);
  ASSERT_EQ(4, cache.cache.weights.size);

  void* span3_weights = cache_end(&cache);
  // Simulate a cache miss.
  write_weights(&cache, "5678");
  ASSERT_EQ(4, xnn_get_or_insert_weights_cache(&cache, span3_weights, 4));
  ASSERT_EQ(1, cache.cache.hits);
  ASSERT_EQ(2, cache.cache.misses);
  ASSERT_EQ(2, cache.cache.num_entries);
  ASSERT_EQ(8, cache.cache.weights.size);

  EXPECT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

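// Inserting as many distinct entries as there are buckets should force the hash table to grow.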
TEST(WEIGHTS_CACHE, grow) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  xnn_weights_cache cache;
  EXPECT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));
  size_t old_num_buckets = cache.cache.num_buckets;
  for (size_t i = 0, expected_offset = 0; i < old_num_buckets; i++) {
    // Add many entries to force the cache to grow.
    const std::string s = std::to_string(i);
    write_weights(&cache, s);
    ASSERT_EQ(expected_offset, xnn_get_or_insert_weights_cache(&cache, cache_end(&cache), s.length()));
    expected_offset += s.length();
  }

  ASSERT_EQ(0, cache.cache.hits);
  ASSERT_EQ(old_num_buckets, cache.cache.num_entries);
  // Check that the cache has grown.
  ASSERT_LT(old_num_buckets, cache.cache.num_buckets);
  // Check that all of the entries are still in the cache.
  for (size_t i = 0, expected_offset = 0; i < old_num_buckets; i++) {
    const std::string s = std::to_string(i);
    write_weights(&cache, s);
    ASSERT_EQ(expected_offset, xnn_get_or_insert_weights_cache(&cache, cache_end(&cache), s.length()));
    expected_offset += s.length();
  }
  // And now all of the lookups should be cache hits.
  ASSERT_EQ(old_num_buckets, cache.cache.hits);

  EXPECT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

TEST(WEIGHTS_MEMORY, allocate_and_release) {
  xnn_weights_buffer b;
  ASSERT_EQ(xnn_status_success, xnn_allocate_weights_memory(&b, XNN_DEFAULT_WEIGHTS_BUFFER_SIZE));
  ASSERT_EQ(xnn_status_success, xnn_release_weights_memory(&b));
}

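// Reserving more bytes than the remaining capacity must grow the buffer while keeping its size and contents intact.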
TEST(WEIGHTS_MEMORY, grow) {
  xnn_weights_buffer b;
  ASSERT_EQ(xnn_status_success, xnn_allocate_weights_memory(&b, 8));
  // Allocations are rounded up to the page size, so the capacity might not be 8.
  size_t old_capacity = b.capacity;

  std::string junk = "1234";
  std::memcpy(b.start, junk.data(), junk.length());
  b.size += junk.length();
  ASSERT_EQ(4, b.size);
  const uintptr_t old_weights = reinterpret_cast<uintptr_t>(b.start);

  // This should be a no-op, since we have enough space.
  ASSERT_EQ(xnn_status_success, xnn_reserve_weights_memory(&b, 4));
  ASSERT_EQ(old_weights, reinterpret_cast<uintptr_t>(b.start));

  // Simulate copying bytes until the buffer is full.
  b.size += (old_capacity - b.size);

  const size_t old_size = b.size;
  ASSERT_EQ(xnn_status_success, xnn_reserve_weights_memory(&b, 4));

  // After growing, the new capacity should be bigger than the old one.
  ASSERT_LT(old_capacity, b.capacity);
  // At least 4 bytes should be free.
  ASSERT_GE(b.capacity, b.size + 4);
  // But the size stays the same.
  ASSERT_EQ(old_size, b.size);

  // Check that the contents remain after growing.
  std::string actual = std::string(static_cast<char*>(b.start), static_cast<char*>(b.start) + junk.length());
  ASSERT_EQ(junk, actual);

  ASSERT_EQ(xnn_status_success, xnn_release_weights_memory(&b));
}

TEST(WEIGHTS_CACHE, finalize_empty) {
  xnn_weights_buffer b;
  const size_t initial_capacity = 1024 * 1024;  // 1MB.
  ASSERT_EQ(xnn_status_success, xnn_allocate_weights_memory(&b, initial_capacity));

  ASSERT_EQ(0, b.size);
  ASSERT_EQ(initial_capacity, b.capacity);

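  // Finalizing an empty buffer should release all of its memory.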
  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_memory(&b));
  ASSERT_EQ(0, b.size);
  ASSERT_EQ(0, b.capacity);

  ASSERT_EQ(xnn_status_success, xnn_release_weights_memory(&b));
}

TEST(WEIGHTS_CACHE, finalize) {
  xnn_weights_buffer b;
  const size_t initial_capacity = 1024 * 1024;  // 1MB.
  ASSERT_EQ(xnn_status_success, xnn_allocate_weights_memory(&b, initial_capacity));
  const size_t actual_capacity = b.capacity;

  const std::string junk = "1234";
  std::memcpy(b.start, junk.data(), junk.length());
  b.size += junk.length();
  ASSERT_EQ(4, b.size);

  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_memory(&b));
  #if XNN_PLATFORM_WEB
    // Web does not support partial unmapping.
    ASSERT_EQ(actual_capacity, b.capacity);
  #else
    // The final capacity depends on the page size, since it is page-aligned; just check that it shrunk.
    ASSERT_GE(actual_capacity, b.capacity);
  #endif
  ASSERT_EQ(4, b.size);

  ASSERT_EQ(xnn_status_success, xnn_release_weights_memory(&b));
}

TEST(WEIGHTS_CACHE, finalize_twice) {
  xnn_weights_buffer b;
  ASSERT_EQ(xnn_status_success, xnn_allocate_weights_memory(&b, XNN_DEFAULT_WEIGHTS_BUFFER_SIZE));

  const std::string junk = "1234";
  std::memcpy(b.start, junk.data(), junk.length());
  b.size += junk.length();

  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_memory(&b));
  const size_t capacity = b.capacity;
  // Finalizing twice does not error.
  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_memory(&b));
  // Capacity does not change.
  ASSERT_EQ(capacity, b.capacity);
  ASSERT_EQ(4, b.size);

  ASSERT_EQ(xnn_status_success, xnn_release_weights_memory(&b));
}

TEST(WEIGHTS_CACHE, finalize_capacity_smaller_than_page_aligned_size) {
  xnn_weights_buffer b;
  // A capacity smaller than the page size on all platforms.
  ASSERT_EQ(xnn_status_success, xnn_allocate_weights_memory(&b, 8));

  const std::string junk = "1234";
  std::memcpy(b.start, junk.data(), junk.length());
  b.size += junk.length();
  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_memory(&b));
  ASSERT_EQ(4, b.size);
  ASSERT_EQ(xnn_status_success, xnn_release_weights_memory(&b));
}

TEST(WEIGHTS_CACHE, write_many_cache_hits) {
#if XNN_PLATFORM_WEB && !defined(__EMSCRIPTEN_PTHREADS__)
  GTEST_SKIP();
#endif
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  EXPECT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));
  const std::string weights = "0123456789abcdefghij";
  const size_t weights_size = weights.size();
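  // Every thread writes the same weights, so the first insertion is the only
  // miss and the cache ends up with a single entry.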
  auto write = [&] {
    write_weights(&cache, weights);
    xnn_get_or_insert_weights_cache(&cache, cache_end(&cache), weights_size);
  };
  constexpr size_t num_threads = 20;
  std::vector<std::thread> threads;
  threads.reserve(num_threads);

  for (size_t i = 0; i < num_threads; i++) {
    threads.emplace_back(write);
  }
  for (size_t i = 0; i < num_threads; i++) {
    threads[i].join();
  }

  ASSERT_EQ(num_threads - 1, cache.cache.hits);
  ASSERT_EQ(1, cache.cache.num_entries);
  ASSERT_EQ(weights_size, cache.cache.weights.size);
  EXPECT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

TEST(WEIGHTS_CACHE, write_many_cache_misses) {
#if XNN_PLATFORM_WEB && !defined(__EMSCRIPTEN_PTHREADS__)
  GTEST_SKIP();
#endif
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  EXPECT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));
  const std::string weights = "0123456789abcdefghij";
  const size_t weights_size = weights.size();
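  // Each thread inserts a distinct rotation of the weights, so every
  // insertion is a cache miss.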
  auto write = [&](size_t i) {
    std::string rotated_weights = weights;
    std::rotate(rotated_weights.begin(), rotated_weights.begin() + i,
                rotated_weights.end());
    write_weights(&cache, rotated_weights);
    xnn_get_or_insert_weights_cache(&cache, cache_end(&cache), weights_size);
  };
  constexpr size_t num_threads = 20;
  ASSERT_LE(num_threads, weights_size);
  std::vector<std::thread> threads;
  threads.reserve(num_threads);

  for (size_t i = 0; i < num_threads; i++) {
    threads.emplace_back(write, i);
  }
  for (size_t i = 0; i < num_threads; i++) {
    threads[i].join();
  }

  ASSERT_EQ(0, cache.cache.hits);
  ASSERT_EQ(num_threads, cache.cache.num_entries);
  ASSERT_EQ(weights_size * num_threads, cache.cache.weights.size);
  EXPECT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

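// A hard finalization makes the cache read-only: further reservations and insertions must fail.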
TEST(WEIGHTS_CACHE, operations_on_finalized_cache_hard) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  ASSERT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));

  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_cache(&cache, xnn_weights_cache_finalization_kind_hard));
  // Finalizing an already finalized cache is an error.
  ASSERT_NE(xnn_status_success, xnn_finalize_weights_cache(&cache, xnn_weights_cache_finalization_kind_hard));
  // Trying to reserve is an error.
  ASSERT_EQ(nullptr, xnn_reserve_space_in_weights_cache(&cache, 1));

  // We should not be able to insert into the weights cache, and the call must not time out by trying to unlock a
  // mutex that was never locked (since xnn_reserve_space_in_weights_cache above failed).
  ASSERT_EQ(XNN_CACHE_NOT_FOUND, xnn_get_or_insert_weights_cache(&cache, cache.cache.weights.start, 4));

  ASSERT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

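// A soft finalization keeps the cache writable: reservations that fit in the remaining capacity are still allowed.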
TEST(WEIGHTS_CACHE, operations_on_finalized_cache_soft) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  ASSERT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));

  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_cache(&cache, xnn_weights_cache_finalization_kind_soft));
  // Finalizing a finalized cache is an error.
  ASSERT_NE(xnn_status_success, xnn_finalize_weights_cache(&cache, xnn_weights_cache_finalization_kind_soft));
  // Trying to reserve too much is an error.
  ASSERT_EQ(nullptr, xnn_reserve_space_in_weights_cache(&cache, cache.cache.weights.capacity + 1));

  ASSERT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}

TEST(WEIGHTS_CACHE, insert_into_finalized_cache_soft) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
  struct xnn_weights_cache cache;
  ASSERT_EQ(xnn_status_success, xnn_init_weights_cache(&cache));

  write_weights(&cache, "1234");
  ASSERT_EQ(0, xnn_get_or_insert_weights_cache(&cache, cache.cache.weights.start, 4));
  ASSERT_EQ(xnn_status_success, xnn_finalize_weights_cache(&cache, xnn_weights_cache_finalization_kind_soft));

  // Inserting into a finalized cache is okay as long as the cache memory has space and it is a cache hit.
  ASSERT_LT(cache.cache.weights.size + 4, cache.cache.weights.capacity);
  write_weights(&cache, "1234");
  void* cached_weights = cache_end(&cache);
  ASSERT_EQ(0, xnn_get_or_insert_weights_cache(&cache, cached_weights, 4));
  ASSERT_EQ(4, cache.cache.weights.size);

  // Sufficient space, but a cache miss.
  write_weights(&cache, "4567");
  ASSERT_EQ(XNN_CACHE_NOT_FOUND, xnn_get_or_insert_weights_cache(&cache, cached_weights, 4));

  // Not enough space in the finalized weights cache.
  std::string big_string(cache.cache.weights.capacity, '5');
  // Don't use write_weights here, as it asserts that xnn_reserve_space_in_weights_cache does not return nullptr.
  ASSERT_EQ(nullptr, xnn_reserve_space_in_weights_cache(&cache, big_string.length()));

  ASSERT_EQ(xnn_status_success, xnn_release_weights_cache(&cache));
}