/* NOLINT(build/header_guard) */
/* Copyright 2010 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* template parameters: FN */

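/* Note: this file is a template; it is expected to be #included multiple times
   with different definitions of the FN() and HASHER() macros, so that each
   hasher variant gets its own (name-mangled) copy of the functions below. */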
/* A (forgetful) hash table to the data seen by the compressor, to
   help create backward references to previous data.

   This is a hash map of fixed size (bucket_size_) to a ring buffer of
   fixed size (block_size_). The ring buffer contains the last block_size_
   index positions of the given hash key in the compressed data. */

#define HashLongestMatch HASHER()

static BROTLI_INLINE size_t FN(HashTypeLength)(void) { return 8; }
static BROTLI_INLINE size_t FN(StoreLookahead)(void) { return 8; }

/* HashBytes is the function that chooses the bucket to place the address in. */
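/* With shift == 64 - bucket_bits (see FN(Initialize) below), the returned
   value is an index in [0, bucket_size_), i.e. a valid bucket number. */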
static BROTLI_INLINE uint32_t FN(HashBytes)(const uint8_t* data,
                                            const uint64_t mask,
                                            const int shift) {
  const uint64_t h = (BROTLI_UNALIGNED_LOAD64LE(data) & mask) * kHashMul64Long;
  /* The higher bits contain more mixture from the multiplication,
     so we take our results from there. */
  return (uint32_t)(h >> shift);
}

typedef struct HashLongestMatch {
  /* Number of hash buckets. */
  size_t bucket_size_;
  /* Only the block_size_ newest backward references are kept;
     older ones are forgotten. */
  size_t block_size_;
  /* Right-shift for computing the hash bucket index from the hash value. */
  int hash_shift_;
  /* Mask for selecting the next 4-8 bytes of input. */
  uint64_t hash_mask_;
  /* Mask for accessing entries in a block (in a ring-buffer manner). */
  uint32_t block_mask_;

  /* --- Dynamic size members --- */

  /* Number of entries in a particular bucket. */
  /* uint16_t num[bucket_size]; */

  /* Buckets, each holding block_size_ backward references. */
  /* uint32_t buckets[bucket_size * block_size]; */
} HashLongestMatch;

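/* The dynamic members live directly after the HashLongestMatch struct in the
   same allocation, in this order:
     uint16_t num[bucket_size_];
     uint32_t buckets[bucket_size_ * block_size_];
   The accessors below simply compute pointers into that memory. */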
static BROTLI_INLINE HashLongestMatch* FN(Self)(HasherHandle handle) {
  return (HashLongestMatch*)&(GetHasherCommon(handle)[1]);
}

static BROTLI_INLINE uint16_t* FN(Num)(HashLongestMatch* self) {
  return (uint16_t*)(&self[1]);
}

static BROTLI_INLINE uint32_t* FN(Buckets)(HashLongestMatch* self) {
  return (uint32_t*)(&FN(Num)(self)[self->bucket_size_]);
}

static void FN(Initialize)(
    HasherHandle handle, const BrotliEncoderParams* params) {
  HasherCommon* common = GetHasherCommon(handle);
  HashLongestMatch* self = FN(Self)(handle);
  BROTLI_UNUSED(params);
  self->hash_shift_ = 64 - common->params.bucket_bits;
  self->hash_mask_ = (~((uint64_t)0U)) >> (64 - 8 * common->params.hash_len);
  self->bucket_size_ = (size_t)1 << common->params.bucket_bits;
  self->block_size_ = (size_t)1 << common->params.block_bits;
  self->block_mask_ = (uint32_t)(self->block_size_ - 1);
}

static void FN(Prepare)(HasherHandle handle, BROTLI_BOOL one_shot,
    size_t input_size, const uint8_t* data) {
  HashLongestMatch* self = FN(Self)(handle);
  uint16_t* num = FN(Num)(self);
  /* Partial preparation is 100 times slower (per socket). */
  size_t partial_prepare_threshold = self->bucket_size_ >> 6;
  if (one_shot && input_size <= partial_prepare_threshold) {
    size_t i;
    for (i = 0; i < input_size; ++i) {
      const uint32_t key = FN(HashBytes)(&data[i], self->hash_mask_,
                                         self->hash_shift_);
      num[key] = 0;
    }
  } else {
    memset(num, 0, self->bucket_size_ * sizeof(num[0]));
  }
}

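/* Memory needed for the dynamic part of the hasher: every one of the
   bucket_size buckets has a 2-byte num[] counter plus block_size 4-byte
   ring-buffer slots, hence the (2 + 4 * block_size) factor below. */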
static BROTLI_INLINE size_t FN(HashMemAllocInBytes)(
    const BrotliEncoderParams* params, BROTLI_BOOL one_shot,
    size_t input_size) {
  size_t bucket_size = (size_t)1 << params->hasher.bucket_bits;
  size_t block_size = (size_t)1 << params->hasher.block_bits;
  BROTLI_UNUSED(one_shot);
  BROTLI_UNUSED(input_size);
  return sizeof(HashLongestMatch) + bucket_size * (2 + 4 * block_size);
}

/* Look at 4-8 bytes (hash_mask_ selects hash_len of them) at &data[ix & mask].
   Compute a hash from these, and store the value of ix at that position. */
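/* Note: num[key] is a running count of insertions into the bucket (it wraps
   as a uint16_t); only the newest block_size_ entries remain reachable, since
   the slot index is wrapped with block_mask_. */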
static BROTLI_INLINE void FN(Store)(HasherHandle handle, const uint8_t* data,
    const size_t mask, const size_t ix) {
  HashLongestMatch* self = FN(Self)(handle);
  uint16_t* num = FN(Num)(self);
  const uint32_t key = FN(HashBytes)(&data[ix & mask], self->hash_mask_,
                                     self->hash_shift_);
  const size_t minor_ix = num[key] & self->block_mask_;
  const size_t offset =
      minor_ix + (key << GetHasherCommon(handle)->params.block_bits);
  FN(Buckets)(self)[offset] = (uint32_t)ix;
  ++num[key];
}

static BROTLI_INLINE void FN(StoreRange)(HasherHandle handle,
    const uint8_t* data, const size_t mask, const size_t ix_start,
    const size_t ix_end) {
  size_t i;
  for (i = ix_start; i < ix_end; ++i) {
    FN(Store)(handle, data, mask, i);
  }
}

static BROTLI_INLINE void FN(StitchToPreviousBlock)(HasherHandle handle,
    size_t num_bytes, size_t position, const uint8_t* ringbuffer,
    size_t ringbuffer_mask) {
  if (num_bytes >= FN(HashTypeLength)() - 1 && position >= 3) {
    /* Prepare the hashes for the last three bytes of the last write.
       These could not be calculated before, since they require knowledge
       of both the previous and the current block. */
    FN(Store)(handle, ringbuffer, ringbuffer_mask, position - 3);
    FN(Store)(handle, ringbuffer, ringbuffer_mask, position - 2);
    FN(Store)(handle, ringbuffer, ringbuffer_mask, position - 1);
  }
}

static BROTLI_INLINE void FN(PrepareDistanceCache)(
    HasherHandle handle, int* BROTLI_RESTRICT distance_cache) {
  PrepareDistanceCache(distance_cache,
      GetHasherCommon(handle)->params.num_last_distances_to_check);
}

/* Finds the longest backward match of &data[cur_ix] up to the length of
   max_length and stores the position cur_ix in the hash table.

   REQUIRES: FN(PrepareDistanceCache) must be invoked for current distance cache
             values; if this method is invoked repeatedly with the same distance
             cache values, it is enough to invoke FN(PrepareDistanceCache) once.

   Does not look for matches longer than max_length.
   Does not look for matches further away than max_backward.
   Writes the best match into |out|.
   |out|->score is updated only if a better match is found. */
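/* Search strategy (as implemented below): first try the distances from the
   distance cache, then walk the hash bucket chain from newest to oldest
   (at most block_size_ entries), and finally fall back to the static
   dictionary if nothing improved the score. cur_ix itself is inserted into
   the table before returning. */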
static BROTLI_INLINE void FN(FindLongestMatch)(HasherHandle handle,
    const BrotliEncoderDictionary* dictionary,
    const uint8_t* BROTLI_RESTRICT data, const size_t ring_buffer_mask,
    const int* BROTLI_RESTRICT distance_cache, const size_t cur_ix,
    const size_t max_length, const size_t max_backward,
    const size_t gap, const size_t max_distance,
    HasherSearchResult* BROTLI_RESTRICT out) {
  HasherCommon* common = GetHasherCommon(handle);
  HashLongestMatch* self = FN(Self)(handle);
  uint16_t* num = FN(Num)(self);
  uint32_t* buckets = FN(Buckets)(self);
  const size_t cur_ix_masked = cur_ix & ring_buffer_mask;
  /* Don't accept a short copy from far away. */
  score_t min_score = out->score;
  score_t best_score = out->score;
  size_t best_len = out->len;
  size_t i;
  out->len = 0;
  out->len_code_delta = 0;
  /* Try last distance first. */
  for (i = 0; i < (size_t)common->params.num_last_distances_to_check; ++i) {
    const size_t backward = (size_t)distance_cache[i];
    size_t prev_ix = (size_t)(cur_ix - backward);
    if (prev_ix >= cur_ix) {
      continue;
    }
    if (BROTLI_PREDICT_FALSE(backward > max_backward)) {
      continue;
    }
    prev_ix &= ring_buffer_mask;

    if (cur_ix_masked + best_len > ring_buffer_mask ||
        prev_ix + best_len > ring_buffer_mask ||
        data[cur_ix_masked + best_len] != data[prev_ix + best_len]) {
      continue;
    }
    {
      const size_t len = FindMatchLengthWithLimit(&data[prev_ix],
                                                  &data[cur_ix_masked],
                                                  max_length);
      if (len >= 3 || (len == 2 && i < 2)) {
        /* Comparing for >= 2 does not change the semantics, but just saves
           a few unnecessary binary logarithms in the backward reference
           score, since we are not interested in such short matches. */
        score_t score = BackwardReferenceScoreUsingLastDistance(len);
        if (best_score < score) {
          if (i != 0) score -= BackwardReferencePenaltyUsingLastDistance(i);
          if (best_score < score) {
            best_score = score;
            best_len = len;
            out->len = best_len;
            out->distance = backward;
            out->score = best_score;
          }
        }
      }
    }
  }
  {
    const uint32_t key = FN(HashBytes)(
        &data[cur_ix_masked], self->hash_mask_, self->hash_shift_);
    uint32_t* BROTLI_RESTRICT bucket =
        &buckets[key << common->params.block_bits];
    const size_t down =
        (num[key] > self->block_size_) ?
        (num[key] - self->block_size_) : 0u;
    for (i = num[key]; i > down;) {
      size_t prev_ix = bucket[--i & self->block_mask_];
      const size_t backward = cur_ix - prev_ix;
      if (BROTLI_PREDICT_FALSE(backward > max_backward)) {
        break;
      }
      prev_ix &= ring_buffer_mask;
      if (cur_ix_masked + best_len > ring_buffer_mask ||
          prev_ix + best_len > ring_buffer_mask ||
          data[cur_ix_masked + best_len] != data[prev_ix + best_len]) {
        continue;
      }
      {
        const size_t len = FindMatchLengthWithLimit(&data[prev_ix],
                                                    &data[cur_ix_masked],
                                                    max_length);
        if (len >= 4) {
          /* Comparing for >= 3 does not change the semantics, but just saves
             a few unnecessary binary logarithms in the backward reference
             score, since we are not interested in such short matches. */
          score_t score = BackwardReferenceScore(len, backward);
          if (best_score < score) {
            best_score = score;
            best_len = len;
            out->len = best_len;
            out->distance = backward;
            out->score = best_score;
          }
        }
      }
    }
    bucket[num[key] & self->block_mask_] = (uint32_t)cur_ix;
    ++num[key];
  }
  if (min_score == out->score) {
    SearchInStaticDictionary(dictionary,
        handle, &data[cur_ix_masked], max_length, max_backward + gap,
        max_distance, out, BROTLI_FALSE);
  }
}

#undef HashLongestMatch