/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"

#include <assert.h>
#include <string.h>

/* This is here for grpc_is_binary_header
 * TODO(murgatroid99): Remove this
 */
#include <grpc/grpc.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/ext/transport/chttp2/transport/hpack_table.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/validate_metadata.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"

namespace {
/* (Maybe-cuckoo) hpack encoder hash table implementation.

   This hashtable implementation is a subset of a proper cuckoo hash; while we
   have fallback cells that a value can be hashed to if the first cell is full,
   we do not attempt to iteratively rearrange entries into backup cells to get
   things to fit. Instead, if both a cell and the backup cell for a value are
   occupied, the older existing entry is evicted.

   Note that we can disable backup-cell picking by setting
   GRPC_HPACK_ENCODER_USE_CUCKOO_HASH to 0. In that case, we simply evict an
   existing entry rather than try to use a backup. Hence, "maybe-cuckoo."
   TODO(arjunroy): Add unit tests for hashtable implementation. */
#define GRPC_HPACK_ENCODER_USE_CUCKOO_HASH 1
#define HASH_FRAGMENT_MASK (GRPC_CHTTP2_HPACKC_NUM_VALUES - 1)
#define HASH_FRAGMENT_1(x) ((x)&HASH_FRAGMENT_MASK)
#define HASH_FRAGMENT_2(x) \
  (((x) >> GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS) & HASH_FRAGMENT_MASK)
#define HASH_FRAGMENT_3(x) \
  (((x) >> (GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS * 2)) & HASH_FRAGMENT_MASK)
#define HASH_FRAGMENT_4(x) \
  (((x) >> (GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS * 3)) & HASH_FRAGMENT_MASK)
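/* Each HASH_FRAGMENT_n selects an independent
   GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS-wide slice of the 32-bit hash:
   fragment 1 indexes the popularity filter, while fragments 2 and 3 pick the
   primary and backup cuckoo cells. Fragment 4 is defined for completeness but
   does not appear to be used in this file. */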

/* don't consider adding anything bigger than this to the hpack table */
constexpr size_t kMaxDecoderSpaceUsage = 512;
constexpr size_t kDataFrameHeaderSize = 9;
constexpr uint8_t kMaxFilterValue = 255;

/* if the probability of this item being seen again is < 1/x then don't add
   it to the table */
#define ONE_ON_ADD_PROBABILITY (GRPC_CHTTP2_HPACKC_NUM_VALUES >> 1)
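/* In other words: an element is only promoted into the table once its
   popularity-filter count reaches filter_elems_sum / ONE_ON_ADD_PROBABILITY,
   i.e. once it accounts for a meaningful fraction of recently seen headers
   (see CanAddToHashtable below). */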
/* The hpack index we encode over the wire. Meaningful to the hpack encoder and
   parser on the remote end as well as HTTP2. *Not* the same as
   HpackEncoderSlotHash, which is only meaningful to the hpack encoder
   implementation (HpackEncoderSlotHash is used for the hashtable
   implementation when mapping from metadata to HpackEncoderIndex). */
typedef uint32_t HpackEncoderIndex;
/* Internal-table bookkeeping (*not* the hpack index). */
typedef uint32_t HpackEncoderSlotHash;

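/* The two comparators below expose the same Null/IsNull/Equals/Ref/Unref
   interface so that the hashtable helpers that follow can be instantiated
   either for bare key refcounts (key_table) or for fully interned metadata
   elements (elem_table). */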
struct SliceRefComparator {
  typedef grpc_slice_refcount* Type;
  static grpc_slice_refcount* Null() { return nullptr; }
  static bool IsNull(const grpc_slice_refcount* sref) {
    return sref == nullptr;
  }
  static bool Equals(const grpc_slice_refcount* s1,
                     const grpc_slice_refcount* s2) {
    return s1 == s2;
  }
  static void Ref(grpc_slice_refcount* sref) {
    GPR_DEBUG_ASSERT(sref != nullptr);
    sref->Ref();
  }
  static void Unref(grpc_slice_refcount* sref) {
    GPR_DEBUG_ASSERT(sref != nullptr);
    sref->Unref();
  }
};

struct MetadataComparator {
  typedef grpc_mdelem Type;
  static const grpc_mdelem Null() { return {0}; }
  static bool IsNull(const grpc_mdelem md) { return md.payload == 0; }
  static bool Equals(const grpc_mdelem md1, const grpc_mdelem md2) {
    return md1.payload == md2.payload;
  }
  static void Ref(grpc_mdelem md) {
    GPR_DEBUG_ASSERT(md.payload != 0);
    GRPC_MDELEM_REF(md);
  }
  static void Unref(grpc_mdelem md) {
    GPR_DEBUG_ASSERT(md.payload != 0);
    GRPC_MDELEM_UNREF(md);
  }
};

/* Index table management */
template <typename Hashtable>
static HpackEncoderIndex HpackIndex(const Hashtable* hashtable,
                                    HpackEncoderSlotHash hash_index) {
  return hashtable[hash_index].index;
}

template <typename ValueType, typename Hashtable>
static const ValueType& GetEntry(const Hashtable* hashtable,
                                 HpackEncoderSlotHash hash_index) {
  return hashtable[hash_index].value;
}

template <typename Cmp, typename Hashtable>
static bool TableEmptyAt(const Hashtable* hashtable,
                         HpackEncoderSlotHash hash_index) {
  return Cmp::Equals(hashtable[hash_index].value, Cmp::Null());
}

template <typename Cmp, typename Hashtable, typename ValueType>
static bool Matches(const Hashtable* hashtable, const ValueType& value,
                    HpackEncoderSlotHash hash_index) {
  return Cmp::Equals(value, hashtable[hash_index].value);
}

template <typename Hashtable>
static void UpdateIndex(Hashtable* hashtable, HpackEncoderSlotHash hash_index,
                        HpackEncoderIndex hpack_index) {
  hashtable[hash_index].index = hpack_index;
}

template <typename Hashtable, typename ValueType>
static void SetIndex(Hashtable* hashtable, HpackEncoderSlotHash hash_index,
                     const ValueType& value, HpackEncoderIndex hpack_index) {
  hashtable[hash_index].value = value;
  UpdateIndex(hashtable, hash_index, hpack_index);
}

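/* Look up `value`: probe the primary cuckoo cell first, then (when cuckoo
   hashing is enabled) the backup cell. On a hit, *index receives the hpack
   index recorded for that slot. */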
template <typename Cmp, typename Hashtable, typename ValueType>
static bool GetMatchingIndex(Hashtable* hashtable, const ValueType& value,
                             uint32_t value_hash, HpackEncoderIndex* index) {
  const HpackEncoderSlotHash cuckoo_first = HASH_FRAGMENT_2(value_hash);
  if (Matches<Cmp>(hashtable, value, cuckoo_first)) {
    *index = HpackIndex(hashtable, cuckoo_first);
    return true;
  }
#if GRPC_HPACK_ENCODER_USE_CUCKOO_HASH
  const HpackEncoderSlotHash cuckoo_second = HASH_FRAGMENT_3(value_hash);

  if (Matches<Cmp>(hashtable, value, cuckoo_second)) {
    *index = HpackIndex(hashtable, cuckoo_second);
    return true;
  }
#endif
  return false;
}

template <typename Cmp, typename Hashtable, typename ValueType>
static ValueType ReplaceOlderIndex(Hashtable* hashtable, const ValueType& value,
                                   HpackEncoderSlotHash hash_index_a,
                                   HpackEncoderSlotHash hash_index_b,
                                   HpackEncoderIndex new_index) {
  const HpackEncoderIndex hpack_idx_a = hashtable[hash_index_a].index;
  const HpackEncoderIndex hpack_idx_b = hashtable[hash_index_b].index;
  const HpackEncoderSlotHash id =
      hpack_idx_a < hpack_idx_b ? hash_index_a : hash_index_b;
  ValueType old = GetEntry<typename Cmp::Type>(hashtable, id);
  SetIndex(hashtable, id, value, new_index);
  return old;
}

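/* Insert or refresh `value`: if it already occupies one of its candidate
   cells, just bump that cell's hpack index; otherwise take an empty cell if
   one exists; otherwise evict whichever candidate holds the older (smaller)
   hpack index and unref the displaced entry. */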
template <typename Cmp, typename Hashtable, typename ValueType>
static void UpdateAddOrEvict(Hashtable hashtable, const ValueType& value,
                             uint32_t value_hash, HpackEncoderIndex new_index) {
  const HpackEncoderSlotHash cuckoo_first = HASH_FRAGMENT_2(value_hash);
  if (Matches<Cmp>(hashtable, value, cuckoo_first)) {
    UpdateIndex(hashtable, cuckoo_first, new_index);
    return;
  }
  if (TableEmptyAt<Cmp>(hashtable, cuckoo_first)) {
    Cmp::Ref(value);
    SetIndex(hashtable, cuckoo_first, value, new_index);
    return;
  }
#if GRPC_HPACK_ENCODER_USE_CUCKOO_HASH
  const HpackEncoderSlotHash cuckoo_second = HASH_FRAGMENT_3(value_hash);
  if (Matches<Cmp>(hashtable, value, cuckoo_second)) {
    UpdateIndex(hashtable, cuckoo_second, new_index);
    return;
  }
  Cmp::Ref(value);
  if (TableEmptyAt<Cmp>(hashtable, cuckoo_second)) {
    SetIndex(hashtable, cuckoo_second, value, new_index);
    return;
  }
  Cmp::Unref(ReplaceOlderIndex<Cmp>(hashtable, value, cuckoo_first,
                                    cuckoo_second, new_index));
#else
  ValueType old = GetEntry<typename Cmp::Type>(hashtable, cuckoo_first);
  SetIndex(hashtable, cuckoo_first, value, new_index);
  Cmp::Unref(old);
#endif
}

/* halve all counts because an element reached max */
static void HalveFilter(uint8_t /*idx*/, uint32_t* sum, uint8_t* elems) {
  *sum = 0;
  for (int i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
    elems[i] /= 2;
    (*sum) += elems[i];
  }
}

/* increment a filter count, halve all counts if one element reaches max */
static void IncrementFilter(uint8_t idx, uint32_t* sum, uint8_t* elems) {
  elems[idx]++;
  if (GPR_LIKELY(elems[idx] < kMaxFilterValue)) {
    (*sum)++;
  } else {
    HalveFilter(idx, sum, elems);
  }
}

static uint32_t UpdateHashtablePopularity(
    grpc_chttp2_hpack_compressor* hpack_compressor, uint32_t elem_hash) {
  const uint32_t popularity_hash = HASH_FRAGMENT_1(elem_hash);
  IncrementFilter(popularity_hash, &hpack_compressor->filter_elems_sum,
                  hpack_compressor->filter_elems);
  return popularity_hash;
}

static bool CanAddToHashtable(grpc_chttp2_hpack_compressor* hpack_compressor,
                              uint32_t popularity_hash) {
  const bool can_add =
      hpack_compressor->filter_elems[popularity_hash] >=
      hpack_compressor->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
  return can_add;
}
} /* namespace */

struct framer_state {
  int is_first_frame;
  /* number of bytes in 'output' when we started the frame - used to calculate
     frame length */
  size_t output_length_at_start_of_frame;
  /* index (in output) of the header for the current frame */
  size_t header_idx;
#ifndef NDEBUG
  /* have we seen a regular (non-colon-prefixed) header yet? */
  uint8_t seen_regular_header;
#endif
  /* output stream id */
  uint32_t stream_id;
  grpc_slice_buffer* output;
  grpc_transport_one_way_stats* stats;
  /* maximum size of a frame */
  size_t max_frame_size;
  bool use_true_binary_metadata;
  bool is_end_of_stream;
};
/* fills p (which is expected to be kDataFrameHeaderSize bytes long)
 * with a data frame header */
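/* Frame header layout per RFC 7540 section 4.1: a 24-bit big-endian payload
   length, an 8-bit type, an 8-bit flags field, and a 32-bit field whose low
   31 bits carry the stream identifier (the reserved high bit is zero for the
   stream ids we emit). */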
static void fill_header(uint8_t* p, uint8_t type, uint32_t id, size_t len,
                        uint8_t flags) {
  /* len is the current frame size (i.e. for the frame we're finishing).
     We finish a frame if:
     1) We called ensure_space() (i.e. add_tiny_header_data()) and adding
        'need_bytes' to the frame would cause us to exceed st->max_frame_size.
     2) We called add_header_data, and adding the slice would cause us to
        exceed st->max_frame_size.
     3) We're done encoding the header.

     Thus, len is always <= st->max_frame_size.
     st->max_frame_size is derived from GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
     which has a max allowable value of 16777215 (see chttp2_transport.cc).
     Thus, the following assert can be a debug assert. */
  GPR_DEBUG_ASSERT(len < 16777216);
  *p++ = static_cast<uint8_t>(len >> 16);
  *p++ = static_cast<uint8_t>(len >> 8);
  *p++ = static_cast<uint8_t>(len);
  *p++ = type;
  *p++ = flags;
  *p++ = static_cast<uint8_t>(id >> 24);
  *p++ = static_cast<uint8_t>(id >> 16);
  *p++ = static_cast<uint8_t>(id >> 8);
  *p++ = static_cast<uint8_t>(id);
}

static size_t current_frame_size(framer_state* st) {
  const size_t frame_size =
      st->output->length - st->output_length_at_start_of_frame;
  GPR_DEBUG_ASSERT(frame_size <= st->max_frame_size);
  return frame_size;
}

/* finish a frame - fill in the previously reserved header */
static void finish_frame(framer_state* st, int is_header_boundary) {
  const uint8_t type =
      static_cast<uint8_t>(st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER
                                              : GRPC_CHTTP2_FRAME_CONTINUATION);
  /* per the HTTP/2 spec:
       A HEADERS frame carries the END_STREAM flag that signals the end of a
       stream. However, a HEADERS frame with the END_STREAM flag set can be
       followed by CONTINUATION frames on the same stream. Logically, the
       CONTINUATION frames are part of the HEADERS frame.
     Thus, we add the END_STREAM flag to the HEADERS frame (the first frame). */
  uint8_t flags =
      static_cast<uint8_t>(st->is_first_frame && st->is_end_of_stream
                               ? GRPC_CHTTP2_DATA_FLAG_END_STREAM
                               : 0);
  /* per the HTTP/2 spec:
       A HEADERS frame without the END_HEADERS flag set MUST be followed by
       a CONTINUATION frame for the same stream.
     Thus, we add the END_HEADERS flag to the last frame. */
  flags |= static_cast<uint8_t>(
      is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0);
  fill_header(GRPC_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
              st->stream_id, current_frame_size(st), flags);
  st->stats->framing_bytes += kDataFrameHeaderSize;
  st->is_first_frame = 0;
}

/* begin a new frame: reserve off header space, remember how many bytes we'd
   output before beginning */
static void begin_frame(framer_state* st) {
  grpc_slice reserved;
  reserved.refcount = nullptr;
  reserved.data.inlined.length = kDataFrameHeaderSize;
  st->header_idx = grpc_slice_buffer_add_indexed(st->output, reserved);
  st->output_length_at_start_of_frame = st->output->length;
}

/* make sure that the current frame is of the type desired, and has sufficient
   space to add at least need_bytes bytes -- finishes the current frame if
   needed */
static void ensure_space(framer_state* st, size_t need_bytes) {
  if (GPR_LIKELY(current_frame_size(st) + need_bytes <= st->max_frame_size)) {
    return;
  }
  finish_frame(st, 0);
  begin_frame(st);
}

static void add_header_data(framer_state* st, grpc_slice slice) {
  size_t len = GRPC_SLICE_LENGTH(slice);
  size_t remaining;
  if (len == 0) return;
  remaining = st->max_frame_size - current_frame_size(st);
  if (len <= remaining) {
    st->stats->header_bytes += len;
    grpc_slice_buffer_add(st->output, slice);
  } else {
    st->stats->header_bytes += remaining;
    grpc_slice_buffer_add(st->output,
                          grpc_slice_split_head(&slice, remaining));
    finish_frame(st, 0);
    begin_frame(st);
    add_header_data(st, slice);
  }
}

static uint8_t* add_tiny_header_data(framer_state* st, size_t len) {
  ensure_space(st, len);
  st->stats->header_bytes += len;
  return grpc_slice_buffer_tiny_add(st->output, len);
}

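/* Evict the oldest entry from our model of the remote decoder's dynamic
   table: advance the tail index and give back the evicted entry's bytes. */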
static void evict_entry(grpc_chttp2_hpack_compressor* c) {
  c->tail_remote_index++;
  GPR_ASSERT(c->tail_remote_index > 0);
  GPR_ASSERT(c->table_size >=
             c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
  GPR_ASSERT(c->table_elems > 0);
  c->table_size = static_cast<uint16_t>(
      c->table_size -
      c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
  c->table_elems--;
}

// Reserve space in table for the new element, evict entries if needed.
// Return the new index of the element. Return 0 to indicate not adding to
// table.
static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor* c,
                                           size_t elem_size) {
  uint32_t new_index = c->tail_remote_index + c->table_elems + 1;
  GPR_DEBUG_ASSERT(elem_size < 65536);

  // TODO(arjunroy): Re-examine semantics
  if (elem_size > c->max_table_size) {
    while (c->table_size > 0) {
      evict_entry(c);
    }
    return 0;
  }

  /* Reserve space for this element in the remote table: if this overflows
     the current table, drop elements until it fits, matching the decompressor
     algorithm */
  while (c->table_size + elem_size > c->max_table_size) {
    evict_entry(c);
  }
  GPR_ASSERT(c->table_elems < c->max_table_size);
  c->table_elem_size[new_index % c->cap_table_elems] =
      static_cast<uint16_t>(elem_size);
  c->table_size = static_cast<uint16_t>(c->table_size + elem_size);
  c->table_elems++;

  return new_index;
}

// Add a key to the dynamic table. Both key and value will be added to table at
// the decoder.
static void AddKeyWithIndex(grpc_chttp2_hpack_compressor* c,
                            grpc_slice_refcount* key_ref, uint32_t new_index,
                            uint32_t key_hash) {
  UpdateAddOrEvict<SliceRefComparator>(c->key_table.entries, key_ref, key_hash,
                                       new_index);
}

/* add an element to the decoder table */
static void AddElemWithIndex(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                             uint32_t new_index, uint32_t elem_hash,
                             uint32_t key_hash) {
  GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(elem));
  UpdateAddOrEvict<MetadataComparator>(c->elem_table.entries, elem, elem_hash,
                                       new_index);
  AddKeyWithIndex(c, GRPC_MDKEY(elem).refcount, new_index, key_hash);
}

static void add_elem(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                     size_t elem_size, uint32_t elem_hash, uint32_t key_hash) {
  uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
  if (new_index != 0) {
    AddElemWithIndex(c, elem, new_index, elem_hash, key_hash);
  }
}

static void add_key(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                    size_t elem_size, uint32_t key_hash) {
  uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
  if (new_index != 0) {
    AddKeyWithIndex(c, GRPC_MDKEY(elem).refcount, new_index, key_hash);
  }
}

static void emit_indexed(grpc_chttp2_hpack_compressor* /*c*/,
                         uint32_t elem_index, framer_state* st) {
  GRPC_STATS_INC_HPACK_SEND_INDEXED();
  uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
  GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
                           len);
}

struct wire_value {
  wire_value(uint8_t huffman_prefix, bool insert_null_before_wire_value,
             const grpc_slice& slice)
      : data(slice),
        huffman_prefix(huffman_prefix),
        insert_null_before_wire_value(insert_null_before_wire_value),
        length(GRPC_SLICE_LENGTH(slice) +
               (insert_null_before_wire_value ? 1 : 0)) {}
  // While wire_value is const from the POV of hpack encoder code, actually
  // adding it to a slice buffer will possibly split the slice.
  const grpc_slice data;
  const uint8_t huffman_prefix;
  const bool insert_null_before_wire_value;
  const size_t length;
};

template <bool mdkey_definitely_interned>
static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) {
  const bool is_bin_hdr =
      mdkey_definitely_interned
          ? grpc_is_refcounted_slice_binary_header(GRPC_MDKEY(elem))
          : grpc_is_binary_header_internal(GRPC_MDKEY(elem));
  const grpc_slice& value = GRPC_MDVALUE(elem);
  if (is_bin_hdr) {
    if (true_binary_enabled) {
      GRPC_STATS_INC_HPACK_SEND_BINARY();
      return wire_value(0x00, true, grpc_slice_ref_internal(value));
    } else {
      GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64();
      return wire_value(0x80, false,
                        grpc_chttp2_base64_encode_and_huffman_compress(value));
    }
  } else {
    /* TODO(ctiller): opportunistically compress non-binary headers */
    GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
    return wire_value(0x00, false, grpc_slice_ref_internal(value));
  }
}

static uint32_t wire_value_length(const wire_value& v) {
  GPR_DEBUG_ASSERT(v.length <= UINT32_MAX);
  return static_cast<uint32_t>(v.length);
}

namespace {
enum class EmitLitHdrType { INC_IDX, NO_IDX };

enum class EmitLitHdrVType { INC_IDX_V, NO_IDX_V };
}  // namespace

template <EmitLitHdrType type>
static void emit_lithdr(grpc_chttp2_hpack_compressor* /*c*/, uint32_t key_index,
                        grpc_mdelem elem, framer_state* st) {
  switch (type) {
    case EmitLitHdrType::INC_IDX:
      GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX();
      break;
    case EmitLitHdrType::NO_IDX:
      GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX();
      break;
  }
  const uint32_t len_pfx = type == EmitLitHdrType::INC_IDX
                               ? GRPC_CHTTP2_VARINT_LENGTH(key_index, 2)
                               : GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
  const wire_value value =
      get_wire_value<true>(elem, st->use_true_binary_metadata);
  const uint32_t len_val = wire_value_length(value);
  const uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
  GPR_DEBUG_ASSERT(len_pfx + len_val_len < GRPC_SLICE_INLINED_SIZE);
  uint8_t* data = add_tiny_header_data(
      st,
      len_pfx + len_val_len + (value.insert_null_before_wire_value ? 1 : 0));
  switch (type) {
    case EmitLitHdrType::INC_IDX:
      GRPC_CHTTP2_WRITE_VARINT(key_index, 2, 0x40, data, len_pfx);
      break;
    case EmitLitHdrType::NO_IDX:
      GRPC_CHTTP2_WRITE_VARINT(key_index, 4, 0x00, data, len_pfx);
      break;
  }
  GRPC_CHTTP2_WRITE_VARINT(len_val, 1, value.huffman_prefix, &data[len_pfx],
                           len_val_len);
  if (value.insert_null_before_wire_value) {
    data[len_pfx + len_val_len] = 0;
  }
  add_header_data(st, value.data);
}

template <EmitLitHdrVType type>
static void emit_lithdr_v(grpc_chttp2_hpack_compressor* /*c*/, grpc_mdelem elem,
                          framer_state* st) {
  switch (type) {
    case EmitLitHdrVType::INC_IDX_V:
      GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V();
      break;
    case EmitLitHdrVType::NO_IDX_V:
      GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V();
      break;
  }
  GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
  const uint32_t len_key =
      static_cast<uint32_t>(GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)));
  const wire_value value =
      type == EmitLitHdrVType::INC_IDX_V
          ? get_wire_value<true>(elem, st->use_true_binary_metadata)
          : get_wire_value<false>(elem, st->use_true_binary_metadata);
  const uint32_t len_val = wire_value_length(value);
  const uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
  const uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
  GPR_DEBUG_ASSERT(len_key <= UINT32_MAX);
  GPR_DEBUG_ASSERT(1 + len_key_len < GRPC_SLICE_INLINED_SIZE);
  uint8_t* key_buf = add_tiny_header_data(st, 1 + len_key_len);
  key_buf[0] = type == EmitLitHdrVType::INC_IDX_V ? 0x40 : 0x00;
  GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00, &key_buf[1], len_key_len);
  add_header_data(st, grpc_slice_ref_internal(GRPC_MDKEY(elem)));
  uint8_t* value_buf = add_tiny_header_data(
      st, len_val_len + (value.insert_null_before_wire_value ? 1 : 0));
  GRPC_CHTTP2_WRITE_VARINT(len_val, 1, value.huffman_prefix, value_buf,
                           len_val_len);
  if (value.insert_null_before_wire_value) {
    value_buf[len_val_len] = 0;
  }
  add_header_data(st, value.data);
}

static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor* c,
                                             framer_state* st) {
  uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(c->max_table_size, 3);
  GRPC_CHTTP2_WRITE_VARINT(c->max_table_size, 3, 0x20,
                           add_tiny_header_data(st, len), len);
  c->advertise_table_size_change = 0;
}

static void GPR_ATTRIBUTE_NOINLINE hpack_enc_log(grpc_mdelem elem) {
  char* k = grpc_slice_to_c_string(GRPC_MDKEY(elem));
  char* v = nullptr;
  if (grpc_is_binary_header_internal(GRPC_MDKEY(elem))) {
    v = grpc_dump_slice(GRPC_MDVALUE(elem), GPR_DUMP_HEX);
  } else {
    v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
  }
  gpr_log(
      GPR_INFO,
      "Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
      k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem),
      grpc_slice_is_interned(GRPC_MDKEY(elem)),
      grpc_slice_is_interned(GRPC_MDVALUE(elem)));
  gpr_free(k);
  gpr_free(v);
}

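/* Map an entry's absolute remote index onto the hpack index we put on the
   wire: the newest entry (absolute index tail_remote_index + table_elems)
   maps to GRPC_CHTTP2_LAST_STATIC_ENTRY + 1, and older entries count up from
   there. */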
static uint32_t dynidx(grpc_chttp2_hpack_compressor* c, uint32_t elem_index) {
  return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
         c->table_elems - elem_index;
}

struct EmitIndexedStatus {
  EmitIndexedStatus() = default;
  EmitIndexedStatus(uint32_t elem_hash, bool emitted, bool can_add)
      : elem_hash(elem_hash), emitted(emitted), can_add(can_add) {}
  const uint32_t elem_hash = 0;
  const bool emitted = false;
  const bool can_add = false;
};

static EmitIndexedStatus maybe_emit_indexed(grpc_chttp2_hpack_compressor* c,
                                            grpc_mdelem elem,
                                            framer_state* st) {
  const uint32_t elem_hash =
      GRPC_MDELEM_STORAGE(elem) == GRPC_MDELEM_STORAGE_INTERNED
          ? reinterpret_cast<grpc_core::InternedMetadata*>(
                GRPC_MDELEM_DATA(elem))
                ->hash()
          : reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(elem))
                ->hash();
  /* Update filter to see if we can perhaps add this elem. */
  const uint32_t popularity_hash = UpdateHashtablePopularity(c, elem_hash);
  /* is this elem currently in the decoders table? */
  HpackEncoderIndex indices_key;
  if (GetMatchingIndex<MetadataComparator>(c->elem_table.entries, elem,
                                           elem_hash, &indices_key) &&
      indices_key > c->tail_remote_index) {
    emit_indexed(c, dynidx(c, indices_key), st);
    return EmitIndexedStatus(elem_hash, true, false);
  }
  /* Didn't hit either cuckoo index, so no emit. */
  return EmitIndexedStatus(elem_hash, false,
                           CanAddToHashtable(c, popularity_hash));
}

static void emit_maybe_add(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                           framer_state* st, uint32_t indices_key,
                           bool should_add_elem, size_t decoder_space_usage,
                           uint32_t elem_hash, uint32_t key_hash) {
  if (should_add_elem) {
    emit_lithdr<EmitLitHdrType::INC_IDX>(c, dynidx(c, indices_key), elem, st);
    add_elem(c, elem, decoder_space_usage, elem_hash, key_hash);
  } else {
    emit_lithdr<EmitLitHdrType::NO_IDX>(c, dynidx(c, indices_key), elem, st);
  }
}

/* encode an mdelem */
static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                      framer_state* st) {
  const grpc_slice& elem_key = GRPC_MDKEY(elem);
  /* User-provided key len validated in grpc_validate_header_key_is_legal(). */
  GPR_DEBUG_ASSERT(GRPC_SLICE_LENGTH(elem_key) > 0);
  /* Header ordering: all reserved headers (prefixed with ':') must precede
   * regular headers. This can be a debug assert, since:
   * 1) User cannot give us ':' headers (grpc_validate_header_key_is_legal()).
   * 2) grpc filters/core should be checked during debug builds. */
#ifndef NDEBUG
  if (GRPC_SLICE_START_PTR(elem_key)[0] != ':') { /* regular header */
    st->seen_regular_header = 1;
  } else {
    GPR_DEBUG_ASSERT(
        st->seen_regular_header == 0 &&
        "Reserved header (colon-prefixed) happening after regular ones.");
  }
#endif
  if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
    hpack_enc_log(elem);
  }
  const bool elem_interned = GRPC_MDELEM_IS_INTERNED(elem);
  const bool key_interned = elem_interned || grpc_slice_is_interned(elem_key);
  /* Key is not interned, emit literals. */
  if (!key_interned) {
    emit_lithdr_v<EmitLitHdrVType::NO_IDX_V>(c, elem, st);
    return;
  }
  /* Interned metadata => maybe already indexed. */
  const EmitIndexedStatus ret =
      elem_interned ? maybe_emit_indexed(c, elem, st) : EmitIndexedStatus();
  if (ret.emitted) {
    return;
  }
  /* should this elem be in the table? */
  const size_t decoder_space_usage =
      grpc_chttp2_get_size_in_hpack_table(elem, st->use_true_binary_metadata);
  const bool decoder_space_available =
      decoder_space_usage < kMaxDecoderSpaceUsage;
  const bool should_add_elem =
      elem_interned && decoder_space_available && ret.can_add;
  const uint32_t elem_hash = ret.elem_hash;
  /* no hits for the elem... maybe there's a key? */
  const uint32_t key_hash = elem_key.refcount->Hash(elem_key);
  HpackEncoderIndex indices_key;
  if (GetMatchingIndex<SliceRefComparator>(
          c->key_table.entries, elem_key.refcount, key_hash, &indices_key) &&
      indices_key > c->tail_remote_index) {
    emit_maybe_add(c, elem, st, indices_key, should_add_elem,
                   decoder_space_usage, elem_hash, key_hash);
    return;
  }
  /* neither the elem nor its key is in the table... fall back to literal
     emission */
  const bool should_add_key = !elem_interned && decoder_space_available;
  if (should_add_elem || should_add_key) {
    emit_lithdr_v<EmitLitHdrVType::INC_IDX_V>(c, elem, st);
  } else {
    emit_lithdr_v<EmitLitHdrVType::NO_IDX_V>(c, elem, st);
  }
  if (should_add_elem) {
    add_elem(c, elem, decoder_space_usage, elem_hash, key_hash);
  } else if (should_add_key) {
    add_key(c, elem, decoder_space_usage, key_hash);
  }
}

#define STRLEN_LIT(x) (sizeof(x) - 1)
#define TIMEOUT_KEY "grpc-timeout"

static void deadline_enc(grpc_chttp2_hpack_compressor* c, grpc_millis deadline,
                         framer_state* st) {
  char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
  grpc_mdelem mdelem;
  grpc_http2_encode_timeout(deadline - grpc_core::ExecCtx::Get()->Now(),
                            timeout_str);
  mdelem = grpc_mdelem_from_slices(
      GRPC_MDSTR_GRPC_TIMEOUT, grpc_core::UnmanagedMemorySlice(timeout_str));
  hpack_enc(c, mdelem, st);
  GRPC_MDELEM_UNREF(mdelem);
}

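/* Per RFC 7541 section 4.1, every dynamic-table entry is charged 32 bytes of
   overhead in addition to its name and value, so a table budget of `bytes`
   bytes can never hold more than (bytes + 31) / 32 entries; this bounds the
   size of the per-element bookkeeping ring below. */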
static uint32_t elems_for_bytes(uint32_t bytes) { return (bytes + 31) / 32; }

void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c) {
  memset(c, 0, sizeof(*c));
  c->max_table_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
  c->cap_table_elems = elems_for_bytes(c->max_table_size);
  c->max_table_elems = c->cap_table_elems;
  c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
  const size_t alloc_size = sizeof(*c->table_elem_size) * c->cap_table_elems;
  c->table_elem_size = static_cast<uint16_t*>(gpr_malloc(alloc_size));
  memset(c->table_elem_size, 0, alloc_size);
}

void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c) {
  for (int i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
    auto* const key = GetEntry<grpc_slice_refcount*>(c->key_table.entries, i);
    if (key != nullptr) {
      key->Unref();
    }
    GRPC_MDELEM_UNREF(GetEntry<grpc_mdelem>(c->elem_table.entries, i));
  }
  gpr_free(c->table_elem_size);
}

void grpc_chttp2_hpack_compressor_set_max_usable_size(
    grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) {
  c->max_usable_size = max_table_size;
  grpc_chttp2_hpack_compressor_set_max_table_size(
      c, GPR_MIN(c->max_table_size, max_table_size));
}

static void rebuild_elems(grpc_chttp2_hpack_compressor* c, uint32_t new_cap) {
  uint16_t* table_elem_size =
      static_cast<uint16_t*>(gpr_malloc(sizeof(*table_elem_size) * new_cap));
  uint32_t i;

  memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
  GPR_ASSERT(c->table_elems <= new_cap);

  for (i = 0; i < c->table_elems; i++) {
    uint32_t ofs = c->tail_remote_index + i + 1;
    table_elem_size[ofs % new_cap] =
        c->table_elem_size[ofs % c->cap_table_elems];
  }

  c->cap_table_elems = new_cap;
  gpr_free(c->table_elem_size);
  c->table_elem_size = table_elem_size;
}

void grpc_chttp2_hpack_compressor_set_max_table_size(
    grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) {
  max_table_size = GPR_MIN(max_table_size, c->max_usable_size);
  if (max_table_size == c->max_table_size) {
    return;
  }
  while (c->table_size > 0 && c->table_size > max_table_size) {
    evict_entry(c);
  }
  c->max_table_size = max_table_size;
  c->max_table_elems = elems_for_bytes(max_table_size);
  if (c->max_table_elems > c->cap_table_elems) {
    rebuild_elems(c, GPR_MAX(c->max_table_elems, 2 * c->cap_table_elems));
  } else if (c->max_table_elems < c->cap_table_elems / 3) {
    uint32_t new_cap = GPR_MAX(c->max_table_elems, 16);
    if (new_cap != c->cap_table_elems) {
      rebuild_elems(c, new_cap);
    }
  }
  c->advertise_table_size_change = 1;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
    gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size);
  }
}

void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c,
                               grpc_mdelem** extra_headers,
                               size_t extra_headers_size,
                               grpc_metadata_batch* metadata,
                               const grpc_encode_header_options* options,
                               grpc_slice_buffer* outbuf) {
  /* grpc_chttp2_encode_header is called by FlushInitial/TrailingMetadata in
     writing.cc. Specifically, on streams returned by NextStream(), which
     returns streams from the list GRPC_CHTTP2_LIST_WRITABLE. The only way to
     be added to the list is via grpc_chttp2_list_add_writable_stream(), which
     validates that stream_id is not 0. So, this can be a debug assert. */
  GPR_DEBUG_ASSERT(options->stream_id != 0);
  framer_state st;
#ifndef NDEBUG
  st.seen_regular_header = 0;
#endif
  st.stream_id = options->stream_id;
  st.output = outbuf;
  st.is_first_frame = 1;
  st.stats = options->stats;
  st.max_frame_size = options->max_frame_size;
  st.use_true_binary_metadata = options->use_true_binary_metadata;
  st.is_end_of_stream = options->is_eof;

  /* Encode a metadata batch; store the returned values, representing
     a metadata element that needs to be unreffed back into the metadata
     slot. THIS MAY NOT BE THE SAME ELEMENT (if a decoder table slot got
     updated). After this loop, we'll do a batch unref of elements. */
  begin_frame(&st);
  if (c->advertise_table_size_change != 0) {
    emit_advertise_table_size_change(c, &st);
  }
  for (size_t i = 0; i < extra_headers_size; ++i) {
    grpc_mdelem md = *extra_headers[i];
    const bool is_static =
        GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_STATIC;
    uintptr_t static_index;
    if (is_static &&
        (static_index =
             reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(md))
                 ->StaticIndex()) < GRPC_CHTTP2_LAST_STATIC_ENTRY) {
      emit_indexed(c, static_cast<uint32_t>(static_index + 1), &st);
    } else {
      hpack_enc(c, md, &st);
    }
  }
  grpc_metadata_batch_assert_ok(metadata);
  for (grpc_linked_mdelem* l = metadata->list.head; l; l = l->next) {
    const bool is_static =
        GRPC_MDELEM_STORAGE(l->md) == GRPC_MDELEM_STORAGE_STATIC;
    uintptr_t static_index;
    if (is_static &&
        (static_index = reinterpret_cast<grpc_core::StaticMetadata*>(
             GRPC_MDELEM_DATA(l->md))
                            ->StaticIndex()) < GRPC_CHTTP2_LAST_STATIC_ENTRY) {
      emit_indexed(c, static_cast<uint32_t>(static_index + 1), &st);
    } else {
      hpack_enc(c, l->md, &st);
    }
  }
  grpc_millis deadline = metadata->deadline;
  if (deadline != GRPC_MILLIS_INF_FUTURE) {
    deadline_enc(c, deadline, &st);
  }

  finish_frame(&st, 1);
}