1 /*
2 *
3 * Copyright 2015 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19 #include <grpc/support/port_platform.h>
20
21 #include "src/core/lib/transport/metadata.h"
22
23 #include <assert.h>
24 #include <inttypes.h>
25 #include <stddef.h>
26 #include <string.h>
27
28 #include <grpc/compression.h>
29 #include <grpc/grpc.h>
30 #include <grpc/support/alloc.h>
31 #include <grpc/support/atm.h>
32 #include <grpc/support/log.h>
33 #include <grpc/support/string_util.h>
34 #include <grpc/support/time.h>
35
36 #include "src/core/lib/gpr/murmur_hash.h"
37 #include "src/core/lib/gpr/string.h"
38 #include "src/core/lib/iomgr/iomgr_internal.h"
39 #include "src/core/lib/profiling/timers.h"
40 #include "src/core/lib/slice/slice_internal.h"
41 #include "src/core/lib/slice/slice_string_helpers.h"
42 #include "src/core/lib/transport/static_metadata.h"
43
44 using grpc_core::AllocatedMetadata;
45 using grpc_core::InternedMetadata;
46 using grpc_core::StaticMetadata;
47 using grpc_core::UserData;
48
/* There are two kinds of mdelem and mdstr instances.
 * Static instances are declared in static_metadata.{h,c} and
 * are initialized by grpc_mdctx_global_init().
 * Dynamic instances are stored in hash tables on grpc_mdctx, and are backed
 * by internal_string and internal_element structures.
 * Internal helper functions here-in (is_mdstr_static, is_mdelem_static) are
 * used to determine which kind of element a pointer refers to.
 */

/* Trace flag named "metadata" gating the mdelem ref/unref logging in this
   file; as a DebugOnlyTraceFlag it is compiled out in NDEBUG builds. */
grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata");
59
#ifndef NDEBUG
/* In debug builds, ref/unref entry points thread the caller's file/line
   through (DEBUG_ARGS / FWD_DEBUG_ARGS) so trace output points at the call
   site rather than at this file. In release builds both expand to nothing. */
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS file, line

/* Logs a ref transition (refcnt -> refcnt + 1) for the element at |md| when
   the "metadata" trace flag is enabled. |md| is used only as an identity in
   the log line; key/value are copied to C strings just for formatting. */
void grpc_mdelem_trace_ref(void* md, const grpc_slice& key,
                           const grpc_slice& value, intptr_t refcnt,
                           const char* file, int line) {
  if (grpc_trace_metadata.enabled()) {
    char* key_str = grpc_slice_to_c_string(key);
    char* value_str = grpc_slice_to_c_string(value);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "mdelem REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
            refcnt, refcnt + 1, key_str, value_str);
    gpr_free(key_str);
    gpr_free(value_str);
  }
}

/* Logs an unref transition (refcnt -> refcnt - 1); mirror image of
   grpc_mdelem_trace_ref above. */
void grpc_mdelem_trace_unref(void* md, const grpc_slice& key,
                             const grpc_slice& value, intptr_t refcnt,
                             const char* file, int line) {
  if (grpc_trace_metadata.enabled()) {
    char* key_str = grpc_slice_to_c_string(key);
    char* value_str = grpc_slice_to_c_string(value);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "mdelem UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
            refcnt, refcnt - 1, key_str, value_str);
    gpr_free(key_str);
    gpr_free(value_str);
  }
}

#else  // ifndef NDEBUG
#define DEBUG_ARGS
#define FWD_DEBUG_ARGS
#endif  // ifndef NDEBUG
96
/* The interned-metadata table is split into SHARD_COUNT independently locked
   shards; each shard starts with INITIAL_SHARD_CAPACITY buckets and doubles
   on demand (see grow_mdtab). */
#define INITIAL_SHARD_CAPACITY 8
#define LOG2_SHARD_COUNT 4
#define SHARD_COUNT ((size_t)(1 << LOG2_SHARD_COUNT))

/* The low LOG2_SHARD_COUNT hash bits pick the shard; the remaining bits pick
   the bucket within the shard, keeping the two indices independent. */
#define TABLE_IDX(hash, capacity) (((hash) >> (LOG2_SHARD_COUNT)) % (capacity))
#define SHARD_IDX(hash) ((hash) & ((1 << (LOG2_SHARD_COUNT)) - 1))
103
HashInit()104 void StaticMetadata::HashInit() {
105 uint32_t k_hash = grpc_slice_hash_internal(kv_.key);
106 uint32_t v_hash = grpc_slice_hash_internal(kv_.value);
107 hash_ = GRPC_MDSTR_KV_HASH(k_hash, v_hash);
108 }
109
// General constructor: takes a ref on both key and value slices, since the
// caller retains its own references.
AllocatedMetadata::AllocatedMetadata(const grpc_slice& key,
                                     const grpc_slice& value)
    : RefcountedMdBase(grpc_slice_ref_internal(key),
                       grpc_slice_ref_internal(value)) {
#ifndef NDEBUG
  TraceAtStart("ALLOC_MD");
#endif
}
118
// NoRefKey variant: the key is known not to need a ref (it is one of the
// static metadata strings), so only the value is ref'd.
AllocatedMetadata::AllocatedMetadata(const grpc_slice& key,
                                     const grpc_slice& value, const NoRefKey*)
    : RefcountedMdBase(key, grpc_slice_ref_internal(value)) {
#ifndef NDEBUG
  TraceAtStart("ALLOC_MD_NOREF_KEY");
#endif
}
126
// Managed-key / unmanaged-value variant: neither slice is ref'd here — the
// slice types guarantee ownership semantics that make extra refs unnecessary.
AllocatedMetadata::AllocatedMetadata(
    const grpc_core::ManagedMemorySlice& key,
    const grpc_core::UnmanagedMemorySlice& value)
    : RefcountedMdBase(key, value) {
#ifndef NDEBUG
  TraceAtStart("ALLOC_MD_NOREF_KEY_VAL");
#endif
}
135
// Externally-managed-key / unmanaged-value variant: as above, no refs taken.
AllocatedMetadata::AllocatedMetadata(
    const grpc_core::ExternallyManagedSlice& key,
    const grpc_core::UnmanagedMemorySlice& value)
    : RefcountedMdBase(key, value) {
#ifndef NDEBUG
  TraceAtStart("ALLOC_MD_NOREF_KEY_VAL");
#endif
}
144
~AllocatedMetadata()145 AllocatedMetadata::~AllocatedMetadata() {
146 grpc_slice_unref_internal(key());
147 grpc_slice_unref_internal(value());
148 void* user_data = user_data_.data.Load(grpc_core::MemoryOrder::RELAXED);
149 if (user_data) {
150 destroy_user_data_func destroy_user_data =
151 user_data_.destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED);
152 destroy_user_data(user_data);
153 }
154 }
155
#ifndef NDEBUG
/* Emits one trace line tagged |tag| describing this element (pointer, current
   ref value, key and value) when the "metadata" trace flag is on. Debug-only:
   called from the constructors/destructors above. */
void grpc_core::RefcountedMdBase::TraceAtStart(const char* tag) {
  if (grpc_trace_metadata.enabled()) {
    char* key_str = grpc_slice_to_c_string(key());
    char* value_str = grpc_slice_to_c_string(value());
    gpr_log(GPR_DEBUG, "mdelem %s:%p:%" PRIdPTR ": '%s' = '%s'", tag, this,
            RefValue(), key_str, value_str);
    gpr_free(key_str);
    gpr_free(value_str);
  }
}
#endif
168
// General interned constructor: refs both slices and links the new element
// at the head of its hash bucket (|next| is the previous head).
InternedMetadata::InternedMetadata(const grpc_slice& key,
                                   const grpc_slice& value, uint32_t hash,
                                   InternedMetadata* next)
    : RefcountedMdBase(grpc_slice_ref_internal(key),
                       grpc_slice_ref_internal(value), hash),
      link_(next) {
#ifndef NDEBUG
  TraceAtStart("INTERNED_MD");
#endif
}
179
// NoRefKey variant: the key is a static metadata string and needs no ref;
// only the value is ref'd.
InternedMetadata::InternedMetadata(const grpc_slice& key,
                                   const grpc_slice& value, uint32_t hash,
                                   InternedMetadata* next, const NoRefKey*)
    : RefcountedMdBase(key, grpc_slice_ref_internal(value), hash), link_(next) {
#ifndef NDEBUG
  TraceAtStart("INTERNED_MD_NOREF_KEY");
#endif
}
188
~InternedMetadata()189 InternedMetadata::~InternedMetadata() {
190 grpc_slice_unref_internal(key());
191 grpc_slice_unref_internal(value());
192 void* user_data = user_data_.data.Load(grpc_core::MemoryOrder::RELAXED);
193 if (user_data) {
194 destroy_user_data_func destroy_user_data =
195 user_data_.destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED);
196 destroy_user_data(user_data);
197 }
198 }
199
CleanupLinkedMetadata(InternedMetadata::BucketLink * head)200 size_t InternedMetadata::CleanupLinkedMetadata(
201 InternedMetadata::BucketLink* head) {
202 size_t num_freed = 0;
203 InternedMetadata::BucketLink* prev_next = head;
204 InternedMetadata *md, *next;
205
206 for (md = head->next; md; md = next) {
207 next = md->link_.next;
208 if (md->AllRefsDropped()) {
209 prev_next->next = next;
210 delete md;
211 num_freed++;
212 } else {
213 prev_next = &md->link_;
214 }
215 }
216 return num_freed;
217 }
218
/* One lock-protected slice of the global interned-metadata hash table. */
typedef struct mdtab_shard {
  gpr_mu mu;                            // guards all fields below
  InternedMetadata::BucketLink* elems;  // bucket array, |capacity| entries
  size_t count;                         // live elements in this shard
  size_t capacity;                      // number of buckets (power of two)
  /** Estimate of the number of unreferenced mdelems in the hash table.
      This will eventually converge to the exact number, but it's instantaneous
      accuracy is not guaranteed */
  gpr_atm free_estimate;
} mdtab_shard;

static mdtab_shard g_shards[SHARD_COUNT];

static void gc_mdtab(mdtab_shard* shard);
233
grpc_mdctx_global_init(void)234 void grpc_mdctx_global_init(void) {
235 /* initialize shards */
236 for (size_t i = 0; i < SHARD_COUNT; i++) {
237 mdtab_shard* shard = &g_shards[i];
238 gpr_mu_init(&shard->mu);
239 shard->count = 0;
240 gpr_atm_no_barrier_store(&shard->free_estimate, 0);
241 shard->capacity = INITIAL_SHARD_CAPACITY;
242 shard->elems = static_cast<InternedMetadata::BucketLink*>(
243 gpr_zalloc(sizeof(*shard->elems) * shard->capacity));
244 }
245 }
246
/* Tears down the interned-metadata table. Runs a final gc pass per shard and
   reports any elements still alive as leaks. The mutex is destroyed first —
   at shutdown no other thread may touch the table, so gc runs unlocked. */
void grpc_mdctx_global_shutdown() {
  for (size_t i = 0; i < SHARD_COUNT; i++) {
    mdtab_shard* shard = &g_shards[i];
    gpr_mu_destroy(&shard->mu);
    gc_mdtab(shard);
    if (shard->count != 0) {
      gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata elements were leaked",
              shard->count);
      if (grpc_iomgr_abort_on_leaks()) {
        abort();
      }
    }
    // For ASAN builds, we don't want to crash here, because that will
    // prevent ASAN from providing leak detection information, which is
    // far more useful than this simple assertion.
#ifndef GRPC_ASAN_ENABLED
    GPR_DEBUG_ASSERT(shard->count == 0);
#endif
    gpr_free(shard->elems);
  }
}
268
#ifndef NDEBUG
/* Debug helper: true iff |e|'s payload points into the statically allocated
   mdelem table (a simple address-range check). */
static int is_mdelem_static(grpc_mdelem e) {
  return reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(e)) >=
             &grpc_static_mdelem_table()[0] &&
         reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(e)) <
             &grpc_static_mdelem_table()[GRPC_STATIC_MDELEM_COUNT];
}
#endif
277
/* Takes a ref while the owning shard's lock is held. If this revives an
   element whose count had dropped to zero (FirstRef), the shard's estimate of
   free (collectible) elements is decremented to match. */
void InternedMetadata::RefWithShardLocked(mdtab_shard* shard) {
#ifndef NDEBUG
  if (grpc_trace_metadata.enabled()) {
    char* key_str = grpc_slice_to_c_string(key());
    char* value_str = grpc_slice_to_c_string(value());
    intptr_t value = RefValue();
    gpr_log(__FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG,
            "mdelem   REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", this,
            value, value + 1, key_str, value_str);
    gpr_free(key_str);
    gpr_free(value_str);
  }
#endif
  if (FirstRef()) {
    gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -1);
  }
}
295
gc_mdtab(mdtab_shard * shard)296 static void gc_mdtab(mdtab_shard* shard) {
297 GPR_TIMER_SCOPE("gc_mdtab", 0);
298 size_t num_freed = 0;
299 for (size_t i = 0; i < shard->capacity; ++i) {
300 intptr_t freed = InternedMetadata::CleanupLinkedMetadata(&shard->elems[i]);
301 num_freed += freed;
302 shard->count -= freed;
303 }
304 gpr_atm_no_barrier_fetch_add(&shard->free_estimate,
305 -static_cast<intptr_t>(num_freed));
306 }
307
grow_mdtab(mdtab_shard * shard)308 static void grow_mdtab(mdtab_shard* shard) {
309 GPR_TIMER_SCOPE("grow_mdtab", 0);
310
311 size_t capacity = shard->capacity * 2;
312 size_t i;
313 InternedMetadata::BucketLink* mdtab;
314 InternedMetadata *md, *next;
315 uint32_t hash;
316
317 mdtab = static_cast<InternedMetadata::BucketLink*>(
318 gpr_zalloc(sizeof(InternedMetadata::BucketLink) * capacity));
319
320 for (i = 0; i < shard->capacity; i++) {
321 for (md = shard->elems[i].next; md; md = next) {
322 size_t idx;
323 hash = md->hash();
324 next = md->bucket_next();
325 idx = TABLE_IDX(hash, capacity);
326 md->set_bucket_next(mdtab[idx].next);
327 mdtab[idx].next = md;
328 }
329 }
330 gpr_free(shard->elems);
331 shard->elems = mdtab;
332 shard->capacity = capacity;
333 }
334
rehash_mdtab(mdtab_shard * shard)335 static void rehash_mdtab(mdtab_shard* shard) {
336 if (gpr_atm_no_barrier_load(&shard->free_estimate) >
337 static_cast<gpr_atm>(shard->capacity / 4)) {
338 gc_mdtab(shard);
339 } else {
340 grow_mdtab(shard);
341 }
342 }
343
/* Forward declarations for the creation helpers below; the bool template
   parameters carry compile-time knowledge that a slice is a static metadata
   string, letting the helpers skip runtime interned-ness checks. */
template <bool key_definitely_static, bool value_definitely_static = false>
static grpc_mdelem md_create_maybe_static(const grpc_slice& key,
                                          const grpc_slice& value);
template <bool key_definitely_static>
static grpc_mdelem md_create_must_intern(const grpc_slice& key,
                                         const grpc_slice& value,
                                         uint32_t hash);
351
/* Builds an mdelem for |key|/|value|. If either slice is uninterned the
   element cannot live in the interned table: it either reuses the caller's
   external backing store or gets freshly allocated storage. Otherwise the
   maybe-static/interned path is taken. */
template <bool key_definitely_static, bool value_definitely_static = false>
static grpc_mdelem md_create(
    const grpc_slice& key, const grpc_slice& value,
    grpc_mdelem_data* compatible_external_backing_store) {
  // Ensure slices are, in fact, static if we claimed they were.
  GPR_DEBUG_ASSERT(!key_definitely_static ||
                   GRPC_IS_STATIC_METADATA_STRING(key));
  GPR_DEBUG_ASSERT(!value_definitely_static ||
                   GRPC_IS_STATIC_METADATA_STRING(value));
  const bool key_is_interned =
      key_definitely_static || grpc_slice_is_interned(key);
  const bool value_is_interned =
      value_definitely_static || grpc_slice_is_interned(value);
  // External storage if either slice is not interned and the caller already
  // created a backing store. If no backing store, we allocate one.
  if (!key_is_interned || !value_is_interned) {
    if (compatible_external_backing_store != nullptr) {
      // Caller provided backing store.
      return GRPC_MAKE_MDELEM(compatible_external_backing_store,
                              GRPC_MDELEM_STORAGE_EXTERNAL);
    } else {
      // We allocate backing store. A statically known key needs no ref, so
      // the NoRefKey overload is selected at compile time.
      return key_definitely_static
                 ? GRPC_MAKE_MDELEM(
                       new AllocatedMetadata(
                           key, value,
                           static_cast<const AllocatedMetadata::NoRefKey*>(
                               nullptr)),
                       GRPC_MDELEM_STORAGE_ALLOCATED)
                 : GRPC_MAKE_MDELEM(new AllocatedMetadata(key, value),
                                    GRPC_MDELEM_STORAGE_ALLOCATED);
    }
  }
  return md_create_maybe_static<key_definitely_static, value_definitely_static>(
      key, value);
}
388
/* Both slices are known to be at least interned here. If both are static
   *and* the pair is one of the well-known static elements, returns the
   statically allocated mdelem; otherwise falls through to interning. */
template <bool key_definitely_static, bool value_definitely_static>
static grpc_mdelem md_create_maybe_static(const grpc_slice& key,
                                          const grpc_slice& value) {
  // Ensure slices are, in fact, static if we claimed they were.
  GPR_DEBUG_ASSERT(!key_definitely_static ||
                   GRPC_IS_STATIC_METADATA_STRING(key));
  GPR_DEBUG_ASSERT(!value_definitely_static ||
                   GRPC_IS_STATIC_METADATA_STRING(value));
  GPR_DEBUG_ASSERT(key.refcount != nullptr);
  GPR_DEBUG_ASSERT(value.refcount != nullptr);

  const bool key_is_static_mdstr =
      key_definitely_static ||
      key.refcount->GetType() == grpc_slice_refcount::Type::STATIC;
  const bool value_is_static_mdstr =
      value_definitely_static ||
      value.refcount->GetType() == grpc_slice_refcount::Type::STATIC;

  const intptr_t kidx = GRPC_STATIC_METADATA_INDEX(key);

  // Not all static slice input yields a statically stored metadata element.
  if (key_is_static_mdstr && value_is_static_mdstr) {
    grpc_mdelem static_elem = grpc_static_mdelem_for_static_strings(
        kidx, GRPC_STATIC_METADATA_INDEX(value));
    if (!GRPC_MDISNULL(static_elem)) {
      return static_elem;
    }
  }

  // Static keys have a precomputed hash table; anything else hashes the
  // refcounted slice at runtime.
  uint32_t khash = key_definitely_static
                       ? grpc_static_metadata_hash_values[kidx]
                       : grpc_slice_hash_refcounted(key);

  uint32_t hash = GRPC_MDSTR_KV_HASH(khash, grpc_slice_hash_refcounted(value));
  return md_create_must_intern<key_definitely_static>(key, value, hash);
}
425
/* Interns |key|/|value| in the shard selected by |hash|: returns an existing
   element (ref'd under the shard lock) if the pair is already present,
   otherwise inserts a new one at the head of its bucket. May trigger a
   gc-or-grow of the shard when it becomes overfull. */
template <bool key_definitely_static>
static grpc_mdelem md_create_must_intern(const grpc_slice& key,
                                         const grpc_slice& value,
                                         uint32_t hash) {
  // Here, we know both key and value are both at least interned, and both
  // possibly static. We know that anything inside the shared interned table is
  // also at least interned (and maybe static). Note that equality for a static
  // and interned slice implies that they are both the same exact slice.
  // The same applies to a pair of interned slices, or a pair of static slices.
  // Rather than run the full equality check, we can therefore just do a pointer
  // comparison of the refcounts.
  InternedMetadata* md;
  mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
  size_t idx;

  GPR_TIMER_SCOPE("grpc_mdelem_from_metadata_strings", 0);

  gpr_mu_lock(&shard->mu);

  idx = TABLE_IDX(hash, shard->capacity);
  /* search for an existing pair */
  for (md = shard->elems[idx].next; md; md = md->bucket_next()) {
    if (grpc_slice_static_interned_equal(key, md->key()) &&
        grpc_slice_static_interned_equal(value, md->value())) {
      // Must ref while still holding the lock, lest a concurrent gc free md.
      md->RefWithShardLocked(shard);
      gpr_mu_unlock(&shard->mu);
      return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
    }
  }

  /* not found: create a new pair */
  md = key_definitely_static
           ? new InternedMetadata(
                 key, value, hash, shard->elems[idx].next,
                 static_cast<const InternedMetadata::NoRefKey*>(nullptr))
           : new InternedMetadata(key, value, hash, shard->elems[idx].next);
  shard->elems[idx].next = md;
  shard->count++;

  // Average chain length exceeded 2: reclaim dead elements or double the
  // bucket count (see rehash_mdtab).
  if (shard->count > shard->capacity * 2) {
    rehash_mdtab(shard);
  }

  gpr_mu_unlock(&shard->mu);

  return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
}
473
/* Public entry point: creates an mdelem from arbitrary slices; nothing is
   known statically about the key, so the <false> instantiation is used. */
grpc_mdelem grpc_mdelem_create(
    const grpc_slice& key, const grpc_slice& value,
    grpc_mdelem_data* compatible_external_backing_store) {
  return md_create<false>(key, value, compatible_external_backing_store);
}
479
/* Overload for a key known at compile time to be a static metadata string;
   the <true> instantiation skips interned-ness checks and key refs. */
grpc_mdelem grpc_mdelem_create(
    const grpc_core::StaticMetadataSlice& key, const grpc_slice& value,
    grpc_mdelem_data* compatible_external_backing_store) {
  return md_create<true>(key, value, compatible_external_backing_store);
}
485
486 /* Create grpc_mdelem from provided slices. We specify via template parameter
487 whether we know that the input key is static or not. If it is, we short
488 circuit various comparisons and a no-op unref. */
489 template <bool key_definitely_static>
md_from_slices(const grpc_slice & key,const grpc_slice & value)490 static grpc_mdelem md_from_slices(const grpc_slice& key,
491 const grpc_slice& value) {
492 // Ensure key is, in fact, static if we claimed it was.
493 GPR_DEBUG_ASSERT(!key_definitely_static ||
494 GRPC_IS_STATIC_METADATA_STRING(key));
495 grpc_mdelem out = md_create<key_definitely_static>(key, value, nullptr);
496 if (!key_definitely_static) {
497 grpc_slice_unref_internal(key);
498 }
499 grpc_slice_unref_internal(value);
500 return out;
501 }
502
/* Generic slices: nothing known statically about the key. Takes ownership of
   the caller's refs on both slices. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_slice& key,
                                    const grpc_slice& value) {
  return md_from_slices</*key_definitely_static=*/false>(key, value);
}
507
/* Static key overload: the key needs no ref management. Takes ownership of
   the caller's ref on |value|. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
                                    const grpc_slice& value) {
  return md_from_slices</*key_definitely_static=*/true>(key, value);
}
512
grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice & key,const grpc_core::StaticMetadataSlice & value)513 grpc_mdelem grpc_mdelem_from_slices(
514 const grpc_core::StaticMetadataSlice& key,
515 const grpc_core::StaticMetadataSlice& value) {
516 grpc_mdelem out = md_create_maybe_static<true, true>(key, value);
517 return out;
518 }
519
/* Static key, managed value: takes ownership of the caller's ref on |value|.
 */
grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::StaticMetadataSlice& key,
    const grpc_core::ManagedMemorySlice& value) {
  // TODO(arjunroy): We can save the unref if md_create_maybe_static ended up
  // creating a new interned metadata. But otherwise - we need this here.
  grpc_mdelem out = md_create_maybe_static<true>(key, value);
  grpc_slice_unref_internal(value);
  return out;
}
529
/* Both slices managed (interned but not static): takes ownership of the
   caller's refs on both. */
grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::ManagedMemorySlice& key,
    const grpc_core::ManagedMemorySlice& value) {
  grpc_mdelem out = md_create_maybe_static<false>(key, value);
  // TODO(arjunroy): We can save the unref if md_create_maybe_static ended up
  // creating a new interned metadata. But otherwise - we need this here.
  grpc_slice_unref_internal(key);
  grpc_slice_unref_internal(value);
  return out;
}
540
/* Converts a surface-API grpc_metadata entry into an mdelem. If neither slice
   was changed by static interning, |metadata| itself is reused as external
   backing store (its layout is compatible with grpc_mdelem_data), avoiding an
   allocation. */
grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata) {
  bool changed = false;
  grpc_slice key_slice =
      grpc_slice_maybe_static_intern(metadata->key, &changed);
  grpc_slice value_slice =
      grpc_slice_maybe_static_intern(metadata->value, &changed);
  return grpc_mdelem_create(
      key_slice, value_slice,
      changed ? nullptr : reinterpret_cast<grpc_mdelem_data*>(metadata));
}
551
get_user_data(UserData * user_data,void (* destroy_func)(void *))552 static void* get_user_data(UserData* user_data, void (*destroy_func)(void*)) {
553 if (user_data->destroy_user_data.Load(grpc_core::MemoryOrder::ACQUIRE) ==
554 destroy_func) {
555 return user_data->data.Load(grpc_core::MemoryOrder::RELAXED);
556 } else {
557 return nullptr;
558 }
559 }
560
/* Looks up per-element user data keyed by |destroy_func|. External elements
   carry no user data; static elements read from the parallel
   grpc_static_mdelem_user_data array, indexed by the element's offset in the
   static table; allocated/interned elements use their embedded UserData. */
void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) {
  switch (GRPC_MDELEM_STORAGE(md)) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
      return nullptr;
    case GRPC_MDELEM_STORAGE_STATIC:
      return reinterpret_cast<void*>(
          grpc_static_mdelem_user_data
              [reinterpret_cast<grpc_core::StaticMetadata*>(
                   GRPC_MDELEM_DATA(md)) -
               grpc_static_mdelem_table()]);
    case GRPC_MDELEM_STORAGE_ALLOCATED: {
      auto* am = reinterpret_cast<AllocatedMetadata*>(GRPC_MDELEM_DATA(md));
      return get_user_data(am->user_data(), destroy_func);
    }
    case GRPC_MDELEM_STORAGE_INTERNED: {
      auto* im = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(md);
      return get_user_data(im->user_data(), destroy_func);
    }
  }
  GPR_UNREACHABLE_CODE(return nullptr);
}
582
/* Attaches (data, destroy_func) to |ud| exactly once. If user data was
   already set, the new data is destroyed immediately and the existing value
   is returned, so concurrent setters converge on one winner. The release
   store on destroy_user_data publishes |data| to get_user_data's acquire
   load. */
static void* set_user_data(UserData* ud, void (*destroy_func)(void*),
                           void* data) {
  GPR_ASSERT((data == nullptr) == (destroy_func == nullptr));
  grpc_core::ReleasableMutexLock lock(&ud->mu_user_data);
  if (ud->destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED)) {
    /* user data can only be set once */
    lock.Unlock();
    if (destroy_func != nullptr) {
      destroy_func(data);
    }
    return ud->data.Load(grpc_core::MemoryOrder::RELAXED);
  }
  ud->data.Store(data, grpc_core::MemoryOrder::RELAXED);
  ud->destroy_user_data.Store(destroy_func, grpc_core::MemoryOrder::RELEASE);
  return data;
}
599
/* Attaches user data to an mdelem. External and static elements cannot store
   caller data: the new data is destroyed immediately and (for static) the
   precomputed static user data is returned instead. Allocated/interned
   elements delegate to set_user_data's once-only protocol. */
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
                                void* data) {
  switch (GRPC_MDELEM_STORAGE(md)) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
      destroy_func(data);
      return nullptr;
    case GRPC_MDELEM_STORAGE_STATIC:
      destroy_func(data);
      return reinterpret_cast<void*>(
          grpc_static_mdelem_user_data
              [reinterpret_cast<grpc_core::StaticMetadata*>(
                   GRPC_MDELEM_DATA(md)) -
               grpc_static_mdelem_table()]);
    case GRPC_MDELEM_STORAGE_ALLOCATED: {
      auto* am = reinterpret_cast<AllocatedMetadata*>(GRPC_MDELEM_DATA(md));
      return set_user_data(am->user_data(), destroy_func, data);
    }
    case GRPC_MDELEM_STORAGE_INTERNED: {
      auto* im = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(md);
      GPR_DEBUG_ASSERT(!is_mdelem_static(md));
      return set_user_data(im->user_data(), destroy_func, data);
    }
  }
  GPR_UNREACHABLE_CODE(return nullptr);
}
625
grpc_mdelem_eq(grpc_mdelem a,grpc_mdelem b)626 bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b) {
627 if (a.payload == b.payload) return true;
628 if (GRPC_MDELEM_IS_INTERNED(a) && GRPC_MDELEM_IS_INTERNED(b)) return false;
629 if (GRPC_MDISNULL(a) || GRPC_MDISNULL(b)) return false;
630 return grpc_slice_eq(GRPC_MDKEY(a), GRPC_MDKEY(b)) &&
631 grpc_slice_eq(GRPC_MDVALUE(a), GRPC_MDVALUE(b));
632 }
633
/* Records that one interned element (in the shard selected by |hash|) has
   dropped to zero refs, bumping the shard's lazily maintained estimate of
   collectible elements. */
static void note_disposed_interned_metadata(uint32_t hash) {
  mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
  gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
}
638
/* Drops one ref from |gmd|. External/static elements are not refcounted here
   and are ignored. DEBUG_ARGS threads the caller's file/line through in debug
   builds for trace output. */
void grpc_mdelem_do_unref(grpc_mdelem gmd DEBUG_ARGS) {
  switch (GRPC_MDELEM_STORAGE(gmd)) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
    case GRPC_MDELEM_STORAGE_STATIC:
      return;
    case GRPC_MDELEM_STORAGE_INTERNED: {
      auto* md = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(gmd);
      // Capture the hash before Unref: see the comment below.
      uint32_t hash = md->hash();
      if (GPR_UNLIKELY(md->Unref(FWD_DEBUG_ARGS))) {
        /* once the refcount hits zero, some other thread can come along and
           free md at any time: it's unsafe from this point on to access it */
        note_disposed_interned_metadata(hash);
      }
      break;
    }
    case GRPC_MDELEM_STORAGE_ALLOCATED: {
      auto* md = reinterpret_cast<AllocatedMetadata*> GRPC_MDELEM_DATA(gmd);
      if (GPR_UNLIKELY(md->Unref(FWD_DEBUG_ARGS))) {
        delete md;
      }
      break;
    }
  }
}
663
/* Finalization hook invoked after an element's last ref is dropped by a
   caller that already performed the Unref itself. Interned elements are only
   recorded as collectible (the shard gc deletes them later); allocated
   elements are deleted immediately. */
void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
                                uint32_t hash DEBUG_ARGS) {
  switch (storage) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
    case GRPC_MDELEM_STORAGE_STATIC:
      return;
    case GRPC_MDELEM_STORAGE_INTERNED: {
      note_disposed_interned_metadata(hash);
      break;
    }
    case GRPC_MDELEM_STORAGE_ALLOCATED: {
      delete reinterpret_cast<AllocatedMetadata*>(ptr);
      break;
    }
  }
}
680