// Copyright 2021 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
#define ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>

#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/invoke.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {

class CordzInfo;

// Default feature enable states for cord btree, ring buffer, and shallow
// subcord support.
enum CordFeatureDefaults {
  kCordEnableBtreeDefault = false,
  kCordEnableRingBufferDefault = false,
  kCordShallowSubcordsDefault = false
};

extern std::atomic<bool> cord_btree_enabled;
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;

// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
// in debug assertions, and code that calls `IsValid()` explicitly. By default,
// assertions should be relatively cheap and AssertValid() can easily lead to
// O(n^2) complexity as recursive / full tree validation is O(n).
extern std::atomic<bool> cord_btree_exhaustive_validation;

inline void enable_cord_btree(bool enable) {
  cord_btree_enabled.store(enable, std::memory_order_relaxed);
}

inline void enable_cord_ring_buffer(bool enable) {
  cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}

inline void enable_shallow_subcords(bool enable) {
  shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
}
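
// For example, a test or experiment might toggle these defaults at runtime
// (the call sites are assumed here; only the functions above are part of this
// header):
//
//   absl::cord_internal::enable_cord_btree(true);
//   absl::cord_internal::enable_cord_ring_buffer(false);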

enum Constants {
  // The inlined size to use with absl::InlinedVector.
  //
  // Note: The InlinedVectors in this file (and in cord.h) do not need to use
  // the same value for their inlined size. The fact that they do is historical.
  // It may be desirable for each to use a different inlined size optimized for
  // that InlinedVector's usage.
  //
  // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
  // the inlined vector size (47 exists for backward compatibility).
  kInlinedVectorSize = 47,

  // Prefer copying blocks of at most this size, otherwise reference count.
  kMaxBytesToCopy = 511
};

// Compact class for tracking the reference count and state flags for CordRep
// instances. Data is stored in an atomic int32_t for compactness and speed.
class RefcountAndFlags {
 public:
  constexpr RefcountAndFlags() : count_{kRefIncrement} {}
  struct Immortal {};
  explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}

  // Increments the reference count. Imposes no memory ordering.
  inline void Increment() {
    count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
  }

  // Asserts that the current refcount is greater than 0. If the refcount is
  // greater than 1, decrements the reference count.
  //
  // Returns false if there are no references outstanding; true otherwise.
  // Inserts barriers to ensure that state written before this method returns
  // false will be visible to a thread that just observed this method returning
  // false. Always returns true when the immortal bit is set.
  inline bool Decrement() {
    int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
    assert(refcount > 0 || refcount & kImmortalFlag);
    return refcount != kRefIncrement &&
           (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
            kRefcountMask) != kRefIncrement;
  }

  // Same as Decrement but expect that refcount is greater than 1.
  inline bool DecrementExpectHighRefcount() {
    int32_t refcount =
        count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
        kRefcountMask;
    assert(refcount > 0 || refcount & kImmortalFlag);
    return refcount != kRefIncrement;
  }

  // Returns the current reference count using acquire semantics.
  inline int32_t Get() const {
    return count_.load(std::memory_order_acquire) >> kNumFlags;
  }

  // Returns whether the atomic integer is 1.
  // If the reference count is used in the conventional way, a
  // reference count of 1 implies that the current thread owns the
  // reference and no other thread shares it.
  // This call performs the test for a reference count of one, and
  // performs the memory barrier needed for the owning thread
  // to act on the object, knowing that it has exclusive access to the
  // object. Always returns false when the immortal bit is set.
  inline bool IsOne() {
    return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
           kRefIncrement;
  }

  bool IsImmortal() const {
    return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
  }

 private:
  // We reserve the bottom bits for flags.
  // kImmortalFlag indicates that this entity should never be collected; it is
  // used for the StringConstant constructor to avoid collecting immutable
  // constant cords.
  // kReservedFlag is reserved for future use.
  enum {
    kNumFlags = 2,

    kImmortalFlag = 0x1,
    kReservedFlag = 0x2,
    kRefIncrement = (1 << kNumFlags),

    // Bitmask to use when checking refcount by equality. This masks out
    // all flags except kImmortalFlag, which is part of the refcount for
    // purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
    // if the immortal bit is set.)
    kRefcountMask = ~kReservedFlag,
  };
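
  // For illustration, following the constants above: a freshly constructed
  // RefcountAndFlags holds count_ == 4 (refcount 1, no flags), an Increment()
  // raises it to 8 (refcount 2), and an immortal instance starts at
  // count_ == 1 (refcount 0 with kImmortalFlag set), which never compares
  // equal to kRefIncrement in IsOne() or Decrement().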

  std::atomic<int32_t> count_;
};

// The overhead of a vtable is too much for Cord, so we roll our own subclasses
// using only a single byte to differentiate classes from each other - the "tag"
// byte. Define the subclasses first so we can provide downcasting helper
// functions in the base class.

struct CordRepConcat;
struct CordRepExternal;
struct CordRepFlat;
struct CordRepSubstring;
class CordRepRing;
class CordRepBtree;

// Various representations that we allow
enum CordRepKind {
  CONCAT = 0,
  SUBSTRING = 1,
  BTREE = 2,
  RING = 3,
  EXTERNAL = 4,

  // We have different tags for different sized flat arrays,
  // starting with FLAT, and limited to MAX_FLAT_TAG. The 225 value is based on
  // the current 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed
  // in the future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted as well
  // as the Tag <---> Size logic so that FLAT still represents the minimum flat
  // allocation size. (32 bytes as of now).
  FLAT = 5,
  MAX_FLAT_TAG = 225
};
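
// For example, under the current encoding the minimum 32-byte flat allocation
// is encoded as the FLAT tag itself, with larger allocations mapping to higher
// tags in 8-byte steps (32-byte steps for large flats), per the '8 / 32 bytes'
// encoding noted above. The authoritative Tag <---> Size mapping lives in
// cord_rep_flat.h.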

// There are various locations where we want to check if some rep is a 'plain'
// data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we
// can perform this check in a single branch as 'tag >= EXTERNAL'.
// Likewise, we have some locations where we check for 'ring or external/flat',
// so likewise align RING to EXTERNAL.
// Note that we can leave this optimization to the compiler. The compiler will
// DTRT when it sees a condition like `tag == EXTERNAL || tag >= FLAT`.
static_assert(RING == BTREE + 1, "BTREE and RING not consecutive");
static_assert(EXTERNAL == RING + 1, "RING and EXTERNAL not consecutive");
static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
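
// For example, a 'plain data edge' check can rely on this ordering and be
// written as a single comparison (a sketch, not a helper defined here):
//
//   bool is_data_edge = rep->tag >= EXTERNAL;  // EXTERNAL or any FLAT tag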

struct CordRep {
  CordRep() = default;
  constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
      : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}

  // The following three fields have to be less than 32 bytes since
  // that is the smallest supported flat node size.
  size_t length;
  RefcountAndFlags refcount;
  // If tag < FLAT, it represents CordRepKind and indicates the type of node.
  // Otherwise, the node type is CordRepFlat and the tag is the encoded size.
  uint8_t tag;

  // `storage` serves two purposes:
  // - the starting point for CordRepFlat::Data() [flexible-array-member]
  // - 3 bytes of additional storage for use by derived classes.
  // The latter is used by CordRepConcat and CordRepBtree. CordRepConcat stores
  // a 'depth' value in storage[0], and the (future) CordRepBtree class stores
  // `height`, `begin` and `end` in the 3 entries. Otherwise we would need to
  // allocate room for these in the derived class, as not all compilers reuse
  // padding space from the base class (clang and gcc do, MSVC does not, etc).
  uint8_t storage[3];

  // Returns true if this instance's tag matches the requested type.
  constexpr bool IsRing() const { return tag == RING; }
  constexpr bool IsConcat() const { return tag == CONCAT; }
  constexpr bool IsSubstring() const { return tag == SUBSTRING; }
  constexpr bool IsExternal() const { return tag == EXTERNAL; }
  constexpr bool IsFlat() const { return tag >= FLAT; }
  constexpr bool IsBtree() const { return tag == BTREE; }

  inline CordRepRing* ring();
  inline const CordRepRing* ring() const;
  inline CordRepConcat* concat();
  inline const CordRepConcat* concat() const;
  inline CordRepSubstring* substring();
  inline const CordRepSubstring* substring() const;
  inline CordRepExternal* external();
  inline const CordRepExternal* external() const;
  inline CordRepFlat* flat();
  inline const CordRepFlat* flat() const;
  inline CordRepBtree* btree();
  inline const CordRepBtree* btree() const;

  // --------------------------------------------------------------------
  // Memory management

  // Destroys the provided `rep`.
  static void Destroy(CordRep* rep);

  // Increments the reference count of `rep`.
  // Requires `rep` to be a non-null pointer value.
  static inline CordRep* Ref(CordRep* rep);

  // Decrements the reference count of `rep`. Destroys rep if count reaches
  // zero. Requires `rep` to be a non-null pointer value.
  static inline void Unref(CordRep* rep);
};
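
// Typical manual reference management with the functions above (a usage
// sketch; `src` is assumed to be a valid, caller-owned CordRep pointer):
//
//   CordRep* rep = CordRep::Ref(src);  // share ownership of the node
//   ...
//   CordRep::Unref(rep);               // destroys the node on the last unref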

struct CordRepConcat : public CordRep {
  CordRep* left;
  CordRep* right;

  uint8_t depth() const { return storage[0]; }
  void set_depth(uint8_t depth) { storage[0] = depth; }
};

struct CordRepSubstring : public CordRep {
  size_t start;  // Starting offset of substring in child
  CordRep* child;
};

// Type for function pointer that will invoke the releaser function and also
// delete the `CordRepExternalImpl` corresponding to the passed in
// `CordRepExternal`.
using ExternalReleaserInvoker = void (*)(CordRepExternal*);

// External CordReps are allocated together with a type erased releaser. The
// releaser is stored in the memory directly following the CordRepExternal.
struct CordRepExternal : public CordRep {
  CordRepExternal() = default;
  explicit constexpr CordRepExternal(absl::string_view str)
      : CordRep(RefcountAndFlags::Immortal{}, str.size()),
        base(str.data()),
        releaser_invoker(nullptr) {}

  const char* base;
  // Pointer to function that knows how to call and destroy the releaser.
  ExternalReleaserInvoker releaser_invoker;

  // Deletes (releases) the external rep.
  // Requires rep != nullptr and rep->IsExternal()
  static void Delete(CordRep* rep);
};

struct Rank1 {};
struct Rank0 : Rank1 {};

template <typename Releaser, typename = ::absl::base_internal::invoke_result_t<
                                 Releaser, absl::string_view>>
void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) {
  ::absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
}

template <typename Releaser,
          typename = ::absl::base_internal::invoke_result_t<Releaser>>
void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) {
  ::absl::base_internal::invoke(std::forward<Releaser>(releaser));
}
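
// Overload resolution through the Rank0 / Rank1 tags prefers a releaser that
// accepts the released data. For illustration, both of the following are
// acceptable releaser shapes (sketches only):
//
//   auto r1 = [](absl::string_view data) { /* free data.data(), etc. */ };
//   auto r2 = []() { /* release captured state */ };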

// We use CompressedTuple so that we can benefit from EBCO.
template <typename Releaser>
struct CordRepExternalImpl
    : public CordRepExternal,
      public ::absl::container_internal::CompressedTuple<Releaser> {
  // The extra int arg is so that we can avoid interfering with copy/move
  // constructors while still benefitting from perfect forwarding.
  template <typename T>
  CordRepExternalImpl(T&& releaser, int)
      : CordRepExternalImpl::CompressedTuple(std::forward<T>(releaser)) {
    this->releaser_invoker = &Release;
  }

  ~CordRepExternalImpl() {
    InvokeReleaser(Rank0{}, std::move(this->template get<0>()),
                   absl::string_view(base, length));
  }

  static void Release(CordRepExternal* rep) {
    delete static_cast<CordRepExternalImpl*>(rep);
  }
};
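
// A construction sketch (caller-side code is assumed; it is not part of this
// header): the caller allocates the impl together with its releaser, then
// fills in the base CordRep fields before handing the node out as a CordRep*.
//
//   auto* rep = new CordRepExternalImpl<Releaser>(std::move(releaser), 0);
//   rep->base = data.data();
//   rep->length = data.size();
//   rep->tag = EXTERNAL;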

inline void CordRepExternal::Delete(CordRep* rep) {
  assert(rep != nullptr && rep->IsExternal());
  auto* rep_external = static_cast<CordRepExternal*>(rep);
  assert(rep_external->releaser_invoker != nullptr);
  rep_external->releaser_invoker(rep_external);
}

template <typename Str>
struct ConstInitExternalStorage {
  ABSL_CONST_INIT static CordRepExternal value;
};

template <typename Str>
CordRepExternal ConstInitExternalStorage<Str>::value(Str::value);
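
// `Str` is expected to expose a constant string value, e.g. a static
// `absl::string_view` member named `value`. A hypothetical sketch:
//
//   struct HelloStr {
//     static constexpr absl::string_view value = "hello";
//   };
//   CordRepExternal* rep = &ConstInitExternalStorage<HelloStr>::value;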

enum {
  kMaxInline = 15,
};

constexpr char GetOrNull(absl::string_view data, size_t pos) {
  return pos < data.size() ? data[pos] : '\0';
}

// We store cordz_info as 64 bit pointer value in big endian format. This
// guarantees that the least significant byte of cordz_info matches the last
// byte of the inline data representation in as_chars_, which holds the inlined
// size or the 'is_tree' bit.
using cordz_info_t = int64_t;

// Assert that the `cordz_info` pointer value perfectly overlaps the last half
// of `as_chars_` and can hold a pointer value.
static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, "");
static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), "");

// BigEndianByte() creates a big endian representation of 'value', i.e.: a big
// endian value where the last byte in the host's representation holds 'value',
// with all other bytes being 0.
static constexpr cordz_info_t BigEndianByte(unsigned char value) {
#if defined(ABSL_IS_BIG_ENDIAN)
  return value;
#else
  return static_cast<cordz_info_t>(value) << ((sizeof(cordz_info_t) - 1) * 8);
#endif
}
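
// For example, on a little endian host BigEndianByte(1) yields
// 0x0100000000000000: only the last (highest-addressed) byte of the stored
// value holds 1, matching the position of the tag byte in `as_chars_`.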

class InlineData {
 public:
  // DefaultInitType forces the use of the default initialization constructor.
  enum DefaultInitType { kDefaultInit };

  // kNullCordzInfo holds the big endian representation of intptr_t(1).
  // This is the 'null' / initial value of 'cordz_info'. The null value
  // is specifically big endian 1 as with 64-bit pointers, the last
  // byte of cordz_info overlaps with the last byte holding the tag.
  static constexpr cordz_info_t kNullCordzInfo = BigEndianByte(1);

  constexpr InlineData() : as_chars_{0} {}
  explicit InlineData(DefaultInitType) {}
  explicit constexpr InlineData(CordRep* rep) : as_tree_(rep) {}
  explicit constexpr InlineData(absl::string_view chars)
      : as_chars_{
            GetOrNull(chars, 0), GetOrNull(chars, 1),
            GetOrNull(chars, 2), GetOrNull(chars, 3),
            GetOrNull(chars, 4), GetOrNull(chars, 5),
            GetOrNull(chars, 6), GetOrNull(chars, 7),
            GetOrNull(chars, 8), GetOrNull(chars, 9),
            GetOrNull(chars, 10), GetOrNull(chars, 11),
            GetOrNull(chars, 12), GetOrNull(chars, 13),
            GetOrNull(chars, 14), static_cast<char>((chars.size() << 1))} {}

  // Returns true if the current instance is empty.
  // The 'empty value' is an inlined data value of zero length.
  bool is_empty() const { return tag() == 0; }

  // Returns true if the current instance holds a tree value.
  bool is_tree() const { return (tag() & 1) != 0; }

  // Returns true if the current instance holds a cordz_info value.
  // Requires the current instance to hold a tree value.
  bool is_profiled() const {
    assert(is_tree());
    return as_tree_.cordz_info != kNullCordzInfo;
  }

  // Returns true if either of the provided instances holds a cordz_info value.
  // This method is more efficient than the equivalent `data1.is_profiled() ||
  // data2.is_profiled()`. Requires both arguments to hold a tree.
  static bool is_either_profiled(const InlineData& data1,
                                 const InlineData& data2) {
    assert(data1.is_tree() && data2.is_tree());
    return (data1.as_tree_.cordz_info | data2.as_tree_.cordz_info) !=
           kNullCordzInfo;
  }

  // Returns the cordz_info sampling instance for this instance, or nullptr
  // if the current instance is not sampled and does not have CordzInfo data.
  // Requires the current instance to hold a tree value.
  CordzInfo* cordz_info() const {
    assert(is_tree());
    intptr_t info =
        static_cast<intptr_t>(absl::big_endian::ToHost64(as_tree_.cordz_info));
    assert(info & 1);
    return reinterpret_cast<CordzInfo*>(info - 1);
  }

  // Sets the cordz_info sampling instance for this instance. Passing `nullptr`
  // marks the instance as not sampled / having no CordzInfo data.
  // Requires the current instance to hold a tree value.
  void set_cordz_info(CordzInfo* cordz_info) {
    assert(is_tree());
    intptr_t info = reinterpret_cast<intptr_t>(cordz_info) | 1;
    as_tree_.cordz_info = absl::big_endian::FromHost64(info);
  }

  // Resets the current cordz_info to null / empty.
  void clear_cordz_info() {
    assert(is_tree());
    as_tree_.cordz_info = kNullCordzInfo;
  }

  // Returns a read only pointer to the character data inside this instance.
  // Requires the current instance to hold inline data.
  const char* as_chars() const {
    assert(!is_tree());
    return as_chars_;
  }

  // Returns a mutable pointer to the character data inside this instance.
  // Should be used for 'write only' operations setting an inlined value.
  // Applications can set the value of inlined data either before or after
  // setting the inlined size, i.e., both of the below are valid:
  //
  //   // Set inlined data and inline size
  //   memcpy(data_.as_chars(), data, size);
  //   data_.set_inline_size(size);
  //
  //   // Set inlined size and inline data
  //   data_.set_inline_size(size);
  //   memcpy(data_.as_chars(), data, size);
  //
  // It's an error to read from the returned pointer without a preceding write
  // if the current instance does not hold inline data, i.e.: is_tree() == true.
  char* as_chars() { return as_chars_; }

  // Returns the tree value of this value.
  // Requires the current instance to hold a tree value.
  CordRep* as_tree() const {
    assert(is_tree());
    return as_tree_.rep;
  }

  // Initialize this instance to holding the tree value `rep`,
  // initializing the cordz_info to null, i.e.: 'not profiled'.
  void make_tree(CordRep* rep) {
    as_tree_.rep = rep;
    as_tree_.cordz_info = kNullCordzInfo;
  }

  // Sets the tree value of this instance to `rep`.
  // Requires the current instance to already hold a tree value.
  // Does not affect the value of cordz_info.
  void set_tree(CordRep* rep) {
    assert(is_tree());
    as_tree_.rep = rep;
  }

  // Returns the size of the inlined character data inside this instance.
  // Requires the current instance to hold inline data.
  size_t inline_size() const {
    assert(!is_tree());
    return tag() >> 1;
  }

  // Sets the size of the inlined character data inside this instance.
  // Requires `size` to be <= kMaxInline.
  // See the documentation on 'as_chars()' for more information and examples.
  void set_inline_size(size_t size) {
    ABSL_ASSERT(size <= kMaxInline);
    tag() = static_cast<char>(size << 1);
  }
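
  // For example, an inlined value of size 3 is stored with a tag byte of 6
  // (3 << 1); the low bit stays 0, which is what distinguishes inline data
  // from the tree case where the low 'is_tree' bit is 1.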

 private:
  // See cordz_info_t for forced alignment and size of `cordz_info` details.
  struct AsTree {
    explicit constexpr AsTree(absl::cord_internal::CordRep* tree)
        : rep(tree), cordz_info(kNullCordzInfo) {}
    // This union uses up extra space so that whether rep is 32 or 64 bits,
    // cordz_info will still start at the eighth byte, and the last
    // byte of cordz_info will still be the last byte of InlineData.
    union {
      absl::cord_internal::CordRep* rep;
      cordz_info_t unused_aligner;
    };
    cordz_info_t cordz_info;
  };

  char& tag() { return reinterpret_cast<char*>(this)[kMaxInline]; }
  char tag() const { return reinterpret_cast<const char*>(this)[kMaxInline]; }

  // If the data has length <= kMaxInline, we store it in `as_chars_`, and
  // store the size in the last char of `as_chars_`, shifted left by one bit
  // (the low 'is_tree' bit remains 0). Else we store the data in a tree and
  // store a pointer to that tree in `as_tree_.rep`, with the low bit of the
  // last byte (shared with `cordz_info`) set to 1.
  union {
    char as_chars_[kMaxInline + 1];
    AsTree as_tree_;
  };
};

static_assert(sizeof(InlineData) == kMaxInline + 1, "");

inline CordRepConcat* CordRep::concat() {
  assert(IsConcat());
  return static_cast<CordRepConcat*>(this);
}

inline const CordRepConcat* CordRep::concat() const {
  assert(IsConcat());
  return static_cast<const CordRepConcat*>(this);
}

inline CordRepSubstring* CordRep::substring() {
  assert(IsSubstring());
  return static_cast<CordRepSubstring*>(this);
}

inline const CordRepSubstring* CordRep::substring() const {
  assert(IsSubstring());
  return static_cast<const CordRepSubstring*>(this);
}

inline CordRepExternal* CordRep::external() {
  assert(IsExternal());
  return static_cast<CordRepExternal*>(this);
}

inline const CordRepExternal* CordRep::external() const {
  assert(IsExternal());
  return static_cast<const CordRepExternal*>(this);
}

inline CordRep* CordRep::Ref(CordRep* rep) {
  assert(rep != nullptr);
  rep->refcount.Increment();
  return rep;
}

inline void CordRep::Unref(CordRep* rep) {
  assert(rep != nullptr);
  // Expect refcount to be 0. Avoiding the cost of an atomic decrement should
  // typically outweigh the cost of an extra branch checking for ref == 1.
  if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
    Destroy(rep);
  }
}

}  // namespace cord_internal

ABSL_NAMESPACE_END
}  // namespace absl
#endif  // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_