// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#include <stdint.h>

#include <algorithm>

#include "base/atomicops.h"
#include "base/bits.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_internal.h"
#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/page_allocator.h"  // nogncheck
#endif

#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// For madvise() which is available on all POSIX compatible systems.
#include <sys/mman.h>
#endif

#if BUILDFLAG(IS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include "base/win/windows_version.h"
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <lib/zx/vmar.h>
#include <zircon/types.h>
#include "base/fuchsia/fuchsia_logging.h"
#endif

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/process_memory_dump.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

namespace base {
namespace {

// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;

// Template specialization for timestamp serialization/deserialization. This
// is used to serialize timestamps using Unix time on systems where AtomicType
// does not have enough precision to contain a timestamp in the standard
// serialized format.
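// With a 4-byte AtomicType, only 31 bits remain for the timestamp once the
// lock bit is reserved, so the timestamp is stored as whole seconds since the
// Unix epoch. With an 8-byte AtomicType, Time's full internal representation
// fits.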
template <int>
Time TimeFromWireFormat(int64_t value);
template <int>
int64_t TimeToWireFormat(Time time);

// Serialize to Unix time when using 4-byte wire format.
// Note: on 19 January 2038, this will cease to work.
template <>
[[maybe_unused]] Time TimeFromWireFormat<4>(int64_t value) {
  return value ? Time::UnixEpoch() + Seconds(value) : Time();
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<4>(Time time) {
  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}

// Standard serialization format when using 8-byte wire format.
template <>
[[maybe_unused]] Time TimeFromWireFormat<8>(int64_t value) {
  return Time::FromInternalValue(value);
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<8>(Time time) {
  return time.ToInternalValue();
}

struct SharedState {
  enum LockState { UNLOCKED = 0, LOCKED = 1 };

  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
  SharedState(LockState lock_state, Time timestamp) {
    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
    DCHECK_GE(wire_timestamp, 0);
    DCHECK_EQ(lock_state & ~1, 0);
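    // Pack the timestamp into the upper bits, leaving bit 0 for the lock
    // state.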
    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
  }

  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

  Time GetTimestamp() const {
    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
  }

  // Bit 1: Lock state. Bit is set when locked.
  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
  // purged.
  union {
    AtomicType i;
    UAtomicType u;
  } value;
};

// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(
    const WritableSharedMemoryMapping& shared_memory) {
  DCHECK(shared_memory.IsValid());
  return shared_memory.GetMemoryAs<SharedState>();
}

// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
  return bits::AlignUp(size, base::GetPageSize());
}

#if BUILDFLAG(IS_ANDROID)
bool UseAshmemUnpinningForDiscardableMemory() {
  if (!ashmem_device_is_supported())
    return false;

  // If we are participating in the discardable memory backing trial, only
  // enable ashmem unpinning when we are in the corresponding trial group.
  if (base::DiscardableMemoryBackingFieldTrialIsEnabled()) {
    return base::GetDiscardableMemoryBackingFieldTrialGroup() ==
           base::DiscardableMemoryTrialGroup::kAshmem;
  }
  return true;
}
#endif  // BUILDFLAG(IS_ANDROID)

}  // namespace

DiscardableSharedMemory::DiscardableSharedMemory() = default;

DiscardableSharedMemory::DiscardableSharedMemory(
    UnsafeSharedMemoryRegion shared_memory_region)
    : shared_memory_region_(std::move(shared_memory_region)) {}

DiscardableSharedMemory::~DiscardableSharedMemory() = default;

bool DiscardableSharedMemory::CreateAndMap(size_t size) {
  CheckedNumeric<size_t> checked_size = size;
  checked_size += AlignToPageSize(sizeof(SharedState));
  if (!checked_size.IsValid())
    return false;

  shared_memory_region_ =
      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());

  if (!shared_memory_region_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.Map();
  if (!shared_memory_mapping_.IsValid())
    return false;

  locked_page_count_ =
      AlignToPageSize(mapped_memory().size()) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  DCHECK(last_known_usage_.is_null());
  SharedState new_state(SharedState::LOCKED, Time());
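  // Release semantics guarantee that the mapping's initialized contents are
  // visible to any other process that subsequently observes this state.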
  subtle::Release_Store(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      new_state.value.i);
  return true;
}

bool DiscardableSharedMemory::Map(size_t size) {
  DCHECK(!shared_memory_mapping_.IsValid());
  if (shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.MapAt(
      0, AlignToPageSize(sizeof(SharedState)) + size);
  if (!shared_memory_mapping_.IsValid())
    return false;

  locked_page_count_ =
      AlignToPageSize(mapped_memory().size()) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  return true;
}

bool DiscardableSharedMemory::Unmap() {
  if (!shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = WritableSharedMemoryMapping();
  locked_page_count_ = 0;
#if DCHECK_IS_ON()
  locked_pages_.clear();
#endif
  return true;
}

DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
    size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  DCHECK(shared_memory_mapping_.IsValid());

  // We need to successfully acquire the platform independent lock before
  // individual pages can be locked.
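  // The UNLOCKED -> LOCKED transition below succeeds only if the shared state
  // still holds the timestamp we last observed; a mismatch means another
  // process has locked or purged the memory in the meantime.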
  if (!locked_page_count_) {
    // Fail when the instance has been purged or was not initialized properly,
    // which is detected by checking if |last_known_usage_| is NULL.
    if (last_known_usage_.is_null())
      return FAILED;

    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
    SharedState new_state(SharedState::LOCKED, Time());
    SharedState result(subtle::Acquire_CompareAndSwap(
        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
        old_state.value.i, new_state.value.i));
    if (result.value.u != old_state.value.u) {
      // Update |last_known_usage_| in case the above CAS failed because of
      // an incorrect timestamp.
      last_known_usage_ = result.GetTimestamp();
      return FAILED;
    }
  }

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_memory().size()) - offset;

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_memory().size()) / base::GetPageSize());

  // Add pages to |locked_page_count_|.
  // Note: Locking a page that is already locked is an error.
  locked_page_count_ += end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto result = locked_pages_.insert(page);
    DCHECK(result.second);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Always behave as if memory was purged when trying to lock a 0 byte segment.
  if (!length)
    return PURGED;

#if BUILDFLAG(IS_ANDROID)
  // Ensure that the platform won't discard the required pages.
  return LockPages(shared_memory_region_,
                   AlignToPageSize(sizeof(SharedState)) + offset, length);
#elif BUILDFLAG(IS_APPLE)
  // On macOS, there is no mechanism to lock pages. However, we do need to call
  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
  // footprint via task_info().
  //
  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
  //
  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
  // that's where the memory is actually released, rather than Unlock(), which
  // is a no-op on macOS.
  //
  // For more information, see
  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
  base::span<uint8_t> mapped = mapped_memory();
  madvise(mapped.data(), AlignToPageSize(mapped.size()), MADV_FREE_REUSE);
  return DiscardableSharedMemory::SUCCESS;
#else
  return DiscardableSharedMemory::SUCCESS;
#endif
}

void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Passing zero for |length| means "everything onward". Note that |length| may
  // still be zero after this calculation, e.g. if the mapped size is zero.
  if (!length)
    length = AlignToPageSize(mapped_memory().size()) - offset;

  DCHECK(shared_memory_mapping_.IsValid());

  // Allow the pages to be discarded by the platform, if supported.
  UnlockPages(shared_memory_region_,
              AlignToPageSize(sizeof(SharedState)) + offset, length);

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_memory().size()) / base::GetPageSize());

  // Remove pages from |locked_page_count_|.
  // Note: Unlocking a page that is not locked is an error.
  DCHECK_GE(locked_page_count_, end - start);
  locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto erased_count = locked_pages_.erase(page);
    DCHECK_EQ(1u, erased_count);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Early out and avoid releasing the platform independent lock if some pages
  // are still locked.
  if (locked_page_count_)
    return;

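  // All pages are now unlocked: record the current time as the usage timestamp
  // and release the platform independent lock.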
  Time current_time = Now();
  DCHECK(!current_time.is_null());

  SharedState old_state(SharedState::LOCKED, Time());
  SharedState new_state(SharedState::UNLOCKED, current_time);
  // Note: timestamp cannot be NULL as that is a unique value used when
  // locked or purged.
  DCHECK(!new_state.GetTimestamp().is_null());
  // Timestamp precision should at least be accurate to the second.
  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
            (current_time - Time::UnixEpoch()).InSeconds());
  SharedState result(subtle::Release_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  DCHECK_EQ(old_state.value.u, result.value.u);

  last_known_usage_ = current_time;
}

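// The pages reserved at the start of the mapping hold the SharedState; the
// spans returned below expose only the caller-usable memory after them.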
span<uint8_t> DiscardableSharedMemory::memory() const {
  return shared_memory_mapping_.GetMemoryAsSpan<uint8_t>().subspan(
      AlignToPageSize(sizeof(SharedState)));
}

span<uint8_t> DiscardableSharedMemory::mapped_memory() const {
  return shared_memory_mapping_.mapped_memory().subspan(
      AlignToPageSize(sizeof(SharedState)));
}

bool DiscardableSharedMemory::Purge(Time current_time) {
  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
  SharedState new_state(SharedState::UNLOCKED, Time());
  SharedState result(subtle::Acquire_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  // Update |last_known_usage_| to |current_time| if the memory is locked. This
  // allows the caller to determine if purging failed because the last known
  // usage was incorrect or because the memory was locked. In the second case,
  // the caller should most likely wait for some amount of time before
  // attempting to purge the memory again.
  if (result.value.u != old_state.value.u) {
    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
                            ? current_time
                            : result.GetTimestamp();
    return false;
  }

// The next section releases as many resources as it can from the purging
// process, until the client process notices the purge and releases its own
// references.
// Note: this memory will not be accessed again. The segment will be
// freed asynchronously at a later time, so just do the best we can
// immediately.
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE, which is preferred as its behavior
// can be verified in tests. Other POSIX flavors (macOS, BSDs) provide
// MADV_FREE, which has the same result but purges memory lazily.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  base::span<uint8_t> map = mapped_memory();
  if (madvise(map.data(), AlignToPageSize(map.size()), MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#elif BUILDFLAG(IS_WIN)
  // On Windows, discarded pages are not returned to the system immediately and
  // not guaranteed to be zeroed when returned to the application.
  base::span<uint8_t> mapped = mapped_memory();
  uint8_t* address = mapped.data();
  size_t length = AlignToPageSize(mapped.size());

  DWORD ret = DiscardVirtualMemory(address, length);
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
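  // MEM_RESET on an already committed region tells the memory manager that the
  // contents are no longer of interest, without decommitting the pages.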
  if (ret != ERROR_SUCCESS) {
    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    CHECK(ptr);
  }
#elif BUILDFLAG(IS_FUCHSIA)
  // De-commit via our VMAR, rather than relying on the VMO handle, since the
  // handle may have been closed after the memory was mapped into this process.
  base::span<uint8_t> mapped = mapped_memory();
  uint64_t address_int = reinterpret_cast<uint64_t>(mapped.data());
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address_int, AlignToPageSize(mapped.size()), nullptr,
      0);
  ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
#endif  // BUILDFLAG(IS_FUCHSIA)

  last_known_usage_ = Time();
  return true;
}

bool DiscardableSharedMemory::IsMemoryResident() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED ||
         !result.GetTimestamp().is_null();
}

bool DiscardableSharedMemory::IsMemoryLocked() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED;
}

void DiscardableSharedMemory::Close() {
  shared_memory_region_ = UnsafeSharedMemoryRegion();
}

void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
    trace_event::MemoryAllocatorDump* local_segment_dump,
    trace_event::ProcessMemoryDump* pmd,
    bool is_owned) const {
// Memory dumps are only supported when tracing support is enabled.
#if BUILDFLAG(ENABLE_BASE_TRACING)
  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
      shared_memory_mapping_, pmd);
  // TODO(ssid): Clean this up with a new API to inherit the size of the parent
  // dump once we send the full PMD and calculate sizes inside chrome,
  // crbug.com/704203.
  uint64_t resident_size = shared_memory_dump->GetSizeInternal();
  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
                                trace_event::MemoryAllocatorDump::kUnitsBytes,
                                resident_size);

  // By creating an edge with a higher |importance| (w.r.t. non-owned dumps),
  // the tracing UI will attribute the effective size of the segment to the
  // client instead of the manager.
  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
  // values, crbug.com/754793.
  const int kImportance = is_owned ? 2 : 0;
  auto shared_memory_guid = shared_memory_mapping_.guid();
  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());

  // Owned discardable segments, which are allocated by the client process,
  // could have been cleared by the discardable manager. So the segment need
  // not exist in memory, and weak dumps are created to indicate to the UI that
  // the dump should exist only if the manager also created the global dump
  // edge.
  if (is_owned) {
    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                             shared_memory_guid, kImportance);
  } else {
    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                         shared_memory_guid, kImportance);
  }
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

// static
DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
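      // Pinning tells ashmem the pages are in use again. ASHMEM_WAS_PURGED
      // indicates that the kernel discarded them while they were unpinned.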
      int pin_result =
          ashmem_pin_region(region.GetPlatformHandle(), offset, length);
      if (pin_result == ASHMEM_WAS_PURGED)
        return PURGED;
      if (pin_result < 0)
        return FAILED;
    }
  }
#endif
  return SUCCESS;
}

// static
void DiscardableSharedMemory::UnlockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
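      // Unpinning makes the pages eligible for reclaim by the kernel under
      // memory pressure.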
      int unpin_result =
          ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
      DCHECK_EQ(0, unpin_result);
    }
  }
#endif
}

Time DiscardableSharedMemory::Now() const {
  return Time::Now();
}

#if BUILDFLAG(IS_ANDROID)
// static
bool DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting() {
  return UseAshmemUnpinningForDiscardableMemory();
}
#endif

}  // namespace base