// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#include <stdint.h>

#include <algorithm>

#include "base/allocator/partition_allocator/src/partition_alloc/page_allocator.h"
#include "base/atomicops.h"
#include "base/bits.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_internal.h"
#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// For madvise(), which is available on all POSIX-compatible systems.
#include <sys/mman.h>
#endif

#if BUILDFLAG(IS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#include "base/win/windows_version.h"
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <lib/zx/vmar.h>
#include <zircon/types.h>
#include "base/fuchsia/fuchsia_logging.h"
#endif

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/process_memory_dump.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

namespace base {
namespace {

// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;

// Template specializations for timestamp serialization/deserialization. They
// are used to serialize timestamps using Unix time on systems where
// AtomicType does not have enough precision to contain a timestamp in the
// standard serialized format.
template <int>
Time TimeFromWireFormat(int64_t value);
template <int>
int64_t TimeToWireFormat(Time time);

// Serialize to Unix time when using the 4-byte wire format.
// Note: On 19 January 2038, this will cease to work.
template <>
[[maybe_unused]] Time TimeFromWireFormat<4>(int64_t value) {
  return value ? Time::UnixEpoch() + Seconds(value) : Time();
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<4>(Time time) {
  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}

// Standard serialization format when using the 8-byte wire format.
template <>
[[maybe_unused]] Time TimeFromWireFormat<8>(int64_t value) {
  return Time::FromInternalValue(value);
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<8>(Time time) {
  return time.ToInternalValue();
}
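
// For illustration, a sketch of the 4-byte round-trip: only whole seconds
// since the Unix epoch survive, so sub-second precision is truncated.
//
//   Time t = Time::UnixEpoch() + Seconds(42);
//   int64_t wire = TimeToWireFormat<4>(t);    // 42
//   Time back = TimeFromWireFormat<4>(wire);  // Time::UnixEpoch() + Seconds(42)
//
// The 8-byte format, in contrast, round-trips a Time exactly through its
// internal microsecond value.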

struct SharedState {
  enum LockState { UNLOCKED = 0, LOCKED = 1 };

  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
  SharedState(LockState lock_state, Time timestamp) {
    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
    DCHECK_GE(wire_timestamp, 0);
    DCHECK_EQ(lock_state & ~1, 0);
    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
  }

  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

  Time GetTimestamp() const {
    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
  }

  // Bit 1: Lock state. Bit is set when locked.
  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
  // purged.
  union {
    AtomicType i;
    UAtomicType u;
  } value;
};
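
// For illustration, a sketch of the resulting encoding (assuming the 8-byte
// wire format, where the timestamp is the Time's internal value):
//
//   SharedState s(SharedState::UNLOCKED, t);  // for some non-null Time t
//   s.GetLockState();  // UNLOCKED: the low bit is clear.
//   s.GetTimestamp();  // Recovers t from the remaining high bits.
//
// In this file, locked states are always constructed with a null Time, and
// the purged state is UNLOCKED with a null Time, so a null timestamp uniquely
// marks "locked or purged".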

// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(
    const WritableSharedMemoryMapping& shared_memory) {
  DCHECK(shared_memory.IsValid());
  return static_cast<SharedState*>(shared_memory.memory());
}

// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
  return bits::AlignUp(size, base::GetPageSize());
}
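
// For example, with 4 KiB pages (the page size is platform-dependent):
// AlignToPageSize(1) == 4096, AlignToPageSize(4096) == 4096, and
// AlignToPageSize(0) == 0.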

#if BUILDFLAG(IS_ANDROID)
bool UseAshmemUnpinningForDiscardableMemory() {
  if (!ashmem_device_is_supported())
    return false;

  // If we are participating in the discardable memory backing trial, only
  // enable ashmem unpinning when we are in the corresponding trial group.
  if (base::DiscardableMemoryBackingFieldTrialIsEnabled()) {
    return base::GetDiscardableMemoryBackingFieldTrialGroup() ==
           base::DiscardableMemoryTrialGroup::kAshmem;
  }
  return true;
}
#endif  // BUILDFLAG(IS_ANDROID)

}  // namespace

DiscardableSharedMemory::DiscardableSharedMemory()
    : mapped_size_(0), locked_page_count_(0) {
}

DiscardableSharedMemory::DiscardableSharedMemory(
    UnsafeSharedMemoryRegion shared_memory_region)
    : shared_memory_region_(std::move(shared_memory_region)),
      mapped_size_(0),
      locked_page_count_(0) {}

DiscardableSharedMemory::~DiscardableSharedMemory() = default;

bool DiscardableSharedMemory::CreateAndMap(size_t size) {
  CheckedNumeric<size_t> checked_size = size;
  checked_size += AlignToPageSize(sizeof(SharedState));
  if (!checked_size.IsValid())
    return false;

  shared_memory_region_ =
      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());

  if (!shared_memory_region_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.Map();
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  DCHECK(last_known_usage_.is_null());
  SharedState new_state(SharedState::LOCKED, Time());
  subtle::Release_Store(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      new_state.value.i);
  return true;
}

bool DiscardableSharedMemory::Map(size_t size) {
  DCHECK(!shared_memory_mapping_.IsValid());
  if (shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.MapAt(
      0, AlignToPageSize(sizeof(SharedState)) + size);
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  return true;
}

bool DiscardableSharedMemory::Unmap() {
  if (!shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = WritableSharedMemoryMapping();
  locked_page_count_ = 0;
#if DCHECK_IS_ON()
  locked_pages_.clear();
#endif
  mapped_size_ = 0;
  return true;
}

DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
    size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  DCHECK(shared_memory_mapping_.IsValid());

  // We need to successfully acquire the platform independent lock before
  // individual pages can be locked.
  if (!locked_page_count_) {
    // Return FAILED when the instance has been purged or was not initialized
    // properly; this is detected by checking if |last_known_usage_| is NULL.
    if (last_known_usage_.is_null())
      return FAILED;

    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
    SharedState new_state(SharedState::LOCKED, Time());
    SharedState result(subtle::Acquire_CompareAndSwap(
        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
        old_state.value.i, new_state.value.i));
    if (result.value.u != old_state.value.u) {
      // Update |last_known_usage_| in case the above CAS failed because of
      // an incorrect timestamp.
      last_known_usage_ = result.GetTimestamp();
      return FAILED;
    }
  }

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Add pages to |locked_page_count_|.
  // Note: Locking a page that is already locked is an error.
  locked_page_count_ += end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto result = locked_pages_.insert(page);
    DCHECK(result.second);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Always behave as if memory was purged when trying to lock a 0 byte
  // segment.
  if (!length)
    return PURGED;

#if BUILDFLAG(IS_ANDROID)
  // Ensure that the platform won't discard the required pages.
  return LockPages(shared_memory_region_,
                   AlignToPageSize(sizeof(SharedState)) + offset, length);
#elif BUILDFLAG(IS_APPLE)
  // On macOS, there is no mechanism to lock pages. However, we do need to call
  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
  // footprint via task_info().
  //
  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
  //
  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
  // that's where the memory is actually released, rather than Unlock(), which
  // is a no-op on macOS.
  //
  // For more information, see
  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
  madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
              AlignToPageSize(sizeof(SharedState)),
          AlignToPageSize(mapped_size_), MADV_FREE_REUSE);
  return DiscardableSharedMemory::SUCCESS;
#else
  return DiscardableSharedMemory::SUCCESS;
#endif
}

void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Passing zero for |length| means "everything onward". Note that |length|
  // may still be zero after this calculation, e.g. if |mapped_size_| is zero.
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  DCHECK(shared_memory_mapping_.IsValid());

  // Allow the pages to be discarded by the platform, if supported.
  UnlockPages(shared_memory_region_,
              AlignToPageSize(sizeof(SharedState)) + offset, length);

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Remove pages from |locked_page_count_|.
  // Note: Unlocking a page that is not locked is an error.
  DCHECK_GE(locked_page_count_, end - start);
  locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto erased_count = locked_pages_.erase(page);
    DCHECK_EQ(1u, erased_count);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Early out and avoid releasing the platform independent lock if some pages
  // are still locked.
  if (locked_page_count_)
    return;

  Time current_time = Now();
  DCHECK(!current_time.is_null());

  SharedState old_state(SharedState::LOCKED, Time());
  SharedState new_state(SharedState::UNLOCKED, current_time);
  // Note: timestamp cannot be NULL as that is a unique value used when
  // locked or purged.
  DCHECK(!new_state.GetTimestamp().is_null());
  // Timestamp precision should at least be accurate to the second.
  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
            (current_time - Time::UnixEpoch()).InSeconds());
  SharedState result(subtle::Release_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  DCHECK_EQ(old_state.value.u, result.value.u);

  last_known_usage_ = current_time;
}
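
// Illustrative usage of the lock lifecycle (a sketch, with error handling
// and the discardable-memory manager omitted):
//
//   DiscardableSharedMemory memory;
//   if (memory.CreateAndMap(64 * 1024)) {
//     // The memory starts out locked, so it can be written immediately.
//     memset(memory.memory(), 0, 64 * 1024);
//     memory.Unlock(0, 0);  // Zero length: unlock everything onward.
//     switch (memory.Lock(0, 0)) {
//       case DiscardableSharedMemory::SUCCESS:
//         break;  // Contents are intact and locked again.
//       case DiscardableSharedMemory::PURGED:
//         break;  // Locked, but contents were discarded; regenerate them.
//       case DiscardableSharedMemory::FAILED:
//         break;  // Could not lock; the memory must not be used.
//     }
//   }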

void* DiscardableSharedMemory::memory() const {
  return static_cast<uint8_t*>(shared_memory_mapping_.memory()) +
         AlignToPageSize(sizeof(SharedState));
}

bool DiscardableSharedMemory::Purge(Time current_time) {
  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
  SharedState new_state(SharedState::UNLOCKED, Time());
  SharedState result(subtle::Acquire_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  // Update |last_known_usage_| to |current_time| if the memory is locked. This
  // allows the caller to determine if purging failed because the last known
  // usage was incorrect or because the memory was locked. In the second case,
  // the caller should most likely wait for some amount of time before
  // attempting to purge the memory again.
  if (result.value.u != old_state.value.u) {
    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
                            ? current_time
                            : result.GetTimestamp();
    return false;
  }

// The next section releases as many resources as possible from the purging
// process, until the client process notices the purge and releases its own
// references.
// Note: this memory will not be accessed again. The segment will be
// freed asynchronously at a later time, so just do the best we can
// immediately.
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE, which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (macOS, BSDs)
// provide MADV_FREE, which has the same result but purges the memory lazily.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with
// the reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState)),
              AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#elif BUILDFLAG(IS_WIN)
  // On Windows, discarded pages are not returned to the system immediately and
  // are not guaranteed to be zeroed when returned to the application.
  char* address = static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState));
  size_t length = AlignToPageSize(mapped_size_);

  DWORD ret = DiscardVirtualMemory(address, length);
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret != ERROR_SUCCESS) {
    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    CHECK(ptr);
  }
#elif BUILDFLAG(IS_FUCHSIA)
  // De-commit via our VMAR, rather than relying on the VMO handle, since the
  // handle may have been closed after the memory was mapped into this process.
  uint64_t address_int = reinterpret_cast<uint64_t>(
      static_cast<char*>(shared_memory_mapping_.memory()) +
      AlignToPageSize(sizeof(SharedState)));
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address_int, AlignToPageSize(mapped_size_), nullptr,
      0);
  ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
#endif  // BUILDFLAG(IS_FUCHSIA)

  last_known_usage_ = Time();
  return true;
}
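
// Illustrative manager-side purge attempt (a sketch): a false return means
// either that a client holds the lock or that the last known usage was
// stale; both cases refresh |last_known_usage_|, so a later retry can
// succeed.
//
//   if (!memory.Purge(Time::Now())) {
//     // Locked, or used more recently than the manager believed.
//     // Try again later against the refreshed last known usage.
//   }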

void DiscardableSharedMemory::ReleaseMemoryIfPossible(size_t offset,
                                                      size_t length) {
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE, which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (macOS, BSDs)
// provide MADV_FREE, which has the same result but purges the memory lazily.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with
// the reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
        // BUILDFLAG(IS_ANDROID)
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) + offset,
              length, MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#else   // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
  partition_alloc::DiscardSystemPages(
      static_cast<char*>(shared_memory_mapping_.memory()) + offset, length);
#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
}

bool DiscardableSharedMemory::IsMemoryResident() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED ||
         !result.GetTimestamp().is_null();
}

bool DiscardableSharedMemory::IsMemoryLocked() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED;
}

void DiscardableSharedMemory::Close() {
  shared_memory_region_ = UnsafeSharedMemoryRegion();
}

void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
    trace_event::MemoryAllocatorDump* local_segment_dump,
    trace_event::ProcessMemoryDump* pmd,
    bool is_owned) const {
// Memory dumps are only supported when tracing support is enabled.
#if BUILDFLAG(ENABLE_BASE_TRACING)
  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
      shared_memory_mapping_, pmd);
  // TODO(ssid): Clean this up with a new API to inherit the size of the
  // parent dump once we send the full PMD and calculate sizes inside chrome,
  // crbug.com/704203.
  uint64_t resident_size = shared_memory_dump->GetSizeInternal();
  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
                                trace_event::MemoryAllocatorDump::kUnitsBytes,
                                resident_size);

  // By creating an edge with a higher |importance| (w.r.t. non-owned dumps),
  // the tracing UI will account the effective size of the segment to the
  // client instead of the manager.
  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
  // values, crbug.com/754793.
  const int kImportance = is_owned ? 2 : 0;
  auto shared_memory_guid = shared_memory_mapping_.guid();
  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());

  // Owned discardable segments, which are allocated by the client process,
  // could have been cleared by the discardable manager. So the segment need
  // not exist in memory, and weak dumps are created to indicate to the UI
  // that the dump should exist only if the manager also created the global
  // dump edge.
  if (is_owned) {
    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                             shared_memory_guid, kImportance);
  } else {
    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                         shared_memory_guid, kImportance);
  }
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

// static
DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int pin_result =
          ashmem_pin_region(region.GetPlatformHandle(), offset, length);
      if (pin_result == ASHMEM_WAS_PURGED)
        return PURGED;
      if (pin_result < 0)
        return FAILED;
    }
  }
#endif
  return SUCCESS;
}

// static
void DiscardableSharedMemory::UnlockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int unpin_result =
          ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
      DCHECK_EQ(0, unpin_result);
    }
  }
#endif
}

Time DiscardableSharedMemory::Now() const {
  return Time::Now();
}

#if BUILDFLAG(IS_ANDROID)
// static
bool DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting() {
  return UseAshmemUnpinningForDiscardableMemory();
}
#endif

}  // namespace base