// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#include <stdint.h>

#include <algorithm>

#include "base/atomicops.h"
#include "base/bits.h"
#include "base/logging.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"
#include "build/build_config.h"

#if defined(OS_POSIX) && !defined(OS_NACL)
// For madvise(), which is available on all POSIX-compatible systems.
#include <sys/mman.h>
#endif

#if defined(OS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif

#if defined(OS_WIN)
#include <windows.h>
#include "base/win/windows_version.h"
#endif

namespace base {
namespace {

// Use a machine-sized pointer as the atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;

// Template specializations for timestamp serialization/deserialization. These
// are used to serialize timestamps using Unix time on systems where AtomicType
// does not have enough precision to contain a timestamp in the standard
// serialized format.
template <int>
Time TimeFromWireFormat(int64_t value);
template <int>
int64_t TimeToWireFormat(Time time);

// Serialize to Unix time when using the 4-byte wire format.
// Note: this will cease to work on 19 January 2038.
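// (With a 4-byte AtomicType, the timestamp is stored as seconds in the 31
// bits above the lock bit, and 2^31 - 1 seconds past the Unix epoch falls on
// that date.)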
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<4>(int64_t value) {
  return value ? Time::UnixEpoch() + TimeDelta::FromSeconds(value) : Time();
}
template <>
int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<4>(Time time) {
  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}

// Standard serialization format when using 8-byte wire format.
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<8>(int64_t value) {
  return Time::FromInternalValue(value);
}
template <>
int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<8>(Time time) {
  return time.ToInternalValue();
}

struct SharedState {
  enum LockState { UNLOCKED = 0, LOCKED = 1 };

  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
  SharedState(LockState lock_state, Time timestamp) {
    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
    DCHECK_GE(wire_timestamp, 0);
    DCHECK_EQ(lock_state & ~1, 0);
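    // Pack the wire timestamp into the high bits and the lock state into the
    // low bit.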
    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
  }

  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

  Time GetTimestamp() const {
    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
  }

  // Bit 1: Lock state. Bit is set when locked.
  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
  // purged.
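  // The union lets the same storage be accessed as the signed AtomicType that
  // the atomics API operates on and as the unsigned type used for the bit
  // manipulation above.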
  union {
    AtomicType i;
    UAtomicType u;
  } value;
};

// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(
    const WritableSharedMemoryMapping& shared_memory) {
  DCHECK(shared_memory.IsValid());
  return static_cast<SharedState*>(shared_memory.memory());
}

// Round up |size| to a multiple of page size.
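// For example, assuming 4 KiB pages, AlignToPageSize(1) and
// AlignToPageSize(4096) both return 4096.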
size_t AlignToPageSize(size_t size) {
  return bits::Align(size, base::GetPageSize());
}

}  // namespace

DiscardableSharedMemory::DiscardableSharedMemory()
    : mapped_size_(0), locked_page_count_(0) {
}

DiscardableSharedMemory::DiscardableSharedMemory(
    UnsafeSharedMemoryRegion shared_memory_region)
    : shared_memory_region_(std::move(shared_memory_region)),
      mapped_size_(0),
      locked_page_count_(0) {}

DiscardableSharedMemory::~DiscardableSharedMemory() = default;

bool DiscardableSharedMemory::CreateAndMap(size_t size) {
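  // Reserve a page-aligned header for SharedState in front of the usable
  // memory; CheckedNumeric guards the addition against overflow.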
  CheckedNumeric<size_t> checked_size = size;
  checked_size += AlignToPageSize(sizeof(SharedState));
  if (!checked_size.IsValid())
    return false;

  shared_memory_region_ =
      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());

  if (!shared_memory_region_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.Map();
  if (!shared_memory_mapping_.IsValid())
    return false;

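  // The usable size excludes the page-aligned SharedState header at the start
  // of the mapping.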
  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  DCHECK(last_known_usage_.is_null());
  SharedState new_state(SharedState::LOCKED, Time());
  subtle::Release_Store(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      new_state.value.i);
  return true;
}

bool DiscardableSharedMemory::Map(size_t size) {
  DCHECK(!shared_memory_mapping_.IsValid());
  if (shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.MapAt(
      0, AlignToPageSize(sizeof(SharedState)) + size);
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  return true;
}

bool DiscardableSharedMemory::Unmap() {
  if (!shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = WritableSharedMemoryMapping();
  locked_page_count_ = 0;
#if DCHECK_IS_ON()
  locked_pages_.clear();
#endif
  mapped_size_ = 0;
  return true;
}

DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
    size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  DCHECK(shared_memory_mapping_.IsValid());

  // We need to successfully acquire the platform independent lock before
  // individual pages can be locked.
  if (!locked_page_count_) {
    // Fail when the instance has been purged or was not initialized properly,
    // which is detected by checking whether |last_known_usage_| is null.
    if (last_known_usage_.is_null())
      return FAILED;

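    // Attempt the transition UNLOCKED(last usage time) -> LOCKED(null time).
    // A mismatch means the memory was purged, or |last_known_usage_| is stale
    // because another process updated the usage timestamp.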
    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
    SharedState new_state(SharedState::LOCKED, Time());
    SharedState result(subtle::Acquire_CompareAndSwap(
        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
        old_state.value.i, new_state.value.i));
    if (result.value.u != old_state.value.u) {
      // Update |last_known_usage_| in case the above CAS failed because of
      // an incorrect timestamp.
      last_known_usage_ = result.GetTimestamp();
      return FAILED;
    }
  }

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Add pages to |locked_page_count_|.
  // Note: Locking a page that is already locked is an error.
  locked_page_count_ += end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto result = locked_pages_.insert(page);
    DCHECK(result.second);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Always behave as if memory was purged when trying to lock a 0 byte segment.
  if (!length)
    return PURGED;

#if defined(OS_ANDROID)
  // Ensure that the platform won't discard the required pages.
  return LockPages(shared_memory_region_,
                   AlignToPageSize(sizeof(SharedState)) + offset, length);
#elif defined(OS_MACOSX)
  // On macOS, there is no mechanism to lock pages. However, we do need to call
  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
  // footprint via task_info().
  //
  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
  //
  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
  // that's where the memory is actually released, rather than Unlock(), which
  // is a no-op on macOS.
  //
  // For more information, see
  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
  if (madvise(reinterpret_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState)),
              AlignToPageSize(mapped_size_), MADV_FREE_REUSE))
    DPLOG(ERROR) << "madvise() failed";
  return DiscardableSharedMemory::SUCCESS;
#else
  return DiscardableSharedMemory::SUCCESS;
#endif
}

void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Passing zero for |length| means "everything onward". Note that |length| may
  // still be zero after this calculation, e.g. if |mapped_size_| is zero.
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  DCHECK(shared_memory_mapping_.IsValid());

  // Allow the pages to be discarded by the platform, if supported.
  UnlockPages(shared_memory_region_,
              AlignToPageSize(sizeof(SharedState)) + offset, length);

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Remove pages from |locked_page_count_|.
  // Note: Unlocking a page that is not locked is an error.
  DCHECK_GE(locked_page_count_, end - start);
  locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto erased_count = locked_pages_.erase(page);
    DCHECK_EQ(1u, erased_count);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Early out and avoid releasing the platform independent lock if some pages
  // are still locked.
  if (locked_page_count_)
    return;

  Time current_time = Now();
  DCHECK(!current_time.is_null());

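  // Attempt the transition LOCKED(null time) -> UNLOCKED(current time); the
  // CAS is expected to succeed since this process still holds the lock.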
  SharedState old_state(SharedState::LOCKED, Time());
  SharedState new_state(SharedState::UNLOCKED, current_time);
  // Note: timestamp cannot be NULL as that is a unique value used when
  // locked or purged.
  DCHECK(!new_state.GetTimestamp().is_null());
  // Timestamp precision should at least be accurate to the second.
  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
            (current_time - Time::UnixEpoch()).InSeconds());
  SharedState result(subtle::Release_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  DCHECK_EQ(old_state.value.u, result.value.u);

  last_known_usage_ = current_time;
}

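// Usable memory starts one page-aligned SharedState header past the start of
// the mapping.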
void* DiscardableSharedMemory::memory() const {
  return reinterpret_cast<uint8_t*>(shared_memory_mapping_.memory()) +
         AlignToPageSize(sizeof(SharedState));
}

bool DiscardableSharedMemory::Purge(Time current_time) {
  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(shared_memory_mapping_.IsValid());

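  // Attempt the transition UNLOCKED(last usage time) -> UNLOCKED(null time),
  // which marks the memory as purged. The CAS fails if the memory is locked
  // or if |last_known_usage_| does not match the shared timestamp.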
  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
  SharedState new_state(SharedState::UNLOCKED, Time());
  SharedState result(subtle::Acquire_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  // Update |last_known_usage_| to |current_time| if the memory is locked. This
  // allows the caller to determine whether purging failed because the last
  // known usage was incorrect or because the memory was locked. In the second
  // case, the caller should most likely wait for some amount of time before
  // attempting to purge the memory again.
  if (result.value.u != old_state.value.u) {
    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
                            ? current_time
                            : result.GetTimestamp();
    return false;
  }

  // The next section releases as many resources as possible from the purging
  // process, until the client process notices the purge and releases its own
  // references.
  // Note: this memory will not be accessed again. The segment will be
  // freed asynchronously at a later time, so just do the best
  // immediately.
#if defined(OS_POSIX) && !defined(OS_NACL)
// Linux and Android provide MADV_REMOVE, which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (macOS, BSDs)
// provide MADV_FREE, which has the same result but purges memory lazily.
#if defined(OS_LINUX) || defined(OS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif defined(OS_MACOSX)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(reinterpret_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState)),
              AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#elif defined(OS_WIN)
  if (base::win::GetVersion() >= base::win::VERSION_WIN8_1) {
    // Discard the purged pages, which releases the physical storage (resident
    // memory, compressed or swapped), but leaves them reserved & committed.
    // This does not free commit for use by other applications, but allows the
    // system to avoid compressing/swapping these pages to free physical memory.
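    // DiscardVirtualMemory() is only available on Windows 8.1 and later
    // (hence the version check above), so resolve it dynamically instead of
    // linking against it.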
    static const auto discard_virtual_memory =
        reinterpret_cast<decltype(&::DiscardVirtualMemory)>(GetProcAddress(
            GetModuleHandle(L"kernel32.dll"), "DiscardVirtualMemory"));
    if (discard_virtual_memory) {
      DWORD discard_result = discard_virtual_memory(
          reinterpret_cast<char*>(shared_memory_mapping_.memory()) +
              AlignToPageSize(sizeof(SharedState)),
          AlignToPageSize(mapped_size_));
      if (discard_result != ERROR_SUCCESS) {
        DLOG(DCHECK) << "DiscardVirtualMemory() failed in Purge(): "
                     << logging::SystemErrorCodeToString(discard_result);
      }
    }
  }
#endif

  last_known_usage_ = Time();
  return true;
}

bool DiscardableSharedMemory::IsMemoryResident() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED ||
         !result.GetTimestamp().is_null();
}

bool DiscardableSharedMemory::IsMemoryLocked() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED;
}

void DiscardableSharedMemory::Close() {
  shared_memory_region_ = UnsafeSharedMemoryRegion();
}

void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
    trace_event::MemoryAllocatorDump* local_segment_dump,
    trace_event::ProcessMemoryDump* pmd,
    bool is_owned) const {
  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
      shared_memory_mapping_, pmd);
  // TODO(ssid): Clean this up with a new API to inherit the size of the parent
  // dump once we send the full PMD and calculate sizes inside chrome,
  // crbug.com/704203.
  size_t resident_size = shared_memory_dump->GetSizeInternal();
  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
                                trace_event::MemoryAllocatorDump::kUnitsBytes,
                                resident_size);

  // By creating an edge with a higher |importance| (w.r.t. non-owned dumps),
  // the tracing UI will account the effective size of the segment to the
  // client instead of the manager.
  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
  // values, crbug.com/754793.
  const int kImportance = is_owned ? 2 : 0;
  auto shared_memory_guid = shared_memory_mapping_.guid();
  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());

  // Owned discardable segments, which are allocated by the client process,
  // could have been cleared by the discardable manager, so the segment need
  // not exist in memory. Weak dumps are created to indicate to the UI that
  // the dump should exist only if the manager also created the global dump
  // edge.
  if (is_owned) {
    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                             shared_memory_guid, kImportance);
  } else {
    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                         shared_memory_guid, kImportance);
  }
}

// static
DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if defined(OS_ANDROID)
  if (region.IsValid()) {
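    // Pin the pages so ashmem will not reclaim them. ASHMEM_WAS_PURGED from
    // ashmem_pin_region() indicates the region was purged while unpinned.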
    int pin_result =
        ashmem_pin_region(region.GetPlatformHandle(), offset, length);
    if (pin_result == ASHMEM_WAS_PURGED)
      return PURGED;
    if (pin_result < 0)
      return FAILED;
  }
#endif
  return SUCCESS;
}

// static
void DiscardableSharedMemory::UnlockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if defined(OS_ANDROID)
  if (region.IsValid()) {
    int unpin_result =
        ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
    DCHECK_EQ(0, unpin_result);
  }
#endif
}

Time DiscardableSharedMemory::Now() const {
  return Time::Now();
}

}  // namespace base