/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/profiling/memory/page_idle_checker.h"
#include "perfetto/ext/base/utils.h"

#include <inttypes.h>
#include <unistd.h>
#include <vector>

namespace perfetto {
namespace profiling {

// TODO(fmayer): Be smarter about batching reads and writes to page_idle.

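// Returns the number of bytes of the range [addr, addr + size) that lie on
// pages the kernel currently reports as idle. Pages in the range that are not
// idle are recorded in touched_virt_page_nrs_ so that MarkPagesIdle() can
// mark them idle again for the next pass.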
int64_t PageIdleChecker::OnIdlePage(uint64_t addr, size_t size) {
  uint64_t page_nr = addr / base::kPageSize;
  uint64_t end_page_nr = (addr + size) / base::kPageSize;
  // The trailing division will have rounded down, unless the end is at a page
  // boundary. Add one page if we rounded down.
  if ((addr + size) % base::kPageSize != 0)
    end_page_nr++;

  size_t pages = static_cast<size_t>(end_page_nr - page_nr);
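  // Example, assuming 4096-byte pages: addr = 5000 and size = 10000 give
  // page_nr = 1 and end_page_nr = 4 (15000 is rounded up), so pages = 3.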

  int64_t idle_mem = 0;
  for (size_t i = 0; i < pages; ++i) {
    int idle = IsPageIdle(page_nr + i);
    if (idle == -1)
      continue;

    if (idle) {
      if (i == 0)
        idle_mem += GetFirstPageShare(addr, size);
      else if (i == pages - 1)
        idle_mem += GetLastPageShare(addr, size);
      else
        idle_mem += base::kPageSize;
    } else {
      touched_virt_page_nrs_.emplace(page_nr + i);
    }
  }
  return idle_mem;
}

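// Marks every page recorded as touched by earlier OnIdlePage() calls as idle
// again, then clears the record.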
void PageIdleChecker::MarkPagesIdle() {
  for (uint64_t virt_page_nr : touched_virt_page_nrs_)
    MarkPageIdle(virt_page_nr);
  touched_virt_page_nrs_.clear();
}

void PageIdleChecker::MarkPageIdle(uint64_t virt_page_nr) {
  // The file implements a bitmap where each bit corresponds to a memory page.
  // The bitmap is represented by an array of 8-byte integers, and the page at
  // PFN #i is mapped to bit #i%64 of array element #i/64, byte order is
  // native. When a bit is set, the corresponding page is idle.
  //
  // The kernel ORs the value written with the existing bitmap, so we do not
  // override previously written values.
  // See https://www.kernel.org/doc/Documentation/vm/idle_page_tracking.txt
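  // For example, virt_page_nr 130 maps to array element 130 / 64 = 2, i.e.
  // byte offset 16 in the file, bit 130 % 64 = 2.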
  off64_t offset = 8 * (virt_page_nr / 64);
  size_t bit_offset = virt_page_nr % 64;
  uint64_t bit_pattern = 1ULL << bit_offset;
  if (pwrite64(*page_idle_fd_, &bit_pattern, sizeof(bit_pattern), offset) !=
      static_cast<ssize_t>(sizeof(bit_pattern))) {
    PERFETTO_PLOG("Failed to write bit pattern at %" PRIi64 ".", offset);
  }
}

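// Returns 1 if the page is marked idle in the kernel bitmap, 0 if it is not,
// and -1 if the bitmap could not be read.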
int PageIdleChecker::IsPageIdle(uint64_t virt_page_nr) {
  off64_t offset = 8 * (virt_page_nr / 64);
  size_t bit_offset = virt_page_nr % 64;
  uint64_t bit_pattern;
  if (pread64(*page_idle_fd_, &bit_pattern, sizeof(bit_pattern), offset) !=
      static_cast<ssize_t>(sizeof(bit_pattern))) {
    PERFETTO_PLOG("Failed to read bit pattern at %" PRIi64 ".", offset);
    return -1;
  }
  return static_cast<int>((bit_pattern >> bit_offset) & 1);
}

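// Returns the number of bytes of the allocation [addr, addr + size) that lie
// on the first page it touches.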
uint64_t GetFirstPageShare(uint64_t addr, size_t size) {
  // Our allocation is xxxx in this illustration:
  //         +----------------------------------------------+
  //         |             xxxxxxxxxx|xxxxxx                |
  //         |             xxxxxxxxxx|xxxxxx                |
  //         |             xxxxxxxxxx|xxxxxx                |
  //         +-------------+---------------+----------------+
  //         ^             ^         ^     ^
  //         +             +         +     +
  // page_aligned_addr  addr        end    addr + size
  uint64_t page_aligned_addr = (addr / base::kPageSize) * base::kPageSize;
  uint64_t end = page_aligned_addr + base::kPageSize;
  if (end > addr + size) {
    // The whole allocation is on the first page.
    return size;
  }

  return base::kPageSize - (addr - page_aligned_addr);
}

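// Returns the number of bytes of the allocation [addr, addr + size) that lie
// on the last page it touches.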
uint64_t GetLastPageShare(uint64_t addr, size_t size) {
  uint64_t last_page_size = (addr + size) % base::kPageSize;
  if (last_page_size == 0) {
    // The allocation ends at a page boundary, so the whole last page is
    // attributed to it.
    return base::kPageSize;
  } else {
    // The allocation does not end at a page boundary, so only a subset of the
    // last page should be attributed to it.
    return last_page_size;
  }
}

}  // namespace profiling
}  // namespace perfetto