/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/tracing/core/shared_memory_abi.h"

#include "perfetto/ext/tracing/core/basic_types.h"
#include "src/base/test/gtest_test_suite.h"
#include "src/tracing/test/aligned_buffer_test.h"
#include "test/gtest_and_gmock.h"

namespace perfetto {
namespace {

using testing::ValuesIn;
using Chunk = SharedMemoryABI::Chunk;
using ChunkHeader = SharedMemoryABI::ChunkHeader;

using SharedMemoryABITest = AlignedBufferTest;

size_t const kPageSizes[] = {4096, 8192, 16384, 32768, 65536};
INSTANTIATE_TEST_SUITE_P(PageSize, SharedMemoryABITest, ValuesIn(kPageSizes));

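// Exercises page partitioning, chunk acquisition/release and the resulting
// chunk state transitions across different page layouts.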
TEST_P(SharedMemoryABITest, NominalCases) {
  SharedMemoryABI abi(buf(), buf_size(), page_size(),
                      SharedMemoryABI::ShmemMode::kDefault);

  ASSERT_EQ(buf(), abi.start());
  ASSERT_EQ(buf() + buf_size(), abi.end());
  ASSERT_EQ(buf_size(), abi.size());
  ASSERT_EQ(page_size(), abi.page_size());
  ASSERT_EQ(kNumPages, abi.num_pages());

  for (size_t i = 0; i < kNumPages; i++) {
    ASSERT_TRUE(abi.is_page_free(i));
    ASSERT_FALSE(abi.is_page_complete(i));
    // GetFreeChunks() should return 0 for an unpartitioned page.
    ASSERT_EQ(0u, abi.GetFreeChunks(i));
  }

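  // Partition pages with different layouts. GetFreeChunks() returns a bitmap
  // with one bit set per free chunk, so a page split into N chunks should
  // report the N least significant bits set.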
  ASSERT_TRUE(abi.TryPartitionPage(0, SharedMemoryABI::kPageDiv1));
  ASSERT_EQ(0x01u, abi.GetFreeChunks(0));

  ASSERT_TRUE(abi.TryPartitionPage(1, SharedMemoryABI::kPageDiv2));
  ASSERT_EQ(0x03u, abi.GetFreeChunks(1));

  ASSERT_TRUE(abi.TryPartitionPage(2, SharedMemoryABI::kPageDiv4));
  ASSERT_EQ(0x0fu, abi.GetFreeChunks(2));

  ASSERT_TRUE(abi.TryPartitionPage(3, SharedMemoryABI::kPageDiv7));
  ASSERT_EQ(0x7fu, abi.GetFreeChunks(3));

  ASSERT_TRUE(abi.TryPartitionPage(4, SharedMemoryABI::kPageDiv14));
  ASSERT_EQ(0x3fffu, abi.GetFreeChunks(4));

  // Repartitioning an existing page must fail.
  ASSERT_FALSE(abi.TryPartitionPage(0, SharedMemoryABI::kPageDiv1));
  ASSERT_FALSE(abi.TryPartitionPage(4, SharedMemoryABI::kPageDiv14));

  for (size_t i = 0; i <= 4; i++) {
    ASSERT_FALSE(abi.is_page_free(i));
    ASSERT_FALSE(abi.is_page_complete(i));
  }

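  // Track the bounds of the previously acquired chunk to verify that chunks
  // are laid out contiguously within each page and never overlap.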
  uint16_t last_chunk_id = 0;
  uint16_t last_writer_id = 0;
  uint8_t* last_chunk_begin = nullptr;
  uint8_t* last_chunk_end = nullptr;

  for (size_t page_idx = 0; page_idx <= 4; page_idx++) {
    uint8_t* const page_start = buf() + page_idx * page_size();
    uint8_t* const page_end = page_start + page_size();
    const size_t num_chunks =
        SharedMemoryABI::GetNumChunksForLayout(abi.GetPageLayout(page_idx));
    Chunk chunks[14];

    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];
      ChunkHeader header{};

      ASSERT_EQ(SharedMemoryABI::kChunkFree,
                abi.GetChunkState(page_idx, chunk_idx));
      uint16_t chunk_id = ++last_chunk_id;
      last_writer_id = (last_writer_id + 1) & kMaxWriterID;
      uint16_t writer_id = last_writer_id;
      header.chunk_id.store(chunk_id);
      header.writer_id.store(writer_id);

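      // Seed each header with a distinct packet count and flag pattern so the
      // values can be verified after acquiring the chunk.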
      uint16_t packets_count = static_cast<uint16_t>(chunk_idx * 10);
      const uint8_t kFlagsMask = (1 << 6) - 1;
      uint8_t flags = static_cast<uint8_t>((0xffu - chunk_idx) & kFlagsMask);
      header.packets.store({packets_count, flags});

      chunk = abi.TryAcquireChunkForWriting(page_idx, chunk_idx, &header);
      ASSERT_TRUE(chunk.is_valid());
      ASSERT_EQ(SharedMemoryABI::kChunkBeingWritten,
                abi.GetChunkState(page_idx, chunk_idx));

      // Check chunk bounds.
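      // Each chunk takes an equal share of the page payload (the page minus
      // its header), rounded down to a 4-byte boundary.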
      size_t expected_chunk_size =
          (page_size() - sizeof(SharedMemoryABI::PageHeader)) / num_chunks;
      expected_chunk_size = expected_chunk_size - (expected_chunk_size % 4);
      ASSERT_EQ(expected_chunk_size, chunk.size());
      ASSERT_EQ(expected_chunk_size - sizeof(SharedMemoryABI::ChunkHeader),
                chunk.payload_size());
      ASSERT_GT(chunk.begin(), page_start);
      ASSERT_GT(chunk.begin(), last_chunk_begin);
      ASSERT_GE(chunk.begin(), last_chunk_end);
      ASSERT_LE(chunk.end(), page_end);
      ASSERT_GT(chunk.end(), chunk.begin());
      ASSERT_EQ(chunk.end(), chunk.begin() + chunk.size());
      last_chunk_begin = chunk.begin();
      last_chunk_end = chunk.end();

      ASSERT_EQ(chunk_id, chunk.header()->chunk_id.load());
      ASSERT_EQ(writer_id, chunk.header()->writer_id.load());
      ASSERT_EQ(packets_count, chunk.header()->packets.load().count);
      ASSERT_EQ(flags, chunk.header()->packets.load().flags);
      ASSERT_EQ(std::make_pair(packets_count, flags),
                chunk.GetPacketCountAndFlags());

      chunk.IncrementPacketCount();
      ASSERT_EQ(packets_count + 1, chunk.header()->packets.load().count);

      chunk.IncrementPacketCount();
      ASSERT_EQ(packets_count + 2, chunk.header()->packets.load().count);

      chunk.SetFlag(
          SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
      ASSERT_TRUE(
          chunk.header()->packets.load().flags &
          SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);

      // Test clearing the needs patching flag.
      chunk.SetFlag(SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
      ASSERT_TRUE(chunk.header()->packets.load().flags &
                  SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
      chunk.ClearNeedsPatchingFlag();
      ASSERT_FALSE(chunk.header()->packets.load().flags &
                   SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);

      // Reacquiring the same chunk should fail.
      ASSERT_FALSE(abi.TryAcquireChunkForWriting(page_idx, chunk_idx, &header)
                       .is_valid());
    }

    // Now release chunks and check the Release() logic.
    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];

      size_t res = abi.ReleaseChunkAsComplete(std::move(chunk));
      ASSERT_EQ(page_idx, res);
      ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_complete(page_idx));
      ASSERT_EQ(SharedMemoryABI::kChunkComplete,
                abi.GetChunkState(page_idx, chunk_idx));
    }

    // Now acquire all chunks for reading.
    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];
      chunk = abi.TryAcquireChunkForReading(page_idx, chunk_idx);
      ASSERT_TRUE(chunk.is_valid());
      ASSERT_EQ(SharedMemoryABI::kChunkBeingRead,
                abi.GetChunkState(page_idx, chunk_idx));
    }

    // Finally release all chunks as free.
    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      Chunk& chunk = chunks[chunk_idx];

      // If this was the last chunk in the page, the full page should be marked
      // as free.
      size_t res = abi.ReleaseChunkAsFree(std::move(chunk));
      ASSERT_EQ(page_idx, res);
      ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_free(page_idx));
      ASSERT_EQ(SharedMemoryABI::kChunkFree,
                abi.GetChunkState(page_idx, chunk_idx));
    }
  }
}

// Tests chunk state transitions in shmem emulation mode.
TEST_P(SharedMemoryABITest, ShmemEmulation) {
  SharedMemoryABI abi(buf(), buf_size(), page_size(),
                      SharedMemoryABI::ShmemMode::kShmemEmulation);

  for (size_t i = 0; i < kNumPages; i++) {
    ASSERT_TRUE(abi.is_page_free(i));
    ASSERT_FALSE(abi.is_page_complete(i));
    // GetFreeChunks() should return 0 for an unpartitioned page.
    ASSERT_EQ(0u, abi.GetFreeChunks(i));
  }

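  // Partition the first page into 14 chunks; all 14 bits of the free-chunks
  // bitmap should be set.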
  ASSERT_TRUE(abi.TryPartitionPage(0, SharedMemoryABI::kPageDiv14));
  ASSERT_EQ(0x3fffu, abi.GetFreeChunks(0));

  ASSERT_FALSE(abi.is_page_free(0));

  const size_t num_chunks =
      SharedMemoryABI::GetNumChunksForLayout(abi.GetPageLayout(0));
  Chunk chunks[14];

  for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
    Chunk& chunk = chunks[chunk_idx];
    ChunkHeader header{};

    ASSERT_EQ(SharedMemoryABI::kChunkFree, abi.GetChunkState(0, chunk_idx));

    chunk = abi.TryAcquireChunkForWriting(0, chunk_idx, &header);
    ASSERT_TRUE(chunk.is_valid());
    ASSERT_EQ(SharedMemoryABI::kChunkBeingWritten,
              abi.GetChunkState(0, chunk_idx));

    // Reacquiring the same chunk should fail.
    ASSERT_FALSE(
        abi.TryAcquireChunkForWriting(0, chunk_idx, &header).is_valid());
  }

  // Now release chunks and check the Release() logic.
  for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
    Chunk& chunk = chunks[chunk_idx];

    size_t res = abi.ReleaseChunkAsComplete(std::move(chunk));
    ASSERT_EQ(0u, res);
    ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi.GetChunkState(0, chunk_idx));
  }

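  // Re-fetch each chunk without a state check and release it back as free.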
  for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
    Chunk chunk = abi.GetChunkUnchecked(0, abi.GetPageLayout(0), chunk_idx);

    // If this was the last chunk in the page, the full page should be marked
    // as free.
    size_t res = abi.ReleaseChunkAsFree(std::move(chunk));
    ASSERT_EQ(0u, res);
    ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_free(0));
    ASSERT_EQ(SharedMemoryABI::kChunkFree, abi.GetChunkState(0u, chunk_idx));
  }
}

}  // namespace
}  // namespace perfetto