/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/core/shared_memory_arbiter_impl.h"

#include <bitset>
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/basic_types.h"
#include "perfetto/ext/tracing/core/commit_data_request.h"
#include "perfetto/ext/tracing/core/shared_memory_abi.h"
#include "perfetto/ext/tracing/core/trace_packet.h"
#include "perfetto/ext/tracing/core/trace_writer.h"
#include "perfetto/ext/tracing/core/tracing_service.h"
#include "src/base/test/gtest_test_suite.h"
#include "src/base/test/test_task_runner.h"
#include "src/tracing/core/in_process_shared_memory.h"
#include "src/tracing/core/patch_list.h"
#include "src/tracing/test/aligned_buffer_test.h"
#include "src/tracing/test/mock_producer_endpoint.h"
#include "test/gtest_and_gmock.h"

#include "protos/perfetto/trace/test_event.pbzero.h"
#include "protos/perfetto/trace/trace_packet.pbzero.h"

namespace perfetto {

using testing::_;
using testing::Between;
using testing::Invoke;
using testing::Mock;
using testing::NiceMock;
using testing::UnorderedElementsAreArray;

using ShmemMode = SharedMemoryABI::ShmemMode;

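// Parameterized fixture: constructs a SharedMemoryArbiterImpl on top of the
// aligned test buffer provided by AlignedBufferTest, wiring it to a NiceMock
// producer endpoint and a TestTaskRunner so that tests can observe CommitData
// requests and Register/UnregisterTraceWriter calls.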
class SharedMemoryArbiterImplTest : public AlignedBufferTest {
 public:
  void SetUp() override {
    default_layout_ =
        SharedMemoryArbiterImpl::default_page_layout_for_testing();
    AlignedBufferTest::SetUp();
    task_runner_.reset(new base::TestTaskRunner());
    arbiter_.reset(new SharedMemoryArbiterImpl(
        buf(), buf_size(), ShmemMode::kDefault, page_size(),
        &mock_producer_endpoint_, task_runner_.get()));
  }

  bool IsArbiterFullyBound() { return arbiter_->fully_bound_; }

  void TearDown() override {
    arbiter_.reset();
    task_runner_.reset();
    SharedMemoryArbiterImpl::set_default_layout_for_testing(default_layout_);
  }

  std::unique_ptr<base::TestTaskRunner> task_runner_;
  std::unique_ptr<SharedMemoryArbiterImpl> arbiter_;
  NiceMock<MockProducerEndpoint> mock_producer_endpoint_;
  std::function<void(const std::vector<uint32_t>&)> on_pages_complete_;
  SharedMemoryABI::PageLayout default_layout_;
};

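// Each test in the suite runs twice: once with 4 KiB and once with 64 KiB SMB
// pages.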
size_t const kPageSizes[] = {4096, 65536};
INSTANTIATE_TEST_SUITE_P(PageSize,
                         SharedMemoryArbiterImplTest,
                         ::testing::ValuesIn(kPageSizes));

// The buffer has 14 pages (kNumPages), each of which is partitioned into 14
// chunks. The test requests 30 chunks (2 full pages + 2 chunks from a 3rd
// page) and releases them in different batches. It checks the consistency of
// the batches and the release order.
TEST_P(SharedMemoryArbiterImplTest, GetAndReturnChunks) {
  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv14);
  static constexpr size_t kTotChunks = kNumPages * 14;
  SharedMemoryABI::Chunk chunks[kTotChunks];
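  // Acquire 2 full pages' worth of chunks (14 each) plus 2 chunks from the
  // 3rd page.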
  for (size_t i = 0; i < 14 * 2 + 2; i++) {
    chunks[i] = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kStall);
    ASSERT_TRUE(chunks[i].is_valid());
  }

  // Now return the first 28 chunks (2 full pages) and only the 2nd chunk of
  // the 3rd page. Chunks are released in interleaved order: 1,0,3,2,5,4,7,6.
  // Check that the notification callback is posted and the order is
  // consistent.
  auto on_commit_1 = task_runner_->CreateCheckpoint("on_commit_1");
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([on_commit_1](const CommitDataRequest& req,
                                     MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(14 * 2 + 1, req.chunks_to_move_size());
        for (size_t i = 0; i < 14 * 2; i++) {
          ASSERT_EQ(i / 14, req.chunks_to_move()[i].page());
          ASSERT_EQ((i % 14) ^ 1, req.chunks_to_move()[i].chunk());
          ASSERT_EQ(i % 5 + 1, req.chunks_to_move()[i].target_buffer());
        }
        ASSERT_EQ(2u, req.chunks_to_move()[28].page());
        ASSERT_EQ(1u, req.chunks_to_move()[28].chunk());
        ASSERT_EQ(42u, req.chunks_to_move()[28].target_buffer());
        on_commit_1();
      }));
  PatchList ignored;
  for (size_t i = 0; i < 14 * 2; i++) {
    arbiter_->ReturnCompletedChunk(std::move(chunks[i ^ 1]), i % 5 + 1,
                                   &ignored);
  }
  arbiter_->ReturnCompletedChunk(std::move(chunks[29]), 42, &ignored);
  task_runner_->RunUntilCheckpoint("on_commit_1");

  // Then release the 1st chunk of the 3rd page, and check that we get a
  // notification for that as well.
  auto on_commit_2 = task_runner_->CreateCheckpoint("on_commit_2");
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([on_commit_2](const CommitDataRequest& req,
                                     MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(1, req.chunks_to_move_size());
        ASSERT_EQ(2u, req.chunks_to_move()[0].page());
        ASSERT_EQ(0u, req.chunks_to_move()[0].chunk());
        ASSERT_EQ(43u, req.chunks_to_move()[0].target_buffer());
        on_commit_2();
      }));
  arbiter_->ReturnCompletedChunk(std::move(chunks[28]), 43, &ignored);
  task_runner_->RunUntilCheckpoint("on_commit_2");
}

TEST_P(SharedMemoryArbiterImplTest, BatchCommits) {
  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv1);
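  // With one chunk per page, the chunk ordinal in the assertions below is
  // always 0 and the page index identifies each committed chunk.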

  // The default batching period is 0s, so chunks are committed as soon as
  // they are returned.
  SharedMemoryABI::Chunk chunk =
      arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(1);
  PatchList ignored;
  arbiter_->ReturnCompletedChunk(std::move(chunk), 0, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));

  // Since we cannot explicitly control the passage of time in task_runner_, to
  // simulate a non-zero batching period and a commit at the end of it, set the
  // batching duration to a very large value and call
  // FlushPendingCommitDataRequests to manually trigger the commit.
  arbiter_->SetDirectSMBPatchingSupportedByService();
  ASSERT_TRUE(arbiter_->EnableDirectSMBPatching());
  arbiter_->SetBatchCommitsDuration(UINT32_MAX);

  // First chunk that will be batched. CommitData should not be called
  // immediately this time.
  chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(0);
  // We'll pretend that the chunk needs patching. This is done in order to
  // verify that chunks that need patching are not marked as complete (i.e.
  // they are kept in state kChunkBeingWritten) before the batching period ends
  // - in case a patch for them arrives during the batching period.
  chunk.SetFlag(SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  arbiter_->ReturnCompletedChunk(std::move(chunk), 1, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  ASSERT_EQ(SharedMemoryABI::kChunkBeingWritten,
            arbiter_->shmem_abi_for_testing()->GetChunkState(1u, 0u));

  // Add a second chunk to the batch. This should also not trigger an immediate
  // call to CommitData.
  chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(0);
  arbiter_->ReturnCompletedChunk(std::move(chunk), 2, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  // This chunk does not need patching, so it should be marked as complete even
  // before the end of the batching period - to allow the service to read it in
  // full.
  ASSERT_EQ(SharedMemoryABI::kChunkComplete,
            arbiter_->shmem_abi_for_testing()->GetChunkState(2u, 0u));

  // Make sure that CommitData gets called once (should happen at the end
  // of the batching period), with the two chunks in the batch.
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([](const CommitDataRequest& req,
                          MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(2, req.chunks_to_move_size());

        // Verify that this is the first chunk that we expect to have been
        // batched.
        ASSERT_EQ(1u, req.chunks_to_move()[0].page());
        ASSERT_EQ(0u, req.chunks_to_move()[0].chunk());
        ASSERT_EQ(1u, req.chunks_to_move()[0].target_buffer());

        // Verify that this is the second chunk that we expect to have been
        // batched.
        ASSERT_EQ(2u, req.chunks_to_move()[1].page());
        ASSERT_EQ(0u, req.chunks_to_move()[1].chunk());
        ASSERT_EQ(2u, req.chunks_to_move()[1].target_buffer());
      }));

  // Pretend we've reached the end of the batching period.
  arbiter_->FlushPendingCommitDataRequests();
}

TEST_P(SharedMemoryArbiterImplTest, UseShmemEmulation) {
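  // Recreate the arbiter in shmem emulation mode. In this mode the chunk
  // payload is copied into the CommitDataRequest (see the has_data() check
  // below) and chunks are freed as soon as the request is flushed.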
  arbiter_.reset(new SharedMemoryArbiterImpl(
      buf(), buf_size(), ShmemMode::kShmemEmulation, page_size(),
      &mock_producer_endpoint_, task_runner_.get()));

  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv1);

  size_t page_idx;
  size_t chunk_idx;
  auto* abi = arbiter_->shmem_abi_for_testing();

  // Test returning a completed chunk.
  SharedMemoryABI::Chunk chunk =
      arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  std::tie(page_idx, chunk_idx) = abi->GetPageAndChunkIndex(chunk);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _)).Times(1);
  PatchList ignored;
  arbiter_->ReturnCompletedChunk(std::move(chunk), 0, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  // When running in emulation mode, the chunk is freed as soon as the
  // CommitDataRequest is flushed.
  ASSERT_EQ(
      SharedMemoryABI::kChunkFree,
      arbiter_->shmem_abi_for_testing()->GetChunkState(page_idx, chunk_idx));

  // Direct patching is supported in emulation mode.
  arbiter_->SetDirectSMBPatchingSupportedByService();
  ASSERT_TRUE(arbiter_->EnableDirectSMBPatching());

  chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDefault);
  std::tie(page_idx, chunk_idx) = abi->GetPageAndChunkIndex(chunk);
  ASSERT_TRUE(chunk.is_valid());
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(Invoke([&](const CommitDataRequest& req,
                           MockProducerEndpoint::CommitDataCallback) {
        ASSERT_EQ(1, req.chunks_to_move_size());

        ASSERT_EQ(page_idx, req.chunks_to_move()[0].page());
        ASSERT_EQ(chunk_idx, req.chunks_to_move()[0].chunk());
        ASSERT_EQ(1u, req.chunks_to_move()[0].target_buffer());

        // The request should contain chunk data.
        ASSERT_TRUE(req.chunks_to_move()[0].has_data());
      }));
  chunk.SetFlag(SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  arbiter_->ReturnCompletedChunk(std::move(chunk), 1, &ignored);
  task_runner_->RunUntilIdle();
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock_producer_endpoint_));
  // A chunk is freed after being flushed.
  ASSERT_EQ(
      SharedMemoryABI::kChunkFree,
      arbiter_->shmem_abi_for_testing()->GetChunkState(page_idx, chunk_idx));
}

// Check that we can actually create up to kMaxWriterID TraceWriter(s).
TEST_P(SharedMemoryArbiterImplTest, WriterIDsAllocation) {
  auto checkpoint = task_runner_->CreateCheckpoint("last_unregistered");

  std::vector<uint32_t> registered_ids;
  std::vector<uint32_t> unregistered_ids;

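  // Record every writer ID the arbiter registers/unregisters with the service;
  // the checkpoint fires once all kMaxWriterID writers have been unregistered.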
  ON_CALL(mock_producer_endpoint_, RegisterTraceWriter)
      .WillByDefault(
          [&](uint32_t id, uint32_t) { registered_ids.push_back(id); });
  ON_CALL(mock_producer_endpoint_, UnregisterTraceWriter)
      .WillByDefault([&](uint32_t id) {
        unregistered_ids.push_back(id);
        if (unregistered_ids.size() == kMaxWriterID) {
          checkpoint();
        }
      });
  {
    std::map<WriterID, std::unique_ptr<TraceWriter>> writers;

    for (size_t i = 0; i < kMaxWriterID; i++) {
      std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(1);
      ASSERT_TRUE(writer);
      WriterID writer_id = writer->writer_id();
      ASSERT_TRUE(writers.emplace(writer_id, std::move(writer)).second);
    }

    // A further call should return a null trace writer implementation, as we
    // have exhausted the writer IDs.
    ASSERT_EQ(arbiter_->CreateTraceWriter(1)->writer_id(), 0);
  }

  // This should run the Register/UnregisterTraceWriter tasks enqueued by the
  // memory arbiter.
  task_runner_->RunUntilCheckpoint("last_unregistered", 15000);

  std::vector<uint32_t> expected_ids;  // 1..kMaxWriterID
  for (uint32_t i = 1; i <= kMaxWriterID; i++)
    expected_ids.push_back(i);
  EXPECT_THAT(registered_ids, UnorderedElementsAreArray(expected_ids));
  EXPECT_THAT(unregistered_ids, UnorderedElementsAreArray(expected_ids));
}

TEST_P(SharedMemoryArbiterImplTest, Shutdown) {
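  // Shutdown cannot complete while a live TraceWriter still references the
  // arbiter, so the first TryShutdown() attempt below must fail.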
  std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(1);
  EXPECT_TRUE(writer);
  EXPECT_FALSE(arbiter_->TryShutdown());

  // We still get a valid trace writer after shutdown, but it's a null one
  // that's not connected to the arbiter.
  std::unique_ptr<TraceWriter> writer2 = arbiter_->CreateTraceWriter(2);
  EXPECT_TRUE(writer2);
  EXPECT_EQ(writer2->writer_id(), 0);

  // Shutdown will succeed once the only non-null writer goes away.
  writer.reset();
  EXPECT_TRUE(arbiter_->TryShutdown());
}

// Verify that getting a new chunk doesn't stall when kDrop policy is chosen.
TEST_P(SharedMemoryArbiterImplTest, BufferExhaustedPolicyDrop) {
  // Grab all chunks in the SMB.
  SharedMemoryArbiterImpl::set_default_layout_for_testing(
      SharedMemoryABI::PageLayout::kPageDiv1);
  static constexpr size_t kTotChunks = kNumPages;
  SharedMemoryABI::Chunk chunks[kTotChunks];
  for (size_t i = 0; i < kTotChunks; i++) {
    chunks[i] = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
    ASSERT_TRUE(chunks[i].is_valid());
  }

  // SMB is exhausted, thus GetNewChunk() should return an invalid chunk. In
  // kStall mode, this would stall.
  SharedMemoryABI::Chunk invalid_chunk =
      arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
  ASSERT_FALSE(invalid_chunk.is_valid());

  // Returning the chunk is not enough to be able to reacquire it.
  PatchList ignored;
  arbiter_->ReturnCompletedChunk(std::move(chunks[0]), 1, &ignored);

  invalid_chunk = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
  ASSERT_FALSE(invalid_chunk.is_valid());

  // After releasing the chunk as free, we can reacquire it.
  chunks[0] =
      arbiter_->shmem_abi_for_testing()->TryAcquireChunkForReading(0, 0);
  ASSERT_TRUE(chunks[0].is_valid());
  arbiter_->shmem_abi_for_testing()->ReleaseChunkAsFree(std::move(chunks[0]));

  chunks[0] = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
  ASSERT_TRUE(chunks[0].is_valid());
}

TEST_P(SharedMemoryArbiterImplTest, CreateUnboundAndBind) {
  auto checkpoint_writer = task_runner_->CreateCheckpoint("writer_registered");
  auto checkpoint_flush = task_runner_->CreateCheckpoint("flush_completed");

  // Create an unbound arbiter and bind immediately.
  arbiter_.reset(new SharedMemoryArbiterImpl(
      buf(), buf_size(), ShmemMode::kDefault, page_size(), nullptr, nullptr));
  arbiter_->BindToProducerEndpoint(&mock_producer_endpoint_,
                                   task_runner_.get());
  EXPECT_TRUE(IsArbiterFullyBound());

  // Trace writer should be registered in a non-delayed task.
  EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 1))
      .WillOnce(testing::InvokeWithoutArgs(checkpoint_writer));
  std::unique_ptr<TraceWriter> writer =
      arbiter_->CreateTraceWriter(1, BufferExhaustedPolicy::kDrop);
  task_runner_->RunUntilCheckpoint("writer_registered", 5000);

  // Commits/flushes should be sent right away.
  EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
      .WillOnce(testing::InvokeArgument<1>());
  writer->Flush(checkpoint_flush);
  task_runner_->RunUntilCheckpoint("flush_completed", 5000);
}

// Startup tracing tests are run with the arbiter in either bound or unbound
// initial state. Startup tracing in bound state can still be useful, e.g. in
// integration tests or to enable tracing in the consumer process immediately
// before/after instructing the service to start a session, avoiding the
// round-trip time through the service.
enum class InitialBindingState { kUnbound, kBound };

class SharedMemoryArbiterImplStartupTracingTest
    : public SharedMemoryArbiterImplTest {
 public:
  void SetupArbiter(InitialBindingState initial_state) {
    if (initial_state == InitialBindingState::kUnbound) {
      arbiter_.reset(
          new SharedMemoryArbiterImpl(buf(), buf_size(), ShmemMode::kDefault,
                                      page_size(), nullptr, nullptr));
      EXPECT_FALSE(IsArbiterFullyBound());
    } else {
      // A bound arbiter is already set up by the base class.
      EXPECT_TRUE(IsArbiterFullyBound());
    }
  }

  void EnsureArbiterBoundToEndpoint(InitialBindingState initial_state) {
    if (initial_state == InitialBindingState::kUnbound) {
      arbiter_->BindToProducerEndpoint(&mock_producer_endpoint_,
                                       task_runner_.get());
    }
  }

  void TestStartupTracing(InitialBindingState initial_state) {
    constexpr uint16_t kTargetBufferReservationId1 = 1;
    constexpr uint16_t kTargetBufferReservationId2 = 2;
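    // Startup writers target buffer *reservations* rather than real buffer
    // IDs; the reservations are bound to actual buffer IDs (42 and 23 below)
    // via BindStartupTargetBuffer().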

    SetupArbiter(initial_state);

    // Create an unbound startup writer.
    std::unique_ptr<TraceWriter> writer =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write two packets while unbound (if InitialBindingState::kUnbound) and
    // flush the chunk after each packet. The writer will return the chunk to
    // the arbiter and grab a new chunk for the second packet. The flush should
    // only add the chunk into the queued commit request.
    for (int i = 0; i < 2; i++) {
      {
        auto packet = writer->NewTracePacket();
        packet->set_for_testing()->set_str("foo");
      }
      writer->Flush();
    }

    // Bind to producer endpoint if initially unbound. This should not register
    // the trace writer yet, because its buffer reservation is still unbound.
    EnsureArbiterBoundToEndpoint(initial_state);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write another packet into another chunk and queue it.
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    bool flush_completed = false;
    writer->Flush([&flush_completed] { flush_completed = true; });

    // Bind the buffer reservation to a buffer. Trace writer should be
    // registered and queued commits flushed.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 42));
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([](const CommitDataRequest& req,
                            MockProducerEndpoint::CommitDataCallback callback) {
          ASSERT_EQ(3, req.chunks_to_move_size());
          EXPECT_EQ(42u, req.chunks_to_move()[0].target_buffer());
          EXPECT_EQ(42u, req.chunks_to_move()[1].target_buffer());
          EXPECT_EQ(42u, req.chunks_to_move()[2].target_buffer());
          callback();
        }));

    arbiter_->BindStartupTargetBuffer(kTargetBufferReservationId1, 42);
    EXPECT_TRUE(IsArbiterFullyBound());

    testing::Mock::VerifyAndClearExpectations(&mock_producer_endpoint_);
    EXPECT_TRUE(flush_completed);

    // Creating a new startup writer for the same buffer posts an immediate
    // task to register it.
    auto checkpoint_register1b =
        task_runner_->CreateCheckpoint("writer1b_registered");
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 42))
        .WillOnce(testing::InvokeWithoutArgs(checkpoint_register1b));
    std::unique_ptr<TraceWriter> writer1b =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);
    task_runner_->RunUntilCheckpoint("writer1b_registered", 5000);

    // And a commit on this new writer should be flushed to the right buffer,
    // too.
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([](const CommitDataRequest& req,
                            MockProducerEndpoint::CommitDataCallback callback) {
          ASSERT_EQ(1, req.chunks_to_move_size());
          EXPECT_EQ(42u, req.chunks_to_move()[0].target_buffer());
          callback();
        }));
    {
      auto packet = writer1b->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    flush_completed = false;
    writer1b->Flush([&flush_completed] { flush_completed = true; });

    testing::Mock::VerifyAndClearExpectations(&mock_producer_endpoint_);
    EXPECT_TRUE(flush_completed);

    // Create another startup writer for another target buffer, which puts the
    // arbiter back into unbound state.
    std::unique_ptr<TraceWriter> writer2 =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId2);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write a chunk into both writers. Both should be queued up into the next
    // commit request.
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    writer->Flush();
    {
      auto packet = writer2->NewTracePacket();
      packet->set_for_testing()->set_str("bar");
    }
    flush_completed = false;
    writer2->Flush([&flush_completed] { flush_completed = true; });

    // Destroy the first trace writer, which should cause the arbiter to post a
    // task to unregister it.
    auto checkpoint_writer =
        task_runner_->CreateCheckpoint("writer_unregistered");
    EXPECT_CALL(mock_producer_endpoint_,
                UnregisterTraceWriter(writer->writer_id()))
        .WillOnce(testing::InvokeWithoutArgs(checkpoint_writer));
    writer.reset();
    task_runner_->RunUntilCheckpoint("writer_unregistered", 5000);

    // Bind the second buffer reservation to a buffer. Second trace writer
    // should be registered and queued commits flushed.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, 23));
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([](const CommitDataRequest& req,
                            MockProducerEndpoint::CommitDataCallback callback) {
          ASSERT_EQ(2, req.chunks_to_move_size());
          EXPECT_EQ(42u, req.chunks_to_move()[0].target_buffer());
          EXPECT_EQ(23u, req.chunks_to_move()[1].target_buffer());
          callback();
        }));

    arbiter_->BindStartupTargetBuffer(kTargetBufferReservationId2, 23);
    EXPECT_TRUE(IsArbiterFullyBound());

    testing::Mock::VerifyAndClearExpectations(&mock_producer_endpoint_);
    EXPECT_TRUE(flush_completed);
  }

  void TestAbortStartupTracingForReservation(
      InitialBindingState initial_state) {
    constexpr uint16_t kTargetBufferReservationId1 = 1;
    constexpr uint16_t kTargetBufferReservationId2 = 2;

    SetupArbiter(initial_state);

    // Create two unbound startup writers for the same target buffer.
    SharedMemoryABI* shmem_abi = arbiter_->shmem_abi_for_testing();
    std::unique_ptr<TraceWriter> writer =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);
    std::unique_ptr<TraceWriter> writer2 =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);

    // Write two packets while unbound and flush the chunk after each packet.
    // The writer will return the chunk to the arbiter and grab a new chunk for
    // the second packet. The flush should only add the chunk into the queued
    // commit request.
    for (int i = 0; i < 2; i++) {
      {
        auto packet = writer->NewTracePacket();
        packet->set_for_testing()->set_str("foo");
      }
      writer->Flush();
    }

    // Expectations for the below calls.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, _)).Times(0);
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke([shmem_abi](const CommitDataRequest& req,
                                     MockProducerEndpoint::CommitDataCallback) {
          ASSERT_EQ(2, req.chunks_to_move_size());
          for (size_t i = 0; i < 2; i++) {
            EXPECT_EQ(0u, req.chunks_to_move()[i].target_buffer());
            SharedMemoryABI::Chunk chunk = shmem_abi->TryAcquireChunkForReading(
                req.chunks_to_move()[i].page(),
                req.chunks_to_move()[i].chunk());
            shmem_abi->ReleaseChunkAsFree(std::move(chunk));
          }
        }));

    // Abort the first session. This should resolve the two chunks committed up
    // to this point to an invalid target buffer (ID 0). They will remain
    // buffered until bound to an endpoint (if InitialBindingState::kUnbound).
    arbiter_->AbortStartupTracingForReservation(kTargetBufferReservationId1);

    // Destroy a writer that was created before the abort. This should not
    // cause crashes.
    EXPECT_CALL(mock_producer_endpoint_,
                UnregisterTraceWriter(writer2->writer_id()))
        .Times(Between(0, 1));  // Depending on `initial_state`.
    writer2.reset();

    // Bind to producer endpoint if unbound. The trace writer should not be
    // registered as its target buffer is invalid. Since no startup sessions
    // are active anymore, the arbiter should be fully bound. The commit data
    // request is flushed.
    EnsureArbiterBoundToEndpoint(initial_state);
    EXPECT_TRUE(IsArbiterFullyBound());

    // SMB should be free again, as no writer holds on to any chunk anymore.
    for (size_t i = 0; i < shmem_abi->num_pages(); i++)
      EXPECT_TRUE(shmem_abi->is_page_free(i));

    // Write another packet into another chunk and commit it. It should be sent
    // to the arbiter with invalid target buffer (ID 0).
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke(
            [shmem_abi](const CommitDataRequest& req,
                        MockProducerEndpoint::CommitDataCallback callback) {
              ASSERT_EQ(1, req.chunks_to_move_size());
              EXPECT_EQ(0u, req.chunks_to_move()[0].target_buffer());
              SharedMemoryABI::Chunk chunk =
                  shmem_abi->TryAcquireChunkForReading(
                      req.chunks_to_move()[0].page(),
                      req.chunks_to_move()[0].chunk());
              shmem_abi->ReleaseChunkAsFree(std::move(chunk));
              callback();
            }));
    bool flush_completed = false;
    writer->Flush([&flush_completed] { flush_completed = true; });
    EXPECT_TRUE(flush_completed);

    // Creating a new startup writer for the same buffer does not cause it to
    // register.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, _)).Times(0);
    std::unique_ptr<TraceWriter> writer1b =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId1);

    // And a commit on this new writer should again be flushed to the invalid
    // target buffer.
    {
      auto packet = writer1b->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke(
            [shmem_abi](const CommitDataRequest& req,
                        MockProducerEndpoint::CommitDataCallback callback) {
              ASSERT_EQ(1, req.chunks_to_move_size());
              EXPECT_EQ(0u, req.chunks_to_move()[0].target_buffer());
              SharedMemoryABI::Chunk chunk =
                  shmem_abi->TryAcquireChunkForReading(
                      req.chunks_to_move()[0].page(),
                      req.chunks_to_move()[0].chunk());
              shmem_abi->ReleaseChunkAsFree(std::move(chunk));
              callback();
            }));
    flush_completed = false;
    writer1b->Flush([&flush_completed] { flush_completed = true; });
    EXPECT_TRUE(flush_completed);

    // Create another startup writer for another target buffer, which puts the
    // arbiter back into unbound state.
    std::unique_ptr<TraceWriter> writer3 =
        arbiter_->CreateStartupTraceWriter(kTargetBufferReservationId2);
    EXPECT_FALSE(IsArbiterFullyBound());

    // Write a chunk into both writers. Both should be queued up into the next
    // commit request.
    {
      auto packet = writer->NewTracePacket();
      packet->set_for_testing()->set_str("foo");
    }
    writer->Flush();
    {
      auto packet = writer3->NewTracePacket();
      packet->set_for_testing()->set_str("bar");
    }
    flush_completed = false;
    writer3->Flush([&flush_completed] { flush_completed = true; });

    // Destroy the first trace writer, which should cause the arbiter to post a
    // task to unregister it.
    auto checkpoint_writer =
        task_runner_->CreateCheckpoint("writer_unregistered");
    EXPECT_CALL(mock_producer_endpoint_,
                UnregisterTraceWriter(writer->writer_id()))
        .WillOnce(testing::InvokeWithoutArgs(checkpoint_writer));
    writer.reset();
    task_runner_->RunUntilCheckpoint("writer_unregistered", 5000);

    // Abort the second session. Its commits should now also be associated with
    // target buffer 0, and both writers' commits flushed.
    EXPECT_CALL(mock_producer_endpoint_, RegisterTraceWriter(_, _)).Times(0);
    EXPECT_CALL(mock_producer_endpoint_, CommitData(_, _))
        .WillOnce(Invoke(
            [shmem_abi](const CommitDataRequest& req,
                        MockProducerEndpoint::CommitDataCallback callback) {
              ASSERT_EQ(2, req.chunks_to_move_size());
              for (size_t i = 0; i < 2; i++) {
                EXPECT_EQ(0u, req.chunks_to_move()[i].target_buffer());
                SharedMemoryABI::Chunk chunk =
                    shmem_abi->TryAcquireChunkForReading(
                        req.chunks_to_move()[i].page(),
                        req.chunks_to_move()[i].chunk());
                shmem_abi->ReleaseChunkAsFree(std::move(chunk));
              }
              callback();
            }));

    arbiter_->AbortStartupTracingForReservation(kTargetBufferReservationId2);
    EXPECT_TRUE(IsArbiterFullyBound());
    EXPECT_TRUE(flush_completed);

    // SMB should be free again, as no writer holds on to any chunk anymore.
    for (size_t i = 0; i < shmem_abi->num_pages(); i++)
      EXPECT_TRUE(shmem_abi->is_page_free(i));
  }
};

INSTANTIATE_TEST_SUITE_P(PageSize,
                         SharedMemoryArbiterImplStartupTracingTest,
                         ::testing::ValuesIn(kPageSizes));

TEST_P(SharedMemoryArbiterImplStartupTracingTest, StartupTracingUnbound) {
  TestStartupTracing(InitialBindingState::kUnbound);
}

TEST_P(SharedMemoryArbiterImplStartupTracingTest, StartupTracingBound) {
  TestStartupTracing(InitialBindingState::kBound);
}

TEST_P(SharedMemoryArbiterImplStartupTracingTest,
       AbortStartupTracingForReservationUnbound) {
  TestAbortStartupTracingForReservation(InitialBindingState::kUnbound);
}

TEST_P(SharedMemoryArbiterImplStartupTracingTest,
       AbortStartupTracingForReservationBound) {
  TestAbortStartupTracingForReservation(InitialBindingState::kBound);
}

// TODO(primiano): add multi-threaded tests.

}  // namespace perfetto