// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gtest/gtest.h>  // for Message
#include <stdint.h>       // for uint32_t
#include <stdio.h>        // for printf
#include <string.h>       // for size_t
#include <sys/types.h>    // for ssize_t
#include <algorithm>      // for uniform_...
#include <functional>     // for __base
#include <random>         // for default_...
#include <vector>         // for vector

#include "base/ring_buffer.h"                          // for ring_buf...
#include "base/FunctorThread.h"                        // for FunctorT...
#include "host-common/AndroidAgentFactory.h"           // for getConso...
#include "host-common/AddressSpaceService.h"           // for AddressS...
#include "host-common/address_space_device.hpp"        // for goldfish...
#include "host-common/address_space_graphics.h"        // for AddressS...
#include "host-common/address_space_graphics_types.h"  // for asg_context
#include "testing/HostAddressSpace.h"                  // for HostAddr...
#include "host-common/globals.h"                       // for android_hw

namespace android {
namespace base {
class Stream;
} // namespace base
} // namespace android

using android::base::FunctorThread;

namespace android {
namespace emulation {
namespace asg {

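// Fill patterns used to verify payload integrity in each direction:
// the guest writes ASG_TEST_WRITE_PATTERN; the host replies with
// ASG_TEST_READ_PATTERN.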
#define ASG_TEST_READ_PATTERN 0xAA
#define ASG_TEST_WRITE_PATTERN 0xBB

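// Exercises address space graphics (ASG) end to end in a single process:
// a guest-side Client talks to a host-side Consumer thread through a
// HostAddressSpaceDevice.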
class AddressSpaceGraphicsTest : public ::testing::Test {
public:
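    // Emulates the guest-side ASG client: opens the device, maps the ring
    // and write buffer via ASG_GET_RING / ASG_GET_BUFFER, negotiates the
    // protocol version, and implements the guest half of the transport.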
    class Client {
    public:
        Client(HostAddressSpaceDevice* device) :
            mDevice(device),
            mHandle(mDevice->open()) {

            ping((uint64_t)AddressSpaceDeviceType::Graphics);

            auto getRingResult = ping((uint64_t)ASG_GET_RING);
            mRingOffset = getRingResult.metadata;
            mRingSize = getRingResult.size;

            EXPECT_EQ(0, mDevice->claimShared(mHandle, mRingOffset, mRingSize));

            mRingStorage =
                (char*)mDevice->getHostAddr(
                    mDevice->offsetToPhysAddr(mRingOffset));

            auto getBufferResult = ping((uint64_t)ASG_GET_BUFFER);
            mBufferOffset = getBufferResult.metadata;
            mBufferSize = getBufferResult.size;

            EXPECT_EQ(0, mDevice->claimShared(mHandle, mBufferOffset, mBufferSize));
            mBuffer =
                (char*)mDevice->getHostAddr(
                    mDevice->offsetToPhysAddr(mBufferOffset));

            mContext = asg_context_create(mRingStorage, mBuffer, mBufferSize);

            EXPECT_EQ(mBuffer, mContext.buffer);

            auto setVersionResult = ping((uint64_t)ASG_SET_VERSION, mVersion);
            uint32_t hostVersion = setVersionResult.size;
            EXPECT_LE(hostVersion, mVersion);
            EXPECT_EQ(android_hw->hw_gltransport_asg_writeStepSize,
                      mContext.ring_config->flush_interval);
            EXPECT_EQ(android_hw->hw_gltransport_asg_writeBufferSize,
                      mBufferSize);

            mContext.ring_config->transfer_mode = 1;
            mContext.ring_config->host_consumed_pos = 0;
            mContext.ring_config->guest_write_pos = 0;
            mBufferMask = mBufferSize - 1;

            mWriteStart = mBuffer;
            // Batch writes up to one flush interval per step.
            mWriteStep = mContext.ring_config->flush_interval;
        }

        ~Client() {
            mDevice->unclaimShared(mHandle, mBufferOffset);
            mDevice->unclaimShared(mHandle, mRingOffset);
            mDevice->close(mHandle);
        }

        bool isInError() const {
            return 1 == mContext.ring_config->in_error;
        }

        void abort() {
            mContext.ring_config->in_error = 1;
        }

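        // Reserves |size| bytes in the current write step, flushing first
        // if the step would overflow. Returns nullptr if |size| exceeds
        // the flush interval (the type 1 transfer limit).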
        char* allocBuffer(size_t size) {
            if (size > mContext.ring_config->flush_interval) {
                return nullptr;
            }

            if (mWriteStart + mCurrentWriteBytes + size >
                mWriteStart + mWriteStep) {
                flush();
                mCurrentWriteBytes = 0;
            }

            char* res = mWriteStart + mCurrentWriteBytes;
            mCurrentWriteBytes += size;

            return res;
        }

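        // Type 3 (large) transfer: switches transfer_mode to 3 and streams
        // |size| bytes through the to_host_large_xfer ring in chunks of at
        // most a quarter of the buffer size, pinging the host whenever it
        // is not already consuming.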
        int writeFully(const char* buf, size_t size) {
            flush();
            ensureType1Finished();
            mContext.ring_config->transfer_size = size;
            mContext.ring_config->transfer_mode = 3;

            size_t sent = 0;
            size_t quarterRingSize = mBufferSize / 4;
            size_t chunkSize = size < quarterRingSize ? size : quarterRingSize;

            while (sent < size) {
                size_t remaining = size - sent;
                size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

                long sentChunks =
                    ring_buffer_view_write(
                        mContext.to_host_large_xfer.ring,
                        &mContext.to_host_large_xfer.view,
                        buf + sent, sendThisTime, 1);

                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                }

                if (sentChunks == 0) {
                    ring_buffer_yield();
                }

                sent += sentChunks * sendThisTime;

                if (isInError()) {
                    return -1;
                }
            }

            ensureType3Finished();
            mContext.ring_config->transfer_mode = 1;
            return 0;
        }

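        // Reads up to |minSizeToRead| bytes from the from_host_large_xfer
        // ring, yielding until at least one chunk arrives. Returns the
        // number of bytes read, or -1 if the channel went into error.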
        ssize_t speculativeRead(char* readBuffer, size_t minSizeToRead) {
            flush();
            ensureConsumerFinishing();

            size_t actuallyRead = 0;
            while (!actuallyRead) {
                uint32_t readAvail =
                    ring_buffer_available_read(
                        mContext.from_host_large_xfer.ring,
                        &mContext.from_host_large_xfer.view);

                if (!readAvail) {
                    ring_buffer_yield();
                    continue;
                }

                uint32_t toRead = readAvail > minSizeToRead ?
                    minSizeToRead : readAvail;

                long stepsRead = ring_buffer_view_read(
                    mContext.from_host_large_xfer.ring,
                    &mContext.from_host_large_xfer.view,
                    readBuffer, toRead, 1);

                actuallyRead += stepsRead * toRead;

                if (isInError()) {
                    return -1;
                }
            }

            return actuallyRead;
        }

        void flush() {
            if (!mCurrentWriteBytes) return;
            type1WriteWithNotify(mWriteStart - mBuffer, mCurrentWriteBytes);
            advanceWrite();
        }

        uint32_t get_relative_buffer_pos(uint32_t pos) {
            return pos & mBufferMask;
        }

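        // Bytes currently free for writing, derived from the ring positions:
        // (host_consumed_pos - guest_write_pos - 1), wrapped to the buffer.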
        uint32_t get_available_for_write() {
            uint32_t host_consumed_view;
            __atomic_load(&mContext.ring_config->host_consumed_pos,
                          &host_consumed_view,
                          __ATOMIC_SEQ_CST);
            uint32_t availableForWrite =
                get_relative_buffer_pos(
                    host_consumed_view -
                    mContext.ring_config->guest_write_pos - 1);
            return availableForWrite;
        }

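        // Waits until one full flush interval is free, then advances
        // guest_write_pos by a flush interval and rebases mWriteStart onto
        // the new step.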
        void advanceWrite() {
            uint32_t avail = get_available_for_write();

            while (avail < mContext.ring_config->flush_interval) {
                ensureConsumerFinishing();
                avail = get_available_for_write();
            }

            __atomic_add_fetch(
                &mContext.ring_config->guest_write_pos,
                mContext.ring_config->flush_interval,
                __ATOMIC_SEQ_CST);

            char* newBuffer =
                mBuffer +
                get_relative_buffer_pos(
                    mContext.ring_config->guest_write_pos);

            mWriteStart = newBuffer;
            mCurrentWriteBytes = 0;
        }

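        // Type 1 transfer: posts an asg_type1_xfer descriptor (buffer
        // offset + size) onto the to_host ring, pinging the host if it is
        // not already consuming. Returns -1 if the channel goes into error.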
        int type1WriteWithNotify(uint32_t bufferOffset, size_t size) {
            size_t sent = 0;
            size_t sizeForRing = 8;

            struct asg_type1_xfer xfer {
                bufferOffset,
                (uint32_t)size,
            };

            uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

            while (sent < sizeForRing) {
                long sentChunks = ring_buffer_write(
                    mContext.to_host, writeBufferBytes + sent, sizeForRing - sent, 1);

                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                }

                if (sentChunks == 0) {
                    ring_buffer_yield();
                }

                sent += sentChunks * (sizeForRing - sent);

                if (isInError()) {
                    return -1;
                }
            }

            return 0;
        }

        void ensureConsumerFinishing() {
            uint32_t currAvailRead =
                ring_buffer_available_read(mContext.to_host, 0);

            while (currAvailRead) {
                ring_buffer_yield();
                uint32_t nextAvailRead = ring_buffer_available_read(mContext.to_host, 0);

                if (nextAvailRead != currAvailRead) {
                    break;
                }

                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                    break;
                }
            }
        }

        void ensureType1Finished() {
            ensureConsumerFinishing();

            uint32_t currAvailRead =
                ring_buffer_available_read(mContext.to_host, 0);

            while (currAvailRead) {
                ring_buffer_yield();
                currAvailRead = ring_buffer_available_read(mContext.to_host, 0);
                if (isInError()) {
                    return;
                }
            }
        }

        void ensureType3Finished() {
            uint32_t availReadLarge =
                ring_buffer_available_read(
                    mContext.to_host_large_xfer.ring,
                    &mContext.to_host_large_xfer.view);
            while (availReadLarge) {
                ring_buffer_yield();
                availReadLarge =
                    ring_buffer_available_read(
                        mContext.to_host_large_xfer.ring,
                        &mContext.to_host_large_xfer.view);
                if (*(mContext.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
                    ping(ASG_NOTIFY_AVAILABLE);
                }
                if (isInError()) {
                    return;
                }
            }
        }

        char* getBufferPtr() { return mBuffer; }

    private:

        AddressSpaceDevicePingInfo ping(uint64_t metadata, uint64_t size = 0) {
            AddressSpaceDevicePingInfo info;
            info.metadata = metadata;
            info.size = size;
            mDevice->ping(mHandle, &info);
            return info;
        }

        HostAddressSpaceDevice* mDevice;
        uint32_t mHandle;
        uint64_t mRingOffset;
        uint64_t mRingSize;
        uint64_t mBufferOffset;
        uint64_t mBufferSize;
        char* mRingStorage;
        char* mBuffer;
        struct asg_context mContext;
        uint32_t mVersion = 1;

        char* mWriteStart = 0;
        uint32_t mWriteStep = 0;
        uint32_t mCurrentWriteBytes = 0;
        uint32_t mBufferMask = 0;
    };

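    // Stands in for the host-side renderer: a thread that drains the
    // transfer rings, verifies the write pattern, and, when round-trip mode
    // is enabled, writes back a reply filled with the read pattern.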
    class Consumer {
    public:
        Consumer(struct asg_context context,
                 ConsumerCallbacks callbacks) :
            mContext(context),
            mCallbacks(callbacks),
            mThread([this] { threadFunc(); }) {
            mThread.start();
        }

        ~Consumer() {
            mThread.wait();
        }

        void setRoundTrip(bool enabled,
                          uint32_t toHostBytes = 0,
                          uint32_t fromHostBytes = 0) {
            mRoundTripEnabled = enabled;
            if (mRoundTripEnabled) {
                mToHostBytes = toHostBytes;
                mFromHostBytes = fromHostBytes;
            }
        }

        void handleRoundTrip() {
            if (!mRoundTripEnabled) return;

            if (mReadPos == mToHostBytes) {
                std::vector<char> reply(mFromHostBytes, ASG_TEST_READ_PATTERN);
                uint32_t origBytes = mFromHostBytes;
                auto res = ring_buffer_write_fully_with_abort(
                    mContext.from_host_large_xfer.ring,
                    &mContext.from_host_large_xfer.view,
                    reply.data(),
                    mFromHostBytes,
                    1, &mContext.ring_config->in_error);
                if (res < mFromHostBytes) {
                    printf("%s: aborted write (%u vs %u %u). in error? %u\n", __func__,
                           res, mFromHostBytes, origBytes,
                           mContext.ring_config->in_error);
                    EXPECT_EQ(1, mContext.ring_config->in_error);
                }
                mReadPos = 0;
            }
        }

        void ensureWritebackDone() {
            while (mReadPos) {
                ring_buffer_yield();
            }
        }

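        // One iteration of the consumer loop: dispatches on available
        // traffic (type 1/2 descriptors on to_host, type 3 payloads on the
        // large-xfer ring), or reports an unavailable read via callbacks.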
        int step() {
            uint32_t nonLargeAvail =
                ring_buffer_available_read(
                    mContext.to_host, 0);

            uint32_t largeAvail =
                ring_buffer_available_read(
                    mContext.to_host_large_xfer.ring,
                    &mContext.to_host_large_xfer.view);

            ensureReadBuffer(nonLargeAvail);

            int res = 0;
            if (nonLargeAvail) {
                uint32_t transferMode = mContext.ring_config->transfer_mode;

                switch (transferMode) {
                    case 1:
                        type1Read(nonLargeAvail);
                        break;
                    case 2:
                        type2Read(nonLargeAvail);
                        break;
                    case 3:
                        break;
                    default:
                        EXPECT_TRUE(false) << "Failed, invalid transfer mode";
                }
            } else if (largeAvail) {
                res = type3Read(largeAvail);
            } else {
                res = mCallbacks.onUnavailableRead();
            }

            handleRoundTrip();

            return res;
        }

        void ensureReadBuffer(uint32_t new_xfer) {
            size_t readBufferAvail = mReadBuffer.size() - mReadPos;
            if (readBufferAvail < new_xfer) {
                mReadBuffer.resize(mReadBuffer.size() + 2 * new_xfer);
            }
        }

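        // Consumes type 1 descriptors: each names an (offset, size) span of
        // the shared buffer, which is copied out, verified against the
        // write pattern, and acknowledged by bumping host_consumed_pos.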
        void type1Read(uint32_t avail) {
            uint32_t xferTotal = avail / 8;
            for (uint32_t i = 0; i < xferTotal; ++i) {
                struct asg_type1_xfer currentXfer;
                uint8_t* currentXferPtr = (uint8_t*)(&currentXfer);

                EXPECT_EQ(0, ring_buffer_copy_contents(
                    mContext.to_host, 0,
                    sizeof(currentXfer), currentXferPtr));

                char* ptr = mContext.buffer + currentXfer.offset;
                size_t size = currentXfer.size;

                ensureReadBuffer(size);

                memcpy(mReadBuffer.data() + mReadPos,
                       ptr, size);

                for (uint32_t j = 0; j < size; ++j) {
                    EXPECT_EQ((char)ASG_TEST_WRITE_PATTERN,
                              (mReadBuffer.data() + mReadPos)[j]);
                }

                mReadPos += size;
                mContext.ring_config->host_consumed_pos =
                    ptr - mContext.buffer;

                EXPECT_EQ(1, ring_buffer_advance_read(
                    mContext.to_host, sizeof(asg_type1_xfer), 1));
            }
        }

        void type2Read(uint32_t avail) {
            uint32_t xferTotal = avail / 16;
            for (uint32_t i = 0; i < xferTotal; ++i) {
                struct asg_type2_xfer currentXfer;
                uint8_t* xferPtr = (uint8_t*)(&currentXfer);

                EXPECT_EQ(0, ring_buffer_copy_contents(
                    mContext.to_host, 0, sizeof(currentXfer),
                    xferPtr));

                char* ptr = mCallbacks.getPtr(currentXfer.physAddr);
                ensureReadBuffer(currentXfer.size);

                memcpy(mReadBuffer.data() + mReadPos, ptr,
                       currentXfer.size);
                mReadPos += currentXfer.size;

                EXPECT_EQ(1, ring_buffer_advance_read(
                    mContext.to_host, sizeof(currentXfer), 1));
            }
        }

        int type3Read(uint32_t avail) {
            ensureReadBuffer(avail);
            ring_buffer_read_fully_with_abort(
                mContext.to_host_large_xfer.ring,
                &mContext.to_host_large_xfer.view,
                mReadBuffer.data() + mReadPos,
                avail,
                1, &mContext.ring_config->in_error);
            mReadPos += avail;
            return 0;
        }

    private:

        void threadFunc() {
            while (-1 != step());
        }

        struct asg_context mContext;
        ConsumerCallbacks mCallbacks;
        FunctorThread mThread;
        std::vector<char> mReadBuffer;
        std::vector<char> mWriteBuffer;
        size_t mReadPos = 0;
        uint32_t mToHostBytes = 0;
        uint32_t mFromHostBytes = 0;
        bool mRoundTripEnabled = false;
    };

protected:
    static void SetUpTestCase() {
        goldfish_address_space_set_vm_operations(getConsoleAgents()->vm);
    }

    static void TearDownTestCase() { }

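    // Installs a ConsumerInterface whose create/destroy callbacks track the
    // currently live Consumer so tests can drive round trips against it;
    // the snapshot (save/load) callbacks are left as no-ops.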
    void SetUp() override {
        android_hw->hw_gltransport_asg_writeBufferSize = 524288;
        android_hw->hw_gltransport_asg_writeStepSize = 1024;

        mDevice = HostAddressSpaceDevice::get();
        ConsumerInterface interface = {
            // create
            [this](struct asg_context context,
                   base::Stream* loadStream,
                   ConsumerCallbacks callbacks) {
                Consumer* c = new Consumer(context, callbacks);
                mCurrentConsumer = c;
                return (void*)c;
            },
            // destroy
            [this](void* context) {
                Consumer* c = reinterpret_cast<Consumer*>(context);
                delete c;
                mCurrentConsumer = nullptr;
            },
            // presave
            [](void* consumer) { },
            // global presave
            []() { },
            // save
            [](void* consumer, base::Stream* stream) { },
            // global postsave
            []() { },
            // postsave
            [](void* consumer) { },
            // postload
            [](void* consumer) { },
            // global preload
            []() { },
        };
        AddressSpaceGraphicsContext::setConsumer(interface);
    }

    void TearDown() override {
        AddressSpaceGraphicsContext::clear();
        mDevice->clear();
        android_hw->hw_gltransport_asg_writeBufferSize = 524288;
        android_hw->hw_gltransport_asg_writeStepSize = 1024;
        EXPECT_EQ(nullptr, mCurrentConsumer);
    }

    void setRoundTrip(bool enabled, size_t writeBytes, size_t readBytes) {
        EXPECT_NE(nullptr, mCurrentConsumer);
        mCurrentConsumer->setRoundTrip(enabled, writeBytes, readBytes);
    }

    struct RoundTrip {
        size_t writeBytes;
        size_t readBytes;
    };

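    // Drives each round trip: writes the write pattern in step-sized
    // chunks, flushes, reads back into a scratch buffer, and verifies the
    // reply matches the expected read pattern.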
    void runRoundTrips(Client& client, const std::vector<RoundTrip>& trips) {
        EXPECT_NE(nullptr, mCurrentConsumer);

        for (const auto& trip : trips) {
            mCurrentConsumer->setRoundTrip(true, trip.writeBytes, trip.readBytes);

            std::vector<char> send(trip.writeBytes, ASG_TEST_WRITE_PATTERN);
            std::vector<char> expectedRead(trip.readBytes, ASG_TEST_READ_PATTERN);
            std::vector<char> toRead(trip.readBytes, 0);

            size_t stepSize = android_hw->hw_gltransport_asg_writeStepSize;
            size_t stepSizeRead = android_hw->hw_gltransport_asg_writeBufferSize;

            size_t sent = 0;
            while (sent < trip.writeBytes) {
                size_t remaining = trip.writeBytes - sent;
                size_t next = remaining < stepSize ? remaining : stepSize;
                auto buf = client.allocBuffer(next);
                memcpy(buf, send.data() + sent, next);
                sent += next;
            }

            client.flush();

            size_t recv = 0;

            while (recv < trip.readBytes) {
                ssize_t readThisTime = client.speculativeRead(
                    toRead.data() + recv, stepSizeRead);
                EXPECT_GE(readThisTime, 0);
                recv += readThisTime;
            }

            EXPECT_EQ(expectedRead, toRead);

            // make sure the consumer is hung up here or this will
            // race with setRoundTrip
            mCurrentConsumer->ensureWritebackDone();
        }

        mCurrentConsumer->setRoundTrip(false);
    }

    HostAddressSpaceDevice* mDevice = nullptr;
    Consumer* mCurrentConsumer = nullptr;
};


// Tests that we can create a client for ASG,
// which in turn creates a consumer thread on the "host".
// Then tests thread teardown.
TEST_F(AddressSpaceGraphicsTest, Basic) {
    Client client(mDevice);
}

// Tests writing via an IOStream-like interface
// (allocBuffer, then flush)
TEST_F(AddressSpaceGraphicsTest, BasicWrite) {
    EXPECT_EQ(1024, android_hw->hw_gltransport_asg_writeStepSize);
    Client client(mDevice);

    // Tests that going over the step size results in nullptr
    // when using allocBuffer
    auto buf = client.allocBuffer(1025);
    EXPECT_EQ(nullptr, buf);

    buf = client.allocBuffer(4);
    EXPECT_NE(nullptr, buf);
    memset(buf, ASG_TEST_WRITE_PATTERN, 4);
    client.flush();
}

// Tests that further allocs result in flushing
TEST_F(AddressSpaceGraphicsTest, FlushFromAlloc) {
    EXPECT_EQ(1024, android_hw->hw_gltransport_asg_writeStepSize);
    Client client(mDevice);

    auto buf = client.allocBuffer(1024);
    memset(buf, ASG_TEST_WRITE_PATTERN, 1024);

    for (uint32_t i = 0; i < 10; ++i) {
        buf = client.allocBuffer(1024);
        memset(buf, ASG_TEST_WRITE_PATTERN, 1024);
    }
}

// Tests type 3 (large) transfer by itself
TEST_F(AddressSpaceGraphicsTest, LargeXfer) {
    Client client(mDevice);

    std::vector<char> largeBuf(1048576, ASG_TEST_WRITE_PATTERN);
    client.writeFully(largeBuf.data(), largeBuf.size());
}

// Round trip test
TEST_F(AddressSpaceGraphicsTest, RoundTrip) {
    Client client(mDevice);
    setRoundTrip(true, 1, 1);
    char element = (char)(ASG_TEST_WRITE_PATTERN);
    char reply;

    auto buf = client.allocBuffer(1);
    *buf = element;
    client.flush();
    client.speculativeRead(&reply, 1);
}

// Round trip test (more than one)
TEST_F(AddressSpaceGraphicsTest, RoundTrips) {
    Client client(mDevice);

    std::vector<RoundTrip> trips = {
        { 1, 1, },
        { 2, 2, },
        { 4, 4, },
        { 1026, 34, },
        { 4, 1048576, },
    };

    runRoundTrips(client, trips);
}

// Round trip test (random)
TEST_F(AddressSpaceGraphicsTest, RoundTripsRandom) {
    Client client(mDevice);

    std::default_random_engine generator;
    generator.seed(0);
    std::uniform_int_distribution<int> sizeDist(1, 4097);

    std::vector<RoundTrip> trips;
    for (uint32_t i = 0; i < 1000; ++i) {
        trips.push_back({
            (size_t)sizeDist(generator),
            (size_t)sizeDist(generator),
        });
    }

    runRoundTrips(client, trips);
}

// Abort test. Set up a round trip where the host writes back 1048576
// bytes, but abort after sending a single byte, without reading the reply.
TEST_F(AddressSpaceGraphicsTest, Abort) {
    Client client(mDevice);
    setRoundTrip(true, 1, 1048576);

    char send = ASG_TEST_WRITE_PATTERN;
    auto buf = client.allocBuffer(1);
    *buf = send;
    client.flush();
    client.abort();
}

// Test having to create more than one block, and
// ensure traffic works each time.
TEST_F(AddressSpaceGraphicsTest, BlockCreateDestroy) {
    std::vector<Client*> clients;

    std::default_random_engine generator;
    generator.seed(0);
    std::uniform_int_distribution<int> sizeDist(1, 47);

    std::vector<RoundTrip> trips;
    for (uint32_t i = 0; i < 100; ++i) {
        trips.push_back({
            (size_t)sizeDist(generator),
            (size_t)sizeDist(generator),
        });
    }

    int numBlocksMax = 3;
    int numBlocksDetected = 0;
    char* bufLow = (char*)(uintptr_t)(-1);
    char* bufHigh = 0;

    while (true) {
        Client* c = new Client(mDevice);
        runRoundTrips(*c, trips);

        clients.push_back(c);

        char* bufPtr = c->getBufferPtr();
        bufLow = bufPtr < bufLow ? bufPtr : bufLow;
        bufHigh = bufPtr > bufHigh ? bufPtr : bufHigh;

        size_t gap = bufHigh - bufLow;

        numBlocksDetected =
            gap / ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE;

        if (numBlocksDetected > numBlocksMax) break;
    }

    for (auto c : clients) {
        delete c;
    }
}

// Test having to create more than one block, and
// ensure traffic works each time, but also randomly
// delete previous allocs to cause fragmentation.
TEST_F(AddressSpaceGraphicsTest, BlockCreateDestroyRandom) {
    std::vector<Client*> clients;

    std::default_random_engine generator;
    generator.seed(0);

    std::uniform_int_distribution<int> sizeDist(1, 89);
    std::bernoulli_distribution deleteDist(0.2);

    std::vector<RoundTrip> trips;
    for (uint32_t i = 0; i < 100; ++i) {
        trips.push_back({
            (size_t)sizeDist(generator),
            (size_t)sizeDist(generator),
        });
    }

    int numBlocksMax = 3;
    int numBlocksDetected = 0;
    char* bufLow = (char*)(uintptr_t)(-1);
    char* bufHigh = 0;

    while (true) {
        Client* c = new Client(mDevice);
        runRoundTrips(*c, trips);

        clients.push_back(c);

        char* bufPtr = c->getBufferPtr();
        bufLow = bufPtr < bufLow ? bufPtr : bufLow;
        bufHigh = bufPtr > bufHigh ? bufPtr : bufHigh;

        size_t gap = bufHigh - bufLow;

        numBlocksDetected =
            gap / ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE;

        if (numBlocksDetected > numBlocksMax) break;

        if (deleteDist(generator)) {
            delete c;
            clients[clients.size() - 1] = nullptr;
        }
    }

    for (auto c : clients) {
        delete c;
    }
}

} // namespace asg
} // namespace emulation
} // namespace android