1 // Copyright 2010 the V8 project authors. All rights reserved.
2 //
3 // Tests of the circular queue.
4
5 #include "v8.h"
6 #include "circular-queue-inl.h"
7 #include "cctest.h"
8
9 using i::SamplingCircularQueue;
10
11
TEST(SamplingCircularQueue) {
  typedef SamplingCircularQueue::Cell Record;
  const int kRecordsPerChunk = 4;
  SamplingCircularQueue scq(sizeof(Record),
                            kRecordsPerChunk * sizeof(Record),
                            3);

  // The test writes raw values starting at 1, so the queue's reserved
  // marker values must not collide with them.
  CHECK_NE(SamplingCircularQueue::kClear, 1);
  CHECK_NE(SamplingCircularQueue::kEnd, 1);

  // Nothing is consumable while the first chunk is being filled.
  CHECK_EQ(NULL, scq.StartDequeue());
  for (Record value = 1; value < 1 + kRecordsPerChunk; ++value) {
    Record* slot = reinterpret_cast<Record*>(scq.Enqueue());
    CHECK_NE(NULL, slot);
    *slot = value;
    CHECK_EQ(NULL, scq.StartDequeue());
  }

  // Fill up the second chunk; consumption must still be unavailable.
  CHECK_EQ(NULL, scq.StartDequeue());
  for (Record value = 10; value < 10 + kRecordsPerChunk; ++value) {
    Record* slot = reinterpret_cast<Record*>(scq.Enqueue());
    CHECK_NE(NULL, slot);
    *slot = value;
    CHECK_EQ(NULL, scq.StartDequeue());
  }

  Record* slot = reinterpret_cast<Record*>(scq.Enqueue());
  CHECK_NE(NULL, slot);
  *slot = 20;
  // Writing into the third chunk makes the first one consumable.
  CHECK_NE(NULL, scq.StartDequeue());

  // Drain the first chunk, one record at a time.
  for (Record expected = 1; expected < 1 + kRecordsPerChunk; ++expected) {
    Record* head = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, head);
    CHECK_EQ(static_cast<int64_t>(expected), static_cast<int64_t>(*head));
    // StartDequeue keeps returning the same record until FinishDequeue.
    CHECK_EQ(head, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(head, reinterpret_cast<Record*>(scq.StartDequeue()));
  }
  // Consumption stops here: the consumer now polls the first chunk
  // for emptiness.
  CHECK_EQ(NULL, scq.StartDequeue());

  scq.FlushResidualRecords();
  // From now on the consumer no longer polls ahead of the current
  // chunk, so the second chunk becomes consumable.
  CHECK_NE(NULL, scq.StartDequeue());
  // Drain the second chunk.
  for (Record expected = 10; expected < 10 + kRecordsPerChunk; ++expected) {
    Record* head = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, head);
    CHECK_EQ(static_cast<int64_t>(expected), static_cast<int64_t>(*head));
    CHECK_EQ(head, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(head, reinterpret_cast<Record*>(scq.StartDequeue()));
  }
  // Consumption must still be possible, as the first cell of the
  // last chunk is not clean.
  CHECK_NE(NULL, scq.StartDequeue());
}
77
78
79 namespace {
80
81 class ProducerThread: public i::Thread {
82 public:
83 typedef SamplingCircularQueue::Cell Record;
84
ProducerThread(SamplingCircularQueue * scq,int records_per_chunk,Record value,i::Semaphore * finished)85 ProducerThread(SamplingCircularQueue* scq,
86 int records_per_chunk,
87 Record value,
88 i::Semaphore* finished)
89 : Thread("producer"),
90 scq_(scq),
91 records_per_chunk_(records_per_chunk),
92 value_(value),
93 finished_(finished) { }
94
Run()95 virtual void Run() {
96 for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
97 Record* rec = reinterpret_cast<Record*>(scq_->Enqueue());
98 CHECK_NE(NULL, rec);
99 *rec = i;
100 }
101
102 finished_->Signal();
103 }
104
105 private:
106 SamplingCircularQueue* scq_;
107 const int records_per_chunk_;
108 Record value_;
109 i::Semaphore* finished_;
110 };
111
112 } // namespace
113
TEST(SamplingCircularQueueMultithreading) {
  // Emulate multiple VM threads working 'one thread at a time.'
  // Data is enqueued from several different threads, which matches
  // profiling under Linux, where the sampling signal handler runs in
  // the context of whichever VM thread is current.

  typedef ProducerThread::Record Record;
  const int kRecordsPerChunk = 4;
  SamplingCircularQueue scq(sizeof(Record),
                            kRecordsPerChunk * sizeof(Record),
                            3);
  i::Semaphore* semaphore = i::OS::CreateSemaphore(0);
  // Disable polling ahead, so that buffered data can be checked
  // immediately after it has been enqueued.
  scq.FlushResidualRecords();

  // The producers write raw values starting at 1; make sure those do
  // not collide with the queue's reserved marker values.
  CHECK_NE(SamplingCircularQueue::kClear, 1);
  CHECK_NE(SamplingCircularQueue::kEnd, 1);
  ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore);
  ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore);
  ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore);

  // Round 1: producer1 fills a chunk, then the chunk is drained and
  // verified record by record.
  CHECK_EQ(NULL, scq.StartDequeue());
  producer1.Start();
  semaphore->Wait();
  for (Record expected = 1; expected < 1 + kRecordsPerChunk; ++expected) {
    Record* head = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, head);
    CHECK_EQ(static_cast<int64_t>(expected), static_cast<int64_t>(*head));
    CHECK_EQ(head, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(head, reinterpret_cast<Record*>(scq.StartDequeue()));
  }

  // Round 2: same with producer2.
  CHECK_EQ(NULL, scq.StartDequeue());
  producer2.Start();
  semaphore->Wait();
  for (Record expected = 10; expected < 10 + kRecordsPerChunk; ++expected) {
    Record* head = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, head);
    CHECK_EQ(static_cast<int64_t>(expected), static_cast<int64_t>(*head));
    CHECK_EQ(head, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(head, reinterpret_cast<Record*>(scq.StartDequeue()));
  }

  // Round 3: same with producer3.
  CHECK_EQ(NULL, scq.StartDequeue());
  producer3.Start();
  semaphore->Wait();
  for (Record expected = 20; expected < 20 + kRecordsPerChunk; ++expected) {
    Record* head = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, head);
    CHECK_EQ(static_cast<int64_t>(expected), static_cast<int64_t>(*head));
    CHECK_EQ(head, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(head, reinterpret_cast<Record*>(scq.StartDequeue()));
  }

  CHECK_EQ(NULL, scq.StartDequeue());

  delete semaphore;
}
177