/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <source/AudioSource.h>

#include <libyuv/convert.h>

#include <system/audio.h>

#include "host/libs/config/cuttlefish_config.h"

#include <opus.h>

#include <gflags/gflags.h>
#include <cmath>

#define LOG_AUDIO       0

namespace android {

namespace {

// These definitions are deleted in master, copying here temporarily
typedef uint32_t size32_t;

struct timespec32 {
  uint32_t tv_sec;
  uint32_t tv_nsec;

  timespec32() = default;

  timespec32(const timespec &from)
      : tv_sec(from.tv_sec),
        tv_nsec(from.tv_nsec) {
  }
};

struct gce_audio_message {
//  static const size32_t kMaxAudioFrameLen = 65536;
  enum message_t {
    UNKNOWN = 0,
    DATA_SAMPLES = 1,
    OPEN_INPUT_STREAM = 2,
    OPEN_OUTPUT_STREAM = 3,
    CLOSE_INPUT_STREAM = 4,
    CLOSE_OUTPUT_STREAM = 5,
    CONTROL_PAUSE = 100
  };
  // Size of the header + data. Used to frame when we're on TCP.
  size32_t total_size;
  // Size of the audio header
  size32_t header_size;
  message_t message_type;
  // Identifier for the stream.
  uint32_t stream_number;
  // HAL assigned frame number, starts from 0.
  int64_t frame_num;
  // MONOTONIC_TIME when these frames were presented to the HAL.
  timespec32 time_presented;
  // Sample rate from the audio configuration.
  uint32_t frame_rate;
  // Channel mask from the audio configuration.
  audio_channel_mask_t channel_mask;
  // Format from the audio configuration.
  audio_format_t format;
  // Size of each frame in bytes.
  size32_t frame_size;
  // Number of frames that were presented to the HAL.
  size32_t num_frames_presented;
  // Number of frames that the HAL accepted.
  //   For blocking audio this will be the same as num_frames.
  //   For non-blocking audio this may be less.
  size32_t num_frames_accepted;
  // Count of the number of packets that were dropped because they would
  // have blocked the HAL or exceeded the maximum message size.
  size32_t num_packets_dropped;
  // Count of the number of packets that were shortened to fit within
  // kMaxAudioFrameLen.
  size32_t num_packets_shortened;
  // num_frames_presented (not num_frames_accepted) will follow here.

  gce_audio_message() :
      total_size(sizeof(gce_audio_message)),
      header_size(sizeof(gce_audio_message)),
      message_type(UNKNOWN),
      stream_number(0),
      frame_num(0),
      frame_rate(0),
      channel_mask(0),
      format(AUDIO_FORMAT_DEFAULT),
      frame_size(0),
      num_frames_presented(0),
      num_frames_accepted(0),
      num_packets_dropped(0),
      num_packets_shortened(0) {
    time_presented.tv_sec = 0;
    time_presented.tv_nsec = 0;
  }
};

}  // namespace

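// Each buffer handed to an encoder consists of a gce_audio_message header
// followed immediately by interleaved signed 16-bit PCM samples; the encoders
// below read frame_rate and frame_size from the header and treat the
// remaining (size - sizeof(gce_audio_message)) bytes as sample data.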
struct AudioSource::Encoder {
    explicit Encoder();
    virtual ~Encoder() = default;

    virtual int32_t initCheck() const = 0;
    virtual void encode(const void *data, size_t size) = 0;
    virtual void reset() = 0;

    void setFrameCallback(
            std::function<void(const std::shared_ptr<SBuffer> &)> onFrameFn);

protected:
    std::function<void(const std::shared_ptr<SBuffer> &)> mOnFrameFn;
};

AudioSource::Encoder::Encoder()
    : mOnFrameFn(nullptr) {
}

void AudioSource::Encoder::setFrameCallback(
        std::function<void(const std::shared_ptr<SBuffer> &)> onFrameFn) {
    mOnFrameFn = onFrameFn;
}

////////////////////////////////////////////////////////////////////////////////

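// Rate converter for interleaved stereo 16-bit PCM (44.1 kHz -> 48 kHz by
// default). A fractional accumulator repeats each input frame as many times
// as needed to keep the output at the target rate: plain sample-and-hold
// duplication, with no interpolation or filtering.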
struct Upsampler {
    explicit Upsampler(int32_t from = 44100, int32_t to = 48000)
        : mFrom(from),
          mTo(to),
          mCounter(0) {
    }

    void append(const int16_t *data, size_t numFrames) {
        for (size_t i = 0; i < numFrames; ++i) {
            int16_t l = *data++;
            int16_t r = *data++;

            mCounter += mTo;
            while (mCounter >= mFrom) {
                mCounter -= mFrom;

                mBuffer.push_back(l);
                mBuffer.push_back(r);
            }
        }
    }

    const int16_t *data() const { return mBuffer.data(); }

    size_t numFramesAvailable() const { return mBuffer.size() / 2; }

    void drain(size_t numFrames) {
        CHECK_LE(numFrames, numFramesAvailable());

        mBuffer.erase(mBuffer.begin(), mBuffer.begin() + numFrames * 2);
    }

private:
    int32_t mFrom;
    int32_t mTo;

    std::vector<int16_t> mBuffer;

    int32_t mCounter;
};

////////////////////////////////////////////////////////////////////////////////

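// Encoder that parses gce_audio_message buffers, upsamples the PCM payload to
// 48 kHz and emits Opus packets through the frame callback.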
struct AudioSource::OPUSEncoder : public AudioSource::Encoder {
    explicit OPUSEncoder();
    ~OPUSEncoder() override;

    int32_t initCheck() const override;

    OPUSEncoder(const OPUSEncoder &) = delete;
    OPUSEncoder &operator=(const OPUSEncoder &) = delete;

    void encode(const void *data, size_t size) override;
    void reset() override;

private:
    int32_t mInitCheck;

    gce_audio_message mPrevHeader;
    bool mPrevHeaderValid;

    size_t mChannelCount;

    OpusEncoder *mImpl;

    std::unique_ptr<Upsampler> mUpSampler;

    FILE *mLogFile;
};

AudioSource::OPUSEncoder::OPUSEncoder()
    : mInitCheck(-ENODEV),
      mImpl(nullptr),
      mLogFile(nullptr) {
    reset();
    mInitCheck = 0;
}

AudioSource::OPUSEncoder::~OPUSEncoder() {
    reset();
}

int32_t AudioSource::OPUSEncoder::initCheck() const {
    return mInitCheck;
}

void AudioSource::OPUSEncoder::reset() {
    if (mLogFile != nullptr) {
        fclose(mLogFile);
        mLogFile = nullptr;
    }

    mUpSampler.reset();

    if (mImpl) {
        opus_encoder_destroy(mImpl);
        mImpl = nullptr;
    }

    mPrevHeaderValid = false;
    mChannelCount = 0;
}

void AudioSource::OPUSEncoder::encode(const void *_data, size_t size) {
    auto data = static_cast<const uint8_t *>(_data);

    CHECK_GE(size, sizeof(gce_audio_message));

    gce_audio_message hdr;
    std::memcpy(&hdr, data, sizeof(gce_audio_message));

    if (hdr.message_type != gce_audio_message::DATA_SAMPLES) {
        return;
    }

    static int64_t timeUs = 0;

    static int64_t prevTimeUs = 0;

    LOG(VERBOSE)
        << "encode received "
        << ((size - sizeof(gce_audio_message)) / (2 * sizeof(int16_t)))
        << " frames, "
        << " deltaTime = "
        << (((timeUs - prevTimeUs) * hdr.frame_rate) / 1000000ll)
        << " frames";

    prevTimeUs = timeUs;

    if (!mPrevHeaderValid
            || mPrevHeader.frame_size != hdr.frame_size
            || mPrevHeader.frame_rate != hdr.frame_rate
            || mPrevHeader.stream_number != hdr.stream_number) {

        if (mPrevHeaderValid) {
            LOG(INFO)
                << "Found audio data in a different configuration than before!"
                << " frame_size="
                << hdr.frame_size
                << " vs. "
                << mPrevHeader.frame_size
                << ", frame_rate="
                << hdr.frame_rate
                << " vs. "
                << mPrevHeader.frame_rate
                << ", stream_number="
                << hdr.stream_number
                << " vs. "
                << mPrevHeader.stream_number;

            // reset?
            return;
        }

        mPrevHeaderValid = true;
        mPrevHeader = hdr;

        const size_t numChannels = hdr.frame_size / sizeof(int16_t);

#if LOG_AUDIO
        mLogFile = fopen("/tmp/log_remote.opus", "wb");
        CHECK(mLogFile != nullptr);
#endif

        LOG(INFO)
            << "Calling opus_encoder_create w/ "
            << "hdr.frame_rate = "
            << hdr.frame_rate
            << ", numChannels = "
            << numChannels;

        int err;
        mImpl = opus_encoder_create(
                48000,
                numChannels,
                OPUS_APPLICATION_AUDIO,
                &err);

        CHECK_EQ(err, OPUS_OK);

        mChannelCount = numChannels;

        static_assert(sizeof(int16_t) == sizeof(opus_int16));

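        // Presumably tuned for lossy network streaming: enable in-band FEC,
        // tell the encoder to expect roughly 10% packet loss, and restrict
        // the encoder to wideband.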
        err = opus_encoder_ctl(mImpl, OPUS_SET_INBAND_FEC(true));
        CHECK_EQ(err, OPUS_OK);

        err = opus_encoder_ctl(mImpl, OPUS_SET_PACKET_LOSS_PERC(10));
        CHECK_EQ(err, OPUS_OK);

        err = opus_encoder_ctl(
                mImpl, OPUS_SET_BANDWIDTH(OPUS_BANDWIDTH_WIDEBAND));

        CHECK_EQ(err, OPUS_OK);

        CHECK_LE(hdr.frame_rate, 48000);
        mUpSampler = std::make_unique<Upsampler>(hdr.frame_rate, 48000);
    }

    // {2.5, 5, 10, 20, 40, 60, 80, 100, 120} ms
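    // Opus only accepts the frame durations listed above; 48 * 20 = 960
    // frames corresponds to 20 ms at 48 kHz, so each callback invocation
    // below carries one 20 ms Opus packet.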
    static constexpr size_t kNumFramesPerOutputBuffer = 48 * 20;

    const size_t offset = sizeof(gce_audio_message);
    mUpSampler->append(
            reinterpret_cast<const int16_t *>(&data[offset]),
            (size - offset) / (mChannelCount * sizeof(int16_t)));

    while (mUpSampler->numFramesAvailable() >= kNumFramesPerOutputBuffer) {
        size_t copyFrames =
            std::min(mUpSampler->numFramesAvailable(),
                    kNumFramesPerOutputBuffer);

        static constexpr size_t kMaxPacketSize = 8192;

        std::shared_ptr<SBuffer> outBuffer(new SBuffer(kMaxPacketSize));

        auto outSize = opus_encode(
                mImpl,
                reinterpret_cast<const opus_int16 *>(mUpSampler->data()),
                copyFrames,
                outBuffer->data(),
                outBuffer->capacity());

        CHECK_GT(outSize, 0);

        outBuffer->resize(outSize);

        outBuffer->time_us(timeUs);

        mUpSampler->drain(copyFrames);

        timeUs += (copyFrames * 1000ll) / 48;

#if LOG_AUDIO
        fwrite(outBuffer->data(), 1, outBuffer->size(), mLogFile);
        fflush(mLogFile);
#endif

        if (mOnFrameFn) {
            mOnFrameFn(outBuffer);
        }
    }
}

////////////////////////////////////////////////////////////////////////////////

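// Counterpart to Upsampler: converts interleaved stereo 16-bit PCM at mFrom
// (44.1 kHz by default) to mono at mTo (8 kHz by default) by averaging the
// left and right samples and keeping roughly every (mFrom / mTo)-th frame.
// No anti-aliasing filter is applied.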
struct Downsampler {
    explicit Downsampler(int32_t from = 44100, int32_t to = 8000)
        : mFrom(from),
          mTo(to),
          mCounter(0) {
    }

    void append(const int16_t *data, size_t numFrames) {
        for (size_t i = 0; i < numFrames; ++i) {
            int16_t l = *data++;
            int16_t r = *data++;

            mCounter += mTo;
            if (mCounter >= mFrom) {
                mCounter -= mFrom;

                auto mono = (l + r) / 2;
                mBuffer.push_back(mono);
            }
        }
    }

    const int16_t *data() const { return mBuffer.data(); }

    size_t numFramesAvailable() const { return mBuffer.size(); }

    void drain(size_t numFrames) {
        CHECK_LE(numFrames, numFramesAvailable());

        mBuffer.erase(mBuffer.begin(), mBuffer.begin() + numFrames);
    }

private:
    int32_t mFrom;
    int32_t mTo;

    std::vector<int16_t> mBuffer;

    int32_t mCounter;
};

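// Encoder that downsamples the PCM payload to 8 kHz mono and emits G.711
// A-law or mu-law buffers of kNumFramesPerBuffer (512) samples, i.e. 64 ms
// of audio per callback invocation.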
struct AudioSource::G711Encoder : public AudioSource::Encoder {
    enum class Mode {
        ALAW,
        ULAW,
    };

    explicit G711Encoder(Mode mode);

    int32_t initCheck() const override;

    G711Encoder(const G711Encoder &) = delete;
    G711Encoder &operator=(const G711Encoder &) = delete;

    void encode(const void *data, size_t size) override;
    void reset() override;

private:
    static constexpr size_t kNumFramesPerBuffer = 512;

    int32_t mInitCheck;
    Mode mMode;

    gce_audio_message mPrevHeader;
    bool mPrevHeaderValid;

    size_t mChannelCount;

    std::shared_ptr<SBuffer> mOutputFrame;
    Downsampler mDownSampler;

    void doEncode(const int16_t *src, size_t numFrames);
};

AudioSource::G711Encoder::G711Encoder(Mode mode)
    : mInitCheck(-ENODEV),
      mMode(mode) {
    reset();
    mInitCheck = 0;
}

int32_t AudioSource::G711Encoder::initCheck() const {
    return mInitCheck;
}

void AudioSource::G711Encoder::reset() {
    mPrevHeaderValid = false;
    mChannelCount = 0;
}

void AudioSource::G711Encoder::encode(const void *_data, size_t size) {
    auto data = static_cast<const uint8_t *>(_data);

    CHECK_GE(size, sizeof(gce_audio_message));

    gce_audio_message hdr;
    std::memcpy(&hdr, data, sizeof(gce_audio_message));

    if (hdr.message_type != gce_audio_message::DATA_SAMPLES) {
        return;
    }

    static int64_t timeUs = 0;

    static int64_t prevTimeUs = 0;

    LOG(VERBOSE)
        << "encode received "
        << ((size - sizeof(gce_audio_message)) / (2 * sizeof(int16_t)))
        << " frames, "
        << " deltaTime = "
        << ((timeUs - prevTimeUs) * 441) / 10000
        << " frames";

    prevTimeUs = timeUs;

    if (!mPrevHeaderValid
            || mPrevHeader.frame_size != hdr.frame_size
            || mPrevHeader.frame_rate != hdr.frame_rate
            || mPrevHeader.stream_number != hdr.stream_number) {

        if (mPrevHeaderValid) {
            LOG(INFO)
                << "Found audio data in a different configuration than before!"
                << " frame_size="
                << hdr.frame_size
                << " vs. "
                << mPrevHeader.frame_size
                << ", frame_rate="
                << hdr.frame_rate
                << " vs. "
                << mPrevHeader.frame_rate
                << ", stream_number="
                << hdr.stream_number
                << " vs. "
                << mPrevHeader.stream_number;

            // reset?
            return;
        }

        mPrevHeaderValid = true;
        mPrevHeader = hdr;

        mChannelCount = hdr.frame_size / sizeof(int16_t);

        // mono, 8-bit output samples.
        mOutputFrame.reset(new SBuffer(kNumFramesPerBuffer));
    }

    const size_t offset = sizeof(gce_audio_message);
    mDownSampler.append(
            reinterpret_cast<const int16_t *>(&data[offset]),
            (size - offset) / (mChannelCount * sizeof(int16_t)));

    while (mDownSampler.numFramesAvailable() >= kNumFramesPerBuffer) {
        doEncode(mDownSampler.data(), kNumFramesPerBuffer);

        mOutputFrame->time_us(timeUs);

        mDownSampler.drain(kNumFramesPerBuffer);

        timeUs += (kNumFramesPerBuffer * 1000ll) / 8;

        if (mOnFrameFn) {
            mOnFrameFn(mOutputFrame);
        }
    }
}

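// Counts the number of leading zero bits in a 16-bit value: the two branches
// normalize the most significant non-zero nibble into the top four bits, and
// the table supplies the remaining count for that nibble.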
static unsigned clz16(uint16_t x) {
    unsigned n = 0;
    if ((x & 0xff00) == 0) {
        n += 8;
        x <<= 8;
    }
    if ((x & 0xf000) == 0) {
        n += 4;
        x <<= 4;
    }

    static const unsigned kClzNibble[] = {
        4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
    };

    return n + kClzNibble[x >> 12];
}

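// G.711 companding: each sample is reduced to a sign bit, a 3-bit segment
// (exponent) and a 4-bit mantissa. A-law works on 13-bit magnitudes and XORs
// the result with 0x55; mu-law adds a bias of 33 to a 14-bit magnitude and
// XORs with 0xff.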
void AudioSource::G711Encoder::doEncode(const int16_t *src, size_t numFrames) {
    switch (mMode) {
        case Mode::ALAW:
        {
            uint8_t *dst = mOutputFrame->data();

            for (size_t i = numFrames; i--;) {
                uint16_t in = (*src++) >> 3;  // Convert from 16-bit to 13-bit.
                uint8_t inverseSign = 0x80;

                if (in & 0x8000) {
                    in = ~in;
                    inverseSign = 0x00;
                }

                auto numLeadingZeroes = clz16(in);
                auto suffixLength = 16 - numLeadingZeroes;

                static constexpr uint8_t kMask = 0x55;

                if (suffixLength <= 5) {
                    *dst++ = (((in >> 1) & 0x0f) | inverseSign) ^ kMask;
                } else {
                    auto shift = suffixLength - 5;
                    auto abcd = (in >> shift) & 0x0f;
                    *dst++ = (abcd | (shift << 4) | inverseSign) ^ kMask;
                }
            }
            break;
        }

        case Mode::ULAW:
        {
            uint8_t *dst = mOutputFrame->data();

            for (size_t i = numFrames; i--;) {
                uint16_t in = (*src++) >> 2;  // Convert from 16-bit to 14-bit.
                uint8_t inverseSign = 0x80;

                if (in & 0x8000) {
                    in = ~in;
                    inverseSign = 0x00;
                }

                in += 33;

                auto numLeadingZeroes = clz16(in);
                auto suffixLength = 16 - numLeadingZeroes;

                static constexpr uint8_t kMask = 0xff;

                if (suffixLength <= 6) {
                    *dst++ = (((in >> 1) & 0x0f) | inverseSign) ^ kMask;
                } else {
                    auto shift = suffixLength - 5;
                    auto abcd = (in >> shift) & 0x0f;
                    *dst++ = (abcd | ((shift - 1) << 4) | inverseSign) ^ kMask;
                }
            }
            break;
        }

        default:
            LOG(FATAL) << "Should not be here.";
    }
}

////////////////////////////////////////////////////////////////////////////////

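// The constructor selects an encoder implementation based on the requested
// format and forwards every encoded buffer to StreamingSource::onAccessUnit.
// ADTS framing is rejected here since it is presumably only meaningful for an
// AAC-based format.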
AudioSource::AudioSource(Format format, bool useADTSFraming)
    : mInitCheck(-ENODEV),
      mState(STOPPED)
#if SIMULATE_AUDIO
      ,mPhase(0)
#endif
{
    switch (format) {
        case Format::OPUS:
        {
            CHECK(!useADTSFraming);
            mEncoder.reset(new OPUSEncoder);
            break;
        }

        case Format::G711_ALAW:
        case Format::G711_ULAW:
        {
            CHECK(!useADTSFraming);

            mEncoder.reset(
                    new G711Encoder(
                        (format == Format::G711_ALAW)
                            ? G711Encoder::Mode::ALAW
                            : G711Encoder::Mode::ULAW));
            break;
        }

        default:
            LOG(FATAL) << "Should not be here.";
    }

    mEncoder->setFrameCallback([this](const std::shared_ptr<SBuffer> &accessUnit) {
        StreamingSource::onAccessUnit(accessUnit);
    });

    mInitCheck = 0;
}

AudioSource::~AudioSource() {
    stop();
}

int32_t AudioSource::initCheck() const {
    return mInitCheck;
}

int32_t AudioSource::start() {
    std::lock_guard<std::mutex> autoLock(mLock);

    if (mState != STOPPED) {
        return 0;
    }

    mEncoder->reset();

    mState = RUNNING;

#if SIMULATE_AUDIO
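    // Simulated capture path: synthesize a sine wave at kFrequency and feed
    // it to the encoder in buffers of kNumFramesPerBuffer frames, sleeping so
    // that buffers are produced in real time. kSampleRate, kFrequency,
    // kNumChannels and kNumFramesPerBuffer are presumably defined in
    // AudioSource.h.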
    mThread.reset(
            new std::thread([this]{
                auto startTime = std::chrono::steady_clock::now();

                std::vector<uint8_t> raw(
                        sizeof(gce_audio_message)
                            + kNumFramesPerBuffer * kNumChannels * sizeof(int16_t));

                gce_audio_message *buffer =
                        reinterpret_cast<gce_audio_message *>(raw.data());

                buffer->message_type = gce_audio_message::DATA_SAMPLES;
                buffer->frame_size = kNumChannels * sizeof(int16_t);
                buffer->frame_rate = kSampleRate;
                buffer->stream_number = 0;

                const double k = (double)kFrequency / kSampleRate * (2.0 * M_PI);

                while (mState != STOPPING) {
                    std::chrono::microseconds durationSinceStart(
                            (mPhase  * 1000000ll) / kSampleRate);

                    auto time = startTime + durationSinceStart;
                    auto now = std::chrono::steady_clock::now();
                    auto delayUs = std::chrono::duration_cast<
                            std::chrono::microseconds>(time - now).count();

                    if (delayUs > 0) {
                        usleep(delayUs);
                    }

                    auto usSinceStart =
                        std::chrono::duration_cast<std::chrono::microseconds>(
                                std::chrono::steady_clock::now() - startTime).count();

                    buffer->time_presented.tv_sec = usSinceStart / 1000000ll;

                    buffer->time_presented.tv_nsec =
                        (usSinceStart % 1000000ll) * 1000;

                    int16_t *ptr =
                        reinterpret_cast<int16_t *>(
                                raw.data() + sizeof(gce_audio_message));

                    double x = mPhase * k;
                    for (size_t i = 0; i < kNumFramesPerBuffer; ++i) {
                        int16_t amplitude = (int16_t)(32767.0 * sin(x));

                        *ptr++ = amplitude;
                        if (kNumChannels == 2) {
                            *ptr++ = amplitude;
                        }

                        x += k;
                    }

                    mEncoder->encode(raw.data(), raw.size());

                    mPhase += kNumFramesPerBuffer;
                }
            }));
#else
/*
    if (mRegionView) {
        mThread.reset(
                new std::thread([this]{
                    while (mState != STOPPING) {
                        uint8_t buffer[4096];

                        struct timespec absTimeLimit;
                        vsoc::RegionView::GetFutureTime(
                                1000000000ll ns_from_now, &absTimeLimit);

                        intptr_t res = mRegionView->data()->audio_queue.Read(
                                mRegionView,
                                reinterpret_cast<char *>(buffer),
                                sizeof(buffer),
                                &absTimeLimit);

                        if (res < 0) {
                            if (res == -ETIMEDOUT) {
                                LOG(VERBOSE) << "AudioSource read timed out";
                            }
                            continue;
                        }

                        if (mState == RUNNING) {
                            mEncoder->encode(buffer, static_cast<size_t>(res));
                        }
                    }
            }));
    }
    */
#endif  // SIMULATE_AUDIO

    return 0;
}

int32_t AudioSource::stop() {
    std::lock_guard<std::mutex> autoLock(mLock);

    if (mState == STOPPED) {
        return 0;
    }

    mState = STOPPING;

    if (mThread) {
        mThread->join();
        mThread.reset();
    }

    mState = STOPPED;

    return 0;
}

int32_t AudioSource::requestIDRFrame() {
    return 0;
}

void AudioSource::inject(const void *data, size_t size) {
    // Only used in the case of CrosVM operation.

    std::lock_guard<std::mutex> autoLock(mLock);
    if (mState != State::RUNNING) {
        return;
    }

    mEncoder->encode(static_cast<const uint8_t *>(data), size);
}

}  // namespace android