/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media.audio.cts;

import android.annotation.RawRes;
import android.content.Context;
import android.content.pm.PackageManager;
import android.content.res.AssetFileDescriptor;
import android.content.res.Resources;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioProfile;
import android.media.AudioTimestamp;
import android.media.AudioTrack;
import android.media.audio.cts.R;
import android.platform.test.annotations.AppModeSdkSandbox;
import android.util.Log;

import com.android.compatibility.common.util.CtsAndroidTestCase;
import com.android.compatibility.common.util.FrameworkSpecificTest;

import java.io.BufferedInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.ShortBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

// Test the Java AudioTrack surround sound and HDMI passthrough.
// Most tests involve creating a track with a given format and then playing
// a few seconds of audio. The playback is verified by measuring the output
// sample rate based on the AudioTimestamps.

@FrameworkSpecificTest
@AppModeSdkSandbox(reason = "Allow test in the SDK sandbox (does not prevent other modes).")
public class AudioTrackSurroundTest extends CtsAndroidTestCase {
    private static final String TAG = "AudioTrackSurroundTest";

    private static final double MAX_RATE_TOLERANCE_FRACTION = 0.01;
    private static final boolean LOG_TIMESTAMPS = false; // set true for debugging
    // just long enough to measure the rate
    private static final long SAMPLE_RATE_SHORT_TEST_DURATION_MILLIS = 5000;
    // AC3 and IEC61937 tracks require more time
    private static final long SAMPLE_RATE_LONG_TEST_DURATION_MILLIS = 12000;

    // Should we fail if there is no PCM16 profile reported?
    // This can happen if, for example, an ATV set top box does not have its HDMI cable plugged in.
    private static final boolean REQUIRE_PCM_PROFILE = false;

    private final static long NANOS_PER_MILLISECOND = 1000000L;
    private final static int MILLIS_PER_SECOND = 1000;
    private final static long NANOS_PER_SECOND = NANOS_PER_MILLISECOND * MILLIS_PER_SECOND;

    private final static int RES_AC3_SPDIF_VOICE_32000 = R.raw.voice12_32k_128kbps_15s_ac3_spdif;
    private final static int RES_AC3_SPDIF_VOICE_44100 = R.raw.voice12_44k_128kbps_15s_ac3_spdif;
    private final static int RES_AC3_SPDIF_VOICE_48000 = R.raw.voice12_48k_128kbps_15s_ac3_spdif;
    private final static int RES_AC3_VOICE_48000 = R.raw.voice12_48k_128kbps_15s_ac3;
    private final static int RES_AC4_L4_SAMPLE_48000 = R.raw.ajoc_fri_13_dmx_10_umx_21_lfe1_ac4;

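    // Encoding of the most recently played track. Used by playAndMeasureRate() to decide
    // whether to pause before switching between compressed formats.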
    private static int mLastPlayedEncoding = AudioFormat.ENCODING_INVALID;

    // Profiles that support various encodings.
    private static AudioProfile mProfilePCM16 = null;
    private static AudioProfile mProfileAC3 = null;
    private static AudioProfile mProfileE_AC3 = null;
    private static AudioProfile mProfileAC4_L4 = null;

    private static AudioProfile mProfileDTS = null;
    private static AudioProfile mProfileDTS_HD = null;
    private static AudioProfile mProfileIEC61937 = null;

    private static AudioAttributes mAudioAttributes = null;

    private static void log(String testName, String message) {
        Log.i(TAG, "[" + testName + "] " + message);
    }

    private static void logw(String testName, String message) {
        Log.w(TAG, "[" + testName + "] " + message);
    }

    private static void loge(String testName, String message) {
        Log.e(TAG, "[" + testName + "] " + message);
    }

    // This is a special method that is called automatically before each test.
    @Override
    protected void setUp() throws Exception {
        // Note that I tried to only scan for encodings once but the static
        // data did not persist properly. That may be a bug.
        // For now, just scan before every test.
        scanProfilesForEncodings();
    }

    private void scanProfilesForEncodings() throws Exception {
        final String MTAG = "scanProfilesForEncodings";
        // Scan profiles to see which encodings are supported.
        AudioManager audioManager = (AudioManager) getContext()
                .getSystemService(Context.AUDIO_SERVICE);
        mAudioAttributes = new AudioAttributes.Builder()
                                .setUsage(AudioAttributes.USAGE_MEDIA)
                                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                                .build();
        List<AudioProfile> profiles = audioManager.getDirectProfilesForAttributes(mAudioAttributes);
        if (profiles.size() == 0) {
            log(MTAG, "no direct profiles for media + music found");
        }
        for (AudioProfile profile : profiles) {
            log(MTAG, "scanning profiles, profile = " + profile.toString());
            if (profile.getEncapsulationType() == AudioProfile.AUDIO_ENCAPSULATION_TYPE_IEC61937) {
                mProfileIEC61937 = profile;
                log(MTAG, "mProfileIEC61937 set to " + profile);
                break;
            } else { // AudioProfile.AUDIO_ENCAPSULATION_TYPE_NONE
                switch (profile.getFormat()) {
                    case AudioFormat.ENCODING_PCM_16BIT:
                        mProfilePCM16 = profile;
                        log(MTAG, "mProfilePCM16 set to " + profile);
                        break;
                    case AudioFormat.ENCODING_AC3:
                        mProfileAC3 = profile;
                        log(MTAG, "mProfileAC3 set to " + profile);
                        break;
                    case AudioFormat.ENCODING_E_AC3:
                        mProfileE_AC3 = profile;
                        log(MTAG, "mProfileE_AC3 set to " + profile);
                        break;
                    case AudioFormat.ENCODING_AC4_L4:
                        mProfileAC4_L4 = profile;
                        log(MTAG, "mProfileAC4_L4 set to " + profile);
                        break;
                    case AudioFormat.ENCODING_DTS:
                        mProfileDTS = profile;
                        log(MTAG, "mProfileDTS set to " + profile);
                        break;
                    case AudioFormat.ENCODING_DTS_HD:
                        mProfileDTS_HD = profile;
                        log(MTAG, "mProfileDTS_HD set to " + profile);
                        break;
                    default:
                        // This is OK. It is just an encoding that we don't care about.
                        break;
                }
            }

        }
    }

    // Load a resource into a byte[]
    private byte[] loadRawResourceBytes(@RawRes int id) throws Exception {
        InputStream is = getContext().getResources().openRawResource(id);
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (BufferedInputStream bis = new BufferedInputStream(is)) {
            for (int b = bis.read(); b != -1; b = bis.read()) {
                bos.write(b);
            }
        }
        return bos.toByteArray();
    }

    // Load a resource into a short[]
    private short[] loadRawResourceShorts(@RawRes int id) throws Exception {
        byte[] byteBuffer = loadRawResourceBytes(id);
        ShortBuffer shortBuffer =
                ByteBuffer.wrap(byteBuffer).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer();
        // Unfortunately, ShortBuffer.array() works with allocated buffers only.
        short[] mainBuffer = new short[byteBuffer.length / 2];
        for (int i = 0; i < mainBuffer.length; i++) {
            mainBuffer[i] = shortBuffer.get();
        }
        return mainBuffer;
    }

    public void testLoadSineSweep() throws Exception {
        final String TEST_NAME = "testLoadSineSweep";
        short[] shortData = loadRawResourceShorts(R.raw.sinesweepraw);
        assertTrue(TEST_NAME + ": load sinesweepraw as shorts", shortData.length > 100);
        byte[] byteData = loadRawResourceBytes(R.raw.sinesweepraw);
        assertTrue(TEST_NAME + ": load sinesweepraw as bytes", byteData.length > shortData.length);
    }

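    // Build a streaming AudioTrack for the given format, sized to three times the minimum
    // buffer so that priming and steady-state writes are unlikely to underrun.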
    private static AudioTrack createAudioTrack(int sampleRate, int encoding, int channelConfig) {
        final String TEST_NAME = "createAudioTrack";
        int minBufferSize = AudioTrack.getMinBufferSize(
                sampleRate, channelConfig,
                encoding);
        assertTrue(TEST_NAME + ": getMinBufferSize", minBufferSize > 0);
        int bufferSize = minBufferSize * 3; // plenty big
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC,
                sampleRate, channelConfig,
                encoding, bufferSize,
                AudioTrack.MODE_STREAM);
        return track;
    }

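    // Collects AudioTimestamps while a track plays, estimates the effective output sample
    // rate from them, and checks that frame position and time advance smoothly.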
    static class TimestampAnalyzer {
        ArrayList<AudioTimestamp> mTimestamps = new ArrayList<AudioTimestamp>();
        AudioTimestamp mPreviousTimestamp = null;

        static String timestampToString(AudioTimestamp timestamp) {
            if (timestamp == null)
                return "null";
            return "(pos = " + timestamp.framePosition + ", nanos = " + timestamp.nanoTime + ")";
        }

        // Add timestamp if unique and valid.
        void addTimestamp(AudioTrack track) {
            AudioTimestamp timestamp = new AudioTimestamp();
            boolean gotTimestamp = track.getTimestamp(timestamp);
            if (gotTimestamp) {
                // Only save timestamps after the data is flowing.
                boolean accepted = mPreviousTimestamp != null
                        && timestamp.framePosition > 0
                        && timestamp.nanoTime != mPreviousTimestamp.nanoTime
                        && timestamp.framePosition != mPreviousTimestamp.framePosition;
                if (accepted) {
                    mTimestamps.add(timestamp);
                }
                Log.d(TAG, (accepted ? "" : "NOT ") + "added ts " + timestampToString(timestamp));
                mPreviousTimestamp = timestamp;
            }
        }

        void checkIndividualTimestamps(int sampleRate) {
            AudioTimestamp previous = null;
            double sumDeltaSquared = 0.0;
            int populationSize = 0;
            double maxDeltaMillis = 0.0;
            // Make sure the timestamps are smooth and don't go retrograde.
            for (AudioTimestamp timestamp : mTimestamps) {
                if (previous != null) {

                    assertTrue("framePosition must be monotonic",
                            timestamp.framePosition >= previous.framePosition);
                    assertTrue("nanoTime must be monotonic",
                            timestamp.nanoTime >= previous.nanoTime);

                    if (timestamp.framePosition > previous.framePosition) {
                        // Measure timing jitter.
                        // Calculate predicted duration based on measured rate and compare
                        // it with actual duration.
                        final double TOLERANCE_MILLIS = 2.0;
                        long elapsedFrames = timestamp.framePosition - previous.framePosition;
                        long elapsedNanos = timestamp.nanoTime - previous.nanoTime;
                        double measuredMillis = elapsedNanos / (double) NANOS_PER_MILLISECOND;
                        double expectedMillis = elapsedFrames * (double) MILLIS_PER_SECOND
                            / sampleRate;
                        double deltaMillis = measuredMillis - expectedMillis;
                        sumDeltaSquared += deltaMillis * deltaMillis;
                        populationSize++;
                        // We only issue a warning here because the CDD does not mandate a
                        // specific tolerance.
                        double absDeltaMillis = Math.abs(deltaMillis);
                        if (absDeltaMillis > TOLERANCE_MILLIS) {
                            Log.w(TAG, "measured time exceeds expected"
                                + ", srate = " + sampleRate
                                + ", frame = " + timestamp.framePosition
                                + ", expected = " + expectedMillis
                                + ", measured = " + measuredMillis + " (msec)"
                                );
                        }
                        if (absDeltaMillis > maxDeltaMillis) {
                            maxDeltaMillis = absDeltaMillis;
                        }
                    }
                }
                previous = timestamp;
            }
            Log.d(TAG, "max abs(delta) from expected duration = " + maxDeltaMillis + " msec");
            if (populationSize > 0) {
                double deviation = Math.sqrt(sumDeltaSquared / populationSize);
                Log.d(TAG, "standard deviation from expected duration = " + deviation + " msec");
            }
        }

        // Use collected timestamps to estimate a sample rate.
        double estimateSampleRate() {
            Log.w(TAG, "timestamps collected: " + mTimestamps.size());
            assertTrue("expect many timestamps, got " + mTimestamps.size(),
                    mTimestamps.size() > 10);
            // Use first and last timestamp to get the most accurate rate.
            AudioTimestamp first = mTimestamps.get(0);
            AudioTimestamp last = mTimestamps.get(mTimestamps.size() - 1);
            return calculateSampleRate(first, last);
        }

        /**
         * Compute the average sample rate, in frames per second, between two timestamps.
         *
         * @param timestamp1 the earlier timestamp
         * @param timestamp2 the later timestamp
         * @return the measured frame rate over the interval
         */
        private double calculateSampleRate(AudioTimestamp timestamp1, AudioTimestamp timestamp2) {
            long elapsedFrames = timestamp2.framePosition - timestamp1.framePosition;
            long elapsedNanos = timestamp2.nanoTime - timestamp1.nanoTime;
            double measuredRate = elapsedFrames * (double) NANOS_PER_SECOND / elapsedNanos;
            if (LOG_TIMESTAMPS) {
                Log.i(TAG, "calculateSampleRate(), elapsedFrames =, " + elapsedFrames
                        + ", measuredRate =, "
                        + (int) measuredRate);
            }
            return measuredRate;
        }
    }

    // Class for looping a recording for several seconds and measuring the sample rate.
    // This is not static because it needs to call getContext().
    abstract class SamplePlayerBase {
        private final int mSampleRate;
        private final int mEncoding;
        private final int mChannelConfig;
        private int mBlockSize = 512;
        protected int mOffset = 0;
        protected AudioTrack mTrack;
        private final TimestampAnalyzer mTimestampAnalyzer = new TimestampAnalyzer();

        SamplePlayerBase(int sampleRate, int encoding, int channelConfig) {
            mSampleRate = sampleRate;
            mEncoding = encoding;
            mChannelConfig = channelConfig;
        }

        // Use abstract write to handle byte[] or short[] data.
        protected abstract int writeBlock(int numSamples);

        private int primeBuffer() {
            // Will not block when track is stopped.
            return writeBlock(Integer.MAX_VALUE);
        }

        // Add a warning to the assert message that might help folks figure out why their
        // PCM test is failing.
        private String getPcmWarning() {
            return (mProfilePCM16 == null && AudioFormat.isEncodingLinearPcm(mEncoding))
                ? " (No PCM profile!)" : "";
        }

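        // Play the loaded data for roughly testDurationMillis, collecting timestamps along
        // the way, then verify the underrun count, the measured sample rate, and the
        // consistency of the individual timestamps.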
        public void playAndMeasureRate(long testDurationMillis) throws Exception {
            final String TEST_NAME = "playAndMeasureRate";

            if (mLastPlayedEncoding == AudioFormat.ENCODING_INVALID ||
                    !AudioFormat.isEncodingLinearPcm(mEncoding) ||
                    !AudioFormat.isEncodingLinearPcm(mLastPlayedEncoding)) {
                Log.d(TAG, "switching from format: " + mLastPlayedEncoding
                        + " to: " + mEncoding
                        + " requires sleep");
                // Switching between compressed formats may require
                // some time for the HAL to adjust and give proper timing.
                // One second should be ok, but we use 2 just in case.
                Thread.sleep(2000 /* millis */);
            }
            mLastPlayedEncoding = mEncoding;

            log(TEST_NAME, String.format("test using rate = %d, encoding = 0x%08x",
                    mSampleRate, mEncoding));
            // Create a track and prime it.
            mTrack = createAudioTrack(mSampleRate, mEncoding, mChannelConfig);
            try {
                assertEquals(TEST_NAME + ": track created " + getPcmWarning(),
                        AudioTrack.STATE_INITIALIZED,
                        mTrack.getState());

                int bytesWritten = 0;
                mOffset = primeBuffer(); // prime the buffer
                assertTrue(TEST_NAME + ": priming offset = " + mOffset + ", " + getPcmWarning(),
                    mOffset > 0);
                bytesWritten += mOffset;

                // Play for a while.
                mTrack.play();

                log(TEST_NAME, "native rate = "
                        + mTrack.getNativeOutputSampleRate(mTrack.getStreamType()));
                long elapsedMillis = 0;
                long startTime = System.currentTimeMillis();
                while (elapsedMillis < testDurationMillis) {
                    writeBlock(mBlockSize);
                    elapsedMillis = System.currentTimeMillis() - startTime;
                    mTimestampAnalyzer.addTimestamp(mTrack);
                }

                // Did we underrun? Allow 0 or 1 because there is sometimes
                // an underrun on startup.
                int underrunCount1 = mTrack.getUnderrunCount();
                assertTrue(TEST_NAME + ": too many underruns, got " + underrunCount1
                        + ", " + getPcmWarning(),
                        underrunCount1 < 2);

                // Estimate the sample rate and compare it with expected.
                double estimatedRate = mTimestampAnalyzer.estimateSampleRate();
                Log.d(TAG, "measured sample rate = " + estimatedRate);
                assertEquals(TEST_NAME + ": measured sample rate " + getPcmWarning(),
                        mSampleRate, estimatedRate, mSampleRate * MAX_RATE_TOLERANCE_FRACTION);

                // Check for jitter or retrograde motion in each timestamp.
                mTimestampAnalyzer.checkIndividualTimestamps(mSampleRate);

            } finally {
                mTrack.release();
            }
        }
    }

    // Create player for short[]
    class SamplePlayerShorts extends SamplePlayerBase {
        private final short[] mData;

        SamplePlayerShorts(int sampleRate, int encoding, int channelConfig) {
            super(sampleRate, encoding, channelConfig);
            mData = new short[64 * 1024];
            // Fill with noise. We should not hear the noise for IEC61937.
            int amplitude = 8000;
            Random random = new Random();
            for (int i = 0; i < mData.length; i++) {
                mData[i] = (short)(random.nextInt(amplitude) - (amplitude / 2));
            }
        }

        SamplePlayerShorts(int sampleRate, int encoding, int channelConfig, @RawRes int resourceId)
                throws Exception {
            super(sampleRate, encoding, channelConfig);
            mData = loadRawResourceShorts(resourceId);
            assertTrue("SamplePlayerShorts: load resource file as shorts", mData.length > 0);
        }

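        // Write up to numShorts shorts from mData; when the end of the data is reached,
        // rewind to the beginning so playback loops for the duration of the test.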
        @Override
        protected int writeBlock(int numShorts) {
            int result = 0;
            int shortsToWrite = numShorts;
            int shortsLeft = mData.length - mOffset;
            if (shortsToWrite > shortsLeft) {
                shortsToWrite = shortsLeft;
            }
            if (shortsToWrite > 0) {
                result = mTrack.write(mData, mOffset, shortsToWrite);
                mOffset += result;
            } else {
                mOffset = 0; // rewind
            }
            return result;
        }
    }

    // Create player for byte[]
    class SamplePlayerBytes extends SamplePlayerBase {
        private final byte[] mData;

        SamplePlayerBytes(int sampleRate, int encoding, int channelConfig) {
            super(sampleRate, encoding, channelConfig);
            mData = new byte[128 * 1024];
        }

        SamplePlayerBytes(int sampleRate, int encoding, int channelConfig, @RawRes int resourceId)
                throws Exception {
            super(sampleRate, encoding, channelConfig);
            mData = loadRawResourceBytes(resourceId);
            assertTrue("SamplePlayerBytes: load resource file as bytes", mData.length > 0);
        }

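        // Write up to numBytes bytes from mData, rewinding to the start once the data is
        // exhausted, mirroring SamplePlayerShorts.writeBlock().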
        @Override
        protected int writeBlock(int numBytes) {
            int result = 0;
            int bytesToWrite = numBytes;
            int bytesLeft = mData.length - mOffset;
            if (bytesToWrite > bytesLeft) {
                bytesToWrite = bytesLeft;
            }
            if (bytesToWrite > 0) {
                result = mTrack.write(mData, mOffset, bytesToWrite);
                mOffset += result;
            } else {
                mOffset = 0; // rewind
            }
            return result;
        }
    }

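    // The compressed-format and IEC61937 tests below are skipped silently when the
    // corresponding direct profile was not reported by scanProfilesForEncodings().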
    public void testPlayAC3Bytes() throws Exception {
        if (mProfileAC3 != null) {
            SamplePlayerBytes player = new SamplePlayerBytes(
                    48000, AudioFormat.ENCODING_AC3, AudioFormat.CHANNEL_OUT_STEREO,
                    RES_AC3_VOICE_48000);
            player.playAndMeasureRate(SAMPLE_RATE_LONG_TEST_DURATION_MILLIS);
        }
    }

    public void testPlayAC3Shorts() throws Exception {
        if (mProfileAC3 != null) {
            SamplePlayerShorts player = new SamplePlayerShorts(
                    48000, AudioFormat.ENCODING_AC3, AudioFormat.CHANNEL_OUT_STEREO,
                    RES_AC3_VOICE_48000);
            player.playAndMeasureRate(SAMPLE_RATE_LONG_TEST_DURATION_MILLIS);
        }
    }

    public void testPlayAC4L4Bytes() throws Exception {
        if (mProfileAC4_L4 != null) {
            SamplePlayerBytes player = new SamplePlayerBytes(
                    48000, AudioFormat.ENCODING_AC4_L4, AudioFormat.CHANNEL_OUT_STEREO,
                    RES_AC4_L4_SAMPLE_48000);
            player.playAndMeasureRate(SAMPLE_RATE_LONG_TEST_DURATION_MILLIS);
        }
    }

    public void testPlayIEC61937_32000() throws Exception {
        if (mProfileIEC61937 != null) {
            SamplePlayerShorts player = new SamplePlayerShorts(
                    32000, AudioFormat.ENCODING_IEC61937, AudioFormat.CHANNEL_OUT_STEREO,
                    RES_AC3_SPDIF_VOICE_32000);
            player.playAndMeasureRate(SAMPLE_RATE_LONG_TEST_DURATION_MILLIS);
        }
    }

    public void testPlayIEC61937_44100() throws Exception {
        if (mProfileIEC61937 != null) {
            SamplePlayerShorts player = new SamplePlayerShorts(
                    44100, AudioFormat.ENCODING_IEC61937, AudioFormat.CHANNEL_OUT_STEREO,
                    RES_AC3_SPDIF_VOICE_44100);
            player.playAndMeasureRate(SAMPLE_RATE_LONG_TEST_DURATION_MILLIS);
        }
    }

    public void testPlayIEC61937_48000() throws Exception {
        if (mProfileIEC61937 != null) {
            SamplePlayerShorts player = new SamplePlayerShorts(
                    48000, AudioFormat.ENCODING_IEC61937, AudioFormat.CHANNEL_OUT_STEREO,
                    RES_AC3_SPDIF_VOICE_48000);
            player.playAndMeasureRate(SAMPLE_RATE_LONG_TEST_DURATION_MILLIS);
        }
    }

    public void testPcmSupport() throws Exception {
        if (REQUIRE_PCM_PROFILE) {
            // There should always be a fake PCM profile available.
            assertTrue("testPcmSupport: PCM should be supported."
                    + " On ATV device please check HDMI connection.",
                    mProfilePCM16 != null);
        }
    }

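    // PCM tests always run unless REQUIRE_PCM_PROFILE is set and no PCM16 profile was found.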
    private boolean isPcmTestingEnabled() {
        return (mProfilePCM16 != null || !REQUIRE_PCM_PROFILE);
    }

    public void testPlaySineSweepShorts() throws Exception {
        if (isPcmTestingEnabled()) {
            SamplePlayerShorts player = new SamplePlayerShorts(
                    44100, AudioFormat.ENCODING_PCM_16BIT, AudioFormat.CHANNEL_OUT_STEREO,
                    R.raw.sinesweepraw);
            player.playAndMeasureRate(SAMPLE_RATE_SHORT_TEST_DURATION_MILLIS);
        }
    }

    public void testPlaySineSweepBytes() throws Exception {
        if (isPcmTestingEnabled()) {
            SamplePlayerBytes player = new SamplePlayerBytes(
                    44100, AudioFormat.ENCODING_PCM_16BIT, AudioFormat.CHANNEL_OUT_STEREO,
                    R.raw.sinesweepraw);
            player.playAndMeasureRate(SAMPLE_RATE_SHORT_TEST_DURATION_MILLIS);
        }
    }

    public void testPlaySineSweepBytes48000() throws Exception {
        if (isPcmTestingEnabled()) {
            SamplePlayerBytes player = new SamplePlayerBytes(
                    48000, AudioFormat.ENCODING_PCM_16BIT, AudioFormat.CHANNEL_OUT_STEREO,
                    R.raw.sinesweepraw);
            player.playAndMeasureRate(SAMPLE_RATE_SHORT_TEST_DURATION_MILLIS);
        }
    }

    public void testPlaySineSweepShortsMono() throws Exception {
        if (isPcmTestingEnabled()) {
            SamplePlayerShorts player = new SamplePlayerShorts(44100, AudioFormat.ENCODING_PCM_16BIT,
                    AudioFormat.CHANNEL_OUT_MONO,
                    R.raw.sinesweepraw);
            player.playAndMeasureRate(SAMPLE_RATE_SHORT_TEST_DURATION_MILLIS);
        }
    }

    public void testPlaySineSweepBytesMono()
            throws Exception {
        if (isPcmTestingEnabled()) {
            SamplePlayerBytes player = new SamplePlayerBytes(44100, AudioFormat.ENCODING_PCM_16BIT,
                    AudioFormat.CHANNEL_OUT_MONO, R.raw.sinesweepraw);
            player.playAndMeasureRate(SAMPLE_RATE_SHORT_TEST_DURATION_MILLIS);
        }
    }

}