/frameworks/av/media/libaudioprocessing/ |
D | RecordBufferConverter.cpp |
     78  AudioBufferProvider *provider, size_t frames)   in convert() argument
     90  for (size_t i = frames; i > 0; ) {   in convert()
     94  frames -= i; // cannot fill request.   in convert()
    109  if (mBufFrameSize != 0 && mBufFrames < frames) {   in convert()
    111  mBufFrames = frames;   in convert()
    115  memset(mBuf, 0, frames * mBufFrameSize);   in convert()
    116  frames = mResampler->resample((int32_t*)mBuf, frames, provider);   in convert()
    118  convertResampler(dst, mBuf, frames);   in convert()
    125  return frames;   in convert()
    225  void *dst, const void *src, size_t frames)   in convertNoResampler() argument
    [all …]
|
D | BufferProviders.cpp |
    284  void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)   in copyFrames() argument
    287  mInBuffer->setFrameCount(frames);   in copyFrames()
    288  mInBuffer->update(mInFrameSize * frames);   in copyFrames()
    289  mOutBuffer->setFrameCount(frames);   in copyFrames()
    294  mOutBuffer->update(mOutFrameSize * frames);   in copyFrames()
    299  mOutBuffer->commit(mOutFrameSize * frames);   in copyFrames()
    361  void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)   in copyFrames() argument
    364  src, mInputChannels, mIdxAry, mSampleSize, frames);   in copyFrames()
    382  void ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)   in copyFrames() argument
    384  memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);   in copyFrames()
    [all …]
|
/frameworks/av/media/libeffects/downmix/tests/ |
D | downmix_tests.cpp |
     74  const size_t frames = input.size() / channels;   in channelStatistics() local
     75  if (frames > 0) {   in channelStatistics()
     77  for (size_t i = 0; i < frames; ++i) {   in channelStatistics()
     96  size_t frames = 100;   in testBalance() local
     99  std::vector<float> input(frames * inChannels);   in testBalance()
    100  std::vector<float> output(frames * outChannels);   in testBalance()
    113  for (unsigned j = 0; j < frames; ++j) {   in testBalance()
    118  run(sampleRate, channelMask, input, output, frames);   in testBalance()
    162  std::vector<float>& input, std::vector<float>& output, size_t frames) {   in run() argument
    165  ASSERT_EQ(frames * inputChannelCount_, input.size());   in run()
    [all …]
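The test above derives the frame count from an interleaved sample buffer (`frames = input.size() / channels`) and then walks it frame by frame. A minimal standalone sketch of that bookkeeping; the function name and the per-channel mean computation are illustrative, not part of the test:

```cpp
#include <cstddef>
#include <vector>

// Per-channel mean of an interleaved float buffer; mirrors the
// frames = samples / channels loop shape matched above.
std::vector<float> channelMeans(const std::vector<float>& input, size_t channels) {
    std::vector<float> mean(channels, 0.f);
    const size_t frames = input.size() / channels;  // interleaved layout
    if (frames == 0) return mean;
    for (size_t i = 0; i < frames; ++i) {
        for (size_t c = 0; c < channels; ++c) {
            mean[c] += input[i * channels + c];
        }
    }
    for (float& m : mean) m /= frames;
    return mean;
}
```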
|
/frameworks/av/services/audioflinger/ |
D | NBAIO_Tee.h |
    126  TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
    127  return mTee->set(format, flags, frames);
    131  TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
    132  return mTee->set(Format_from_SR_C(sampleRate, channelCount, format), flags, frames);
    181  status_t set(const NBAIO_Format &format, TEE_FLAG flags, size_t frames) {   in set() argument
    200  if (frames == 0) {   in set()
    202  frames = DEFAULT_TEE_FRAMES;   in set()
    208  if (Format_isEqual(format, mFormat) && frames == mFrames) {   in set()
    213  auto sinksource = makeSinkSource(format, frames, &enabled);   in set()
    222  mFrames = frames;   in set()
    [all …]
|
D | AudioStreamOut.cpp |
     54  status_t AudioStreamOut::getRenderPosition(uint64_t *frames)   in getRenderPosition() argument
     76  *frames = mRenderPosition / mRateMultiplier;   in getRenderPosition()
     82  status_t AudioStreamOut::getRenderPosition(uint32_t *frames)   in getRenderPosition() argument
     87  *frames = (uint32_t)position64;   in getRenderPosition()
     92  status_t AudioStreamOut::getPresentationPosition(uint64_t *frames, struct timespec *timestamp)   in getPresentationPosition() argument
    110  *frames = adjustedPosition / mRateMultiplier;   in getPresentationPosition()
    113  *frames = halPosition;   in getPresentationPosition()
|
D | AudioStreamOut.h |
     57  status_t getRenderPosition(uint32_t *frames);
     59  virtual status_t getRenderPosition(uint64_t *frames);
     61  virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
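Both AudioStreamOut entries show the same idea: the position reported by the HAL is divided by a rate multiplier before being returned as client frames, and the legacy 32-bit overload simply narrows the 64-bit value. A hedged standalone sketch of that arithmetic; the struct and field names are assumptions, not the AudioFlinger code:

```cpp
#include <cstdint>

struct StreamPositionSketch {
    uint64_t halPosition = 0;     // frames as counted by the HAL
    uint32_t rateMultiplier = 1;  // >1 for offloaded/direct streams (assumed)

    // 64-bit query: scale the HAL position down to client frames,
    // like "*frames = mRenderPosition / mRateMultiplier" above.
    uint64_t renderPosition64() const { return halPosition / rateMultiplier; }

    // 32-bit query: same value, truncated as in the (uint32_t) cast at line 87.
    uint32_t renderPosition32() const {
        return static_cast<uint32_t>(renderPosition64());
    }
};
```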
|
D | ThreadMetrics.h |
    122  void logUnderrunFrames(size_t frames) {   in logUnderrunFrames() argument
    124  if (mLastUnderrun == false && frames > 0) {   in logUnderrunFrames()
    127  mLastUnderrun = (frames > 0);   in logUnderrunFrames()
    128  mUnderrunFrames += frames;   in logUnderrunFrames()
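The matched lines spell out the underrun-accounting pattern almost completely: a new underrun event is detected on the false-to-true edge, and the lost frames are accumulated. A minimal sketch of that pattern, with the event counter assumed since it is not visible in the match:

```cpp
#include <cstddef>

class UnderrunLogSketch {
public:
    void logUnderrunFrames(size_t frames) {
        if (!mLastUnderrun && frames > 0) {
            ++mUnderrunEvents;           // assumed: count the start of a new underrun
        }
        mLastUnderrun = (frames > 0);    // remember state for edge detection
        mUnderrunFrames += frames;       // total frames lost to underruns
    }

private:
    bool mLastUnderrun = false;
    size_t mUnderrunFrames = 0;
    size_t mUnderrunEvents = 0;          // not shown in the match; illustrative
};
```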
|
/frameworks/av/media/libstagefright/webm/ |
D | WebmFrameThread.cpp |
    115  List<const sp<WebmFrame> >& frames,   in initCluster()
    118  CHECK(!frames.empty() && children.empty());   in initCluster()
    120  const sp<WebmFrame> f = *(frames.begin());   in initCluster()
    141  void WebmFrameSinkThread::flushFrames(List<const sp<WebmFrame> >& frames, bool last) {   in flushFrames() argument
    142  if (frames.empty()) {   in flushFrames()
    148  initCluster(frames, clusterTimecodeL, children);   in flushFrames()
    152  size_t n = frames.size();   in flushFrames()
    165  const sp<WebmFrame> f = *(frames.begin());   in flushFrames()
    172  initCluster(frames, clusterTimecodeL, children);   in flushFrames()
    175  frames.erase(frames.begin());   in flushFrames()
    [all …]
|
/frameworks/av/media/libaaudio/src/legacy/ |
D | AudioStreamLegacy.h |
     78  virtual int64_t incrementClientFrameCounter(int32_t frames) = 0;
    107  int64_t incrementFramesWritten(int32_t frames) {   in incrementFramesWritten() argument
    108  return mFramesWritten.increment(frames);   in incrementFramesWritten()
    111  int64_t incrementFramesRead(int32_t frames) {   in incrementFramesRead() argument
    112  return mFramesRead.increment(frames);   in incrementFramesRead()
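The legacy stream keeps 64-bit running totals that are advanced by 32-bit per-callback deltas. A sketch of that counter shape, using an atomic stand-in for the real counter type (which the match does not show):

```cpp
#include <atomic>
#include <cstdint>

class FrameCounterSketch {
public:
    // Returns the new total, like incrementFramesWritten()/incrementFramesRead().
    int64_t increment(int32_t frames) {
        return mTotal.fetch_add(frames, std::memory_order_relaxed) + frames;
    }
private:
    std::atomic<int64_t> mTotal{0};
};
```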
|
/frameworks/av/media/libaudioprocessing/tests/ |
D | test_utils.h |
    102  TestProvider(void* addr, size_t frames, size_t frameSize,
    105  mNumFrames(frames),
    194  static void createSine(void *vbuffer, size_t frames,
    199  for (size_t i = 0; i < frames; ++i) {
    218  static void createChirp(void *vbuffer, size_t frames,
    224  double k = (maxfreq - minfreq) / (2. * tscale * frames);
    225  for (size_t i = 0; i < frames; ++i) {
    281  createBufferByFrames<T>(info.channels, info.samplerate, info.frames);
    291  void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames)
    293  mNumFrames = frames;
|
/frameworks/native/services/surfaceflinger/TimeStats/timestatsatomsproto/ |
D | timestats_atoms.proto |
     54  // Total number of frames presented during the tracing period
     58  // Total number of frames missed
     62  // Total number of frames that fell back to client composition
     95  // Number of frames where SF saw a frame, based on its frame timeline.
     99  // Number of frames where SF saw a janky frame.
    102  // Number of janky frames where SF spent a long time on the CPU.
    105  // Number of janky frames where SF spent a long time on the GPU.
    108  // Number of janky frames where SF missed the frame deadline, but there
    112  // Number of janky frames where the app missed the frame deadline, but
    116  // Number of janky frames that were caused because of scheduling errors in
    [all …]
|
/frameworks/av/media/libstagefright/rtsp/ |
D | ARTPAssembler.cpp |
     81  const List<sp<ABuffer> > &frames) {   in MakeADTSCompoundFromAACFrames()
     83  for (List<sp<ABuffer> >::const_iterator it = frames.begin();   in MakeADTSCompoundFromAACFrames()
     84  it != frames.end(); ++it) {   in MakeADTSCompoundFromAACFrames()
     91  for (List<sp<ABuffer> >::const_iterator it = frames.begin();   in MakeADTSCompoundFromAACFrames()
     92  it != frames.end(); ++it) {   in MakeADTSCompoundFromAACFrames()
    121  CopyTimes(accessUnit, *frames.begin());   in MakeADTSCompoundFromAACFrames()
|
/frameworks/base/core/java/android/speech/tts/ |
D | SynthesisPlaybackQueueItem.java |
    213  public final int frames;   field in SynthesisPlaybackQueueItem.ProgressMarker
    219  public ProgressMarker(int frames, int start, int end) {   in ProgressMarker() argument
    220  this.frames = frames;   in ProgressMarker()
    232  int markerInFrames = marker.frames == 0 ? 1 : marker.frames;   in updateMarker()
    251  getDispatcher().dispatchOnRangeStart(marker.start, marker.end, marker.frames);   in onMarkerReached()
|
/frameworks/av/media/libstagefright/bqhelper/tests/ |
D | FrameDropper_test.cpp |
     99  void RunTest(const TestFrame* frames, size_t size) {   in RunTest() argument
    102  int64_t testTimeUs = frames[i].timeUs + jitter;   in RunTest()
    104  (long long)frames[i].timeUs, (long long)testTimeUs, jitter);   in RunTest()
    105  EXPECT_EQ(frames[i].shouldDrop, mFrameDropper->shouldDrop(testTimeUs));   in RunTest()
|
/frameworks/native/services/inputflinger/reader/ |
D | TouchVideoDevice.cpp |
    162  std::vector<TouchVideoFrame> frames = readFrames();   in readAndQueueFrames() local
    163  const size_t numFrames = frames.size();   in readAndQueueFrames()
    169  mFrames.insert(mFrames.end(), std::make_move_iterator(frames.begin()),   in readAndQueueFrames()
    170  std::make_move_iterator(frames.end()));   in readAndQueueFrames()
    181  std::vector<TouchVideoFrame> frames = std::move(mFrames);   in consumeFrames() local
    183  return frames;   in consumeFrames()
    222  std::vector<TouchVideoFrame> frames;   in readFrames() local
    228  frames.push_back(std::move(*frame));   in readFrames()
    230  return frames;   in readFrames()
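The matches show a common queue-then-consume idiom: freshly read frames are move-appended onto a member vector, and consumeFrames() moves the whole batch out in one step. A generic sketch of that idiom; the element type and names are placeholders, not the inputflinger code:

```cpp
#include <iterator>
#include <utility>
#include <vector>

struct Frame { /* payload omitted */ };

struct FrameQueueSketch {
    void queueFrames(std::vector<Frame> frames) {
        mFrames.insert(mFrames.end(),
                       std::make_move_iterator(frames.begin()),
                       std::make_move_iterator(frames.end()));
    }
    std::vector<Frame> consumeFrames() {
        std::vector<Frame> frames = std::move(mFrames);
        mFrames.clear();  // leave the member in a known-empty state after the move
        return frames;
    }
    std::vector<Frame> mFrames;
};
```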
|
/frameworks/wilhelm/tools/permute/ |
D | permute.c |
    202  switch (sfinfo_in.frames) {   in permute()
    205  fprintf(stderr, "%s: unsupported frames %d\n", path_in, (int) sfinfo_in.frames);   in permute()
    212  double durationSeconds = (double) sfinfo_in.frames / (double) sfinfo_in.samplerate;   in permute()
    224  used = split(&s, 0, sfinfo_in.frames, s.mSegmentMax);   in permute()
    241  void *ptr = malloc(sfinfo_in.frames * frameSizeRead);   in permute()
    244  count = sf_readf_short(sf_in, ptr, sfinfo_in.frames);   in permute()
    245  if (count != sfinfo_in.frames) {   in permute()
    247  (int) sfinfo_in.frames, (int) count);   in permute()
    279  assert(permutedStart == sfinfo_in.frames);   in permute()
|
/frameworks/native/opengl/tests/hwc/ |
D | hwcStress.cpp |
    199  static vector <vector <sp<GraphicBuffer> > > frames;   variable
    413  list = hwcTestCreateLayerList(testRandMod(frames.size()) + 1);   in main()
    421  selectedFrames = vectorRandSelect(frames, list->numHwLayers);   in main()
    562  frames.clear();   in initFrames()
    563  frames.resize(rows);   in initFrames()
    591  frames[row].resize(cols);   in initFrames()
    596  frames[row][col] = new GraphicBuffer(w, h, format, texUsage);   in initFrames()
    597  if ((rv = frames[row][col]->initCheck()) != NO_ERROR) {   in initFrames()
    604  hwcTestFillColor(frames[row][col].get(), color, alpha);   in initFrames()
    607  frames[row][col].get(), frames[row][col]->handle,   in initFrames()
|
/frameworks/av/media/libaudioprocessing/include/media/ |
D | BufferProviders.h |
     87  virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
    108  virtual void copyFrames(void *dst, const void *src, size_t frames);
    140  virtual void copyFrames(void *dst, const void *src, size_t frames);
    157  virtual void copyFrames(void *dst, const void *src, size_t frames);
    170  virtual void copyFrames(void *dst, const void *src, size_t frames);
    239  void copyFrames(void *dst, const void *src, size_t frames) override;
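All of these providers implement the same per-frame copy hook: copyFrames(dst, src, frames) transforms exactly `frames` whole frames from src into dst (downmix, remix, reformat, and so on, per the BufferProviders.cpp matches earlier). A pass-through sketch of that contract; only the signature comes from the header, the base class and frame-size bookkeeping are assumed:

```cpp
#include <cstddef>
#include <cstring>

struct PassthroughCopySketch {
    explicit PassthroughCopySketch(size_t frameSize) : mFrameSize(frameSize) {}

    // Copy 'frames' whole frames unchanged; real providers transform them here.
    void copyFrames(void *dst, const void *src, size_t frames) {
        memcpy(dst, src, frames * mFrameSize);
    }

    size_t mFrameSize;  // bytes per frame (channel count * bytes per sample)
};
```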
|
D | RecordBufferConverter.h |
     63  size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
     83  void convertNoResampler(void *dst, const void *src, size_t frames);
     86  void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
|
/frameworks/native/services/surfaceflinger/TimeStats/timestatsproto/ |
D | timestats.proto |
     34  // Total number of frames presented during tracing period.
     36  // Total missed frames of SurfaceFlinger.
     38  // Total frames fallback to client composition.
     64  // Total number of frames presented during tracing period.
     66  // Total number of frames dropped by SurfaceFlinger.
     86  // Number of frames in the bucket.
|
/frameworks/av/media/tests/SampleVideoEncoder/ |
D | README.md |
      3  …VC streams with B-Frames enabled. It uses MediaRecorder APIs to record B-frames enabled video from…
     38  …which will test the MediaCodec APIs for encoding avc/hevc streams with B-frames enabled. This does…
     55  The total number of I-frames, P-frames and B-frames after encoding has been done using MediaCodec A…
|
/frameworks/av/media/libaudioprocessing/tests/fuzzer/ |
D | libaudioprocessing_record_buffer_converter_fuzzer.cpp |
    158  const size_t frames = fdp.ConsumeIntegralInRange<size_t>(0, MAX_FRAMES + 1);   in LLVMFuzzerTestOneInput() local
    159  int8_t dst[dstFrameSize * frames];   in LLVMFuzzerTestOneInput()
    160  memset(dst, 0, sizeof(int8_t) * dstFrameSize * frames);   in LLVMFuzzerTestOneInput()
    171  converter.convert(dst, &provider, frames);   in LLVMFuzzerTestOneInput()
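The fuzzer draws a bounded frame count from the fuzz input with FuzzedDataProvider and sizes the destination buffer from it before calling convert(). A stripped-down sketch of that setup; MAX_FRAMES and kDstFrameSize are assumptions, and the converter call itself is omitted:

```cpp
#include <fuzzer/FuzzedDataProvider.h>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t MAX_FRAMES = 1024;  // illustrative bound
constexpr size_t kDstFrameSize = 4;  // illustrative bytes per destination frame

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
    FuzzedDataProvider fdp(data, size);
    const size_t frames = fdp.ConsumeIntegralInRange<size_t>(0, MAX_FRAMES + 1);
    std::vector<int8_t> dst(kDstFrameSize * frames, 0);  // zero-filled destination
    (void)dst;  // a real harness would pass dst.data() and frames to convert()
    return 0;
}
```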
|
/frameworks/av/services/mediametrics/ |
D | statsd_nuplayer.cpp |
     84  int64_t frames = -1;   in statsd_nuplayer() local
     85  if (item->getInt64("android.media.mediaplayer.frames", &frames)) {   in statsd_nuplayer()
     86  metrics_proto.set_frames(frames);   in statsd_nuplayer()
    176  << " frames:" << frames   in statsd_nuplayer()
|
/frameworks/base/libs/hwui/protos/ |
D | graphicsstats.proto |
     67  // Number of frames with slow render time. Frames are considered janky if
     74  // Number of frames in triple-buffering scenario (high input latency)
     86  // Number of frames that missed their deadline (aka, visibly janked)
     93  // Number of frames in the bucket.
|
/frameworks/av/media/extractors/mp3/ |
D | XINGSeeker.cpp |
    142  int32_t frames = U32_AT(buffer);   in CreateFromSource() local
    147  if (frames) {   in CreateFromSource()
    148  seeker->mDurationUs = (int64_t)frames * samples_per_frame * 1000000LL / sampling_rate;   in CreateFromSource()
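Line 148 is the standard XING duration formula: total samples (frame count times samples per frame) divided by the sampling rate, scaled to microseconds. A worked example under the assumption of an MPEG-1 Layer III stream (1152 samples per frame) at 44100 Hz with a XING frame count of 10000:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    int32_t frames = 10000;            // frame count from the XING header
    int64_t samples_per_frame = 1152;  // MPEG-1 Layer III
    int32_t sampling_rate = 44100;
    int64_t durationUs =
            (int64_t)frames * samples_per_frame * 1000000LL / sampling_rate;
    printf("%lld us (about 261.2 s)\n", (long long)durationUs);  // 261224489 us
    return 0;
}
```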
|