1 /*
2 * Copyright (c) 2023-2025 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #ifndef LOG_TAG
16 #define LOG_TAG "AudioEndpointInner"
17 #endif
18
19 #include "audio_endpoint.h"
20 #include "audio_endpoint_private.h"
21
22
23 #include "audio_errors.h"
24 #include "audio_service_log.h"
25 #include "audio_schedule.h"
26 #include "audio_qosmanager.h"
27 #include "audio_utils.h"
28 #include "manager/hdi_adapter_manager.h"
29 #include "format_converter.h"
30 #include "policy_handler.h"
31 #include "media_monitor_manager.h"
32 #include "volume_tools.h"
33 #include "audio_dump_pcm.h"
34 #include "audio_performance_monitor.h"
35 #include "audio_service.h"
36 #ifdef RESSCHE_ENABLE
37 #include "res_type.h"
38 #include "res_sched_client.h"
39 #endif
40 #include "audio_volume.h"
41 #include "audio_stream_monitor.h"
42
43 namespace OHOS {
44 namespace AudioStandard {
45 namespace {
46 static constexpr int32_t VOLUME_SHIFT_NUMBER = 16; // 1 << 16 = 65536, max volume
47 static constexpr int64_t RECORD_DELAY_TIME_NS = 4000000; // 4ms = 4 * 1000 * 1000ns
48 static constexpr int64_t RECORD_VOIP_DELAY_TIME_NS = 20000000; // 20ms = 20 * 1000 * 1000ns
49 static constexpr int64_t MAX_SPAN_DURATION_NS = 100000000; // 100ms = 100 * 1000 * 1000ns
50 static constexpr int64_t PLAYBACK_DELAY_STOP_HDI_TIME_NS = 3000000000; // 3s = 3 * 1000 * 1000 * 1000ns
51 static constexpr int64_t RECORDER_DELAY_STOP_HDI_TIME_NS = 200000000; // 200ms = 200 * 1000 * 1000ns
52 static constexpr int64_t LINK_RECORDER_DELAY_STOP_HDI_TIME_NS = 2000000000; // 2000ms = 2000 * 1000 * 1000ns
53 static constexpr int64_t WAIT_CLIENT_STANDBY_TIME_NS = 1000000000; // 1s = 1000 * 1000 * 1000ns
54 static constexpr int64_t DELAY_STOP_HDI_TIME_WHEN_NO_RUNNING_NS = 1000000000; // 1s
55 static constexpr int32_t SLEEP_TIME_IN_DEFAULT = 400; // 400ms
56 static constexpr int64_t DELTA_TO_REAL_READ_START_TIME = 0; // 0ms
57 const uint16_t GET_MAX_AMPLITUDE_FRAMES_THRESHOLD = 40;
58 static const int32_t START_DEVICE_TIMEOUT = 10; // 10s
59 static const int32_t ONE_MINUTE = 60;
60 const int32_t DUP_COMMON_LEN = 40; // 40 -> 40ms
61 const int32_t DUP_DEFAULT_LEN = 20; // 20 -> 20ms
62 }
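// Note on the volume representation used in this file: spans and ProcessSingleData() treat
// gain as a Q16 fixed-point value, where full volume is 1 << VOLUME_SHIFT_NUMBER (65536) and
// a gain is applied as (sample * vol) >> VOLUME_SHIFT_NUMBER. A minimal sketch with assumed
// values (not taken from this file):
//   int32_t full = 1 << VOLUME_SHIFT_NUMBER;   // 1.0
//   int32_t half = full / 2;                   // 32768, roughly 0.5
//   int16_t sample = 1000;
//   int16_t out = static_cast<int16_t>((sample * static_cast<int64_t>(half)) >> VOLUME_SHIFT_NUMBER); // 500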
63
64 std::string AudioEndpoint::GenerateEndpointKey(AudioDeviceDescriptor &deviceInfo, int32_t endpointFlag)
65 {
66 // All primary sinks share one endpoint
67 int32_t endpointId = 0;
68 if (deviceInfo.deviceType_ == DEVICE_TYPE_BLUETOOTH_A2DP) {
69 endpointId = deviceInfo.deviceId_;
70 }
71 return deviceInfo.networkId_ + "_" + std::to_string(endpointId) + "_" +
72 std::to_string(deviceInfo.deviceRole_) + "_" + std::to_string(endpointFlag);
73 }
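// Example (values are illustrative only): a non-A2DP device always yields
// "<networkId>_0_<deviceRole>_<endpointFlag>", so all primary sinks map to the same key;
// only an A2DP device contributes its own deviceId_ instead of 0.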
74
75 std::shared_ptr<AudioEndpoint> AudioEndpoint::CreateEndpoint(EndpointType type, uint64_t id,
76 const AudioProcessConfig &clientConfig, const AudioDeviceDescriptor &deviceInfo, AudioStreamInfo &streamInfo)
77 {
78 std::shared_ptr<AudioEndpoint> audioEndpoint = nullptr;
79 audioEndpoint = std::make_shared<AudioEndpointInner>(type, id, clientConfig);
80 CHECK_AND_RETURN_RET_LOG(audioEndpoint != nullptr, nullptr, "Create AudioEndpoint failed.");
81
82 if (!audioEndpoint->Config(deviceInfo, streamInfo)) {
83 AUDIO_ERR_LOG("Config AudioEndpoint failed!");
84 audioEndpoint = nullptr;
85 }
86 return audioEndpoint;
87 }
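// Minimal call-site sketch (hypothetical caller, error handling assumed):
//   std::shared_ptr<AudioEndpoint> endpoint = AudioEndpoint::CreateEndpoint(
//       AudioEndpoint::TYPE_MMAP, endpointId, clientConfig, deviceInfo, streamInfo);
//   CHECK_AND_RETURN_RET_LOG(endpoint != nullptr, ERR_OPERATION_FAILED, "CreateEndpoint failed");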
88
89 AudioDeviceDescriptor &AudioEndpoint::GetDeviceInfo()
90 {
91 return deviceInfo_;
92 }
93
94 DeviceRole AudioEndpoint::GetDeviceRole()
95 {
96 return deviceInfo_.deviceRole_;
97 }
98
99 AudioStreamInfo &AudioEndpoint::GetAudioStreamInfo()
100 {
101 return dstStreamInfo_;
102 }
103
104 AudioEndpointInner::AudioEndpointInner(EndpointType type, uint64_t id,
105 const AudioProcessConfig &clientConfig) : endpointType_(type), id_(id), clientConfig_(clientConfig)
106 {
107 AUDIO_INFO_LOG("AudioEndpoint type:%{public}d", endpointType_);
108 if (clientConfig_.audioMode == AUDIO_MODE_PLAYBACK) {
109 logUtilsTag_ = "AudioEndpoint::Play";
110 } else {
111 logUtilsTag_ = "AudioEndpoint::Rec";
112 }
113 }
114
115 std::string AudioEndpointInner::GetEndpointName()
116 {
117 return GenerateEndpointKey(deviceInfo_, id_);
118 }
119
120 int32_t AudioEndpointInner::SetVolume(AudioStreamType streamType, float volume)
121 {
122 if (streamType == AudioStreamType::STREAM_VOICE_CALL && endpointType_ == TYPE_VOIP_MMAP) {
123 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
124 if (sink != nullptr) {
125 AUDIO_INFO_LOG("SetVolume:%{public}f, streamType:%{public}d", volume, streamType);
126 sink->SetVolume(volume, volume);
127 }
128 }
129 return SUCCESS;
130 }
131
132 MockCallbacks::MockCallbacks(uint32_t streamIndex) : streamIndex_(streamIndex)
133 {
134 AUDIO_INFO_LOG("DupStream %{public}u create MockCallbacks", streamIndex_);
135 if (GetEngineFlag() == 1) {
136 dumpDupOutFileName_ = std::to_string(streamIndex_) + "_endpoint_dup_out_" + ".pcm";
137 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dumpDupOutFileName_, &dumpDupOut_);
138 }
139 }
140
141 MockCallbacks::~MockCallbacks()
142 {
143 if (GetEngineFlag() == 1) {
144 DumpFileUtil::CloseDumpFile(&dumpDupOut_);
145 }
146 }
147
148 void MockCallbacks::OnStatusUpdate(IOperation operation)
149 {
150 AUDIO_INFO_LOG("DupStream %{public}u recv operation: %{public}d", streamIndex_, operation);
151 }
152
153 int32_t MockCallbacks::OnWriteData(size_t length)
154 {
155 Trace trace("DupStream::OnWriteData length " + std::to_string(length));
156 return SUCCESS;
157 }
158
159 int32_t MockCallbacks::OnWriteData(int8_t *inputData, size_t requestDataLen)
160 {
161 Trace trace("DupStream::OnWriteData length " + std::to_string(requestDataLen));
162 if (GetEngineFlag() == 1 && dupRingBuffer_ != nullptr) {
163 OptResult result = dupRingBuffer_->GetReadableSize();
164 CHECK_AND_RETURN_RET_LOG(result.ret == OPERATION_SUCCESS, ERROR,
165 "dupBuffer get readable size failed, size is:%{public}zu", result.size);
166 CHECK_AND_RETURN_RET_LOG((result.size != 0) && (result.size >= requestDataLen), ERROR,
167 "Readable size is invaild, result.size:%{public}zu, requstDataLen:%{public}zu",
168 result.size, requestDataLen);
169 AUDIO_DEBUG_LOG("requstDataLen is:%{public}zu readSize is:%{public}zu", requestDataLen, result.size);
170 result = dupRingBuffer_->Dequeue({reinterpret_cast<uint8_t *>(inputData), requestDataLen});
171 CHECK_AND_RETURN_RET_LOG(result.ret == OPERATION_SUCCESS, ERROR, "dupBuffer dequeue failed!");\
172 DumpFileUtil::WriteDumpFile(dumpDupOut_, static_cast<void *>(inputData), requestDataLen);
173 }
174 return SUCCESS;
175 }
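// Read path of the dup ring buffer used above: the mixing side presumably enqueues one span
// at a time (see WriteDupBufferInner() called from MixToDupStream()), and this callback
// drains it. A minimal sketch of the same pattern, assuming an AudioRingCache named ringBuffer:
//   OptResult res = ringBuffer->GetReadableSize();
//   if (res.ret == OPERATION_SUCCESS && res.size >= requestDataLen) {
//       ringBuffer->Dequeue({reinterpret_cast<uint8_t *>(inputData), requestDataLen});
//   }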
176
177 int32_t MockCallbacks::GetAvailableSize(size_t &length)
178 {
179 return ERR_NOT_SUPPORTED;
180 }
181
182 std::unique_ptr<AudioRingCache>& MockCallbacks::GetDupRingBuffer()
183 {
184 return dupRingBuffer_;
185 }
186
187 bool AudioEndpointInner::ShouldInnerCap(int32_t innerCapId)
188 {
189 bool shouldBecapped = false;
190 std::lock_guard<std::mutex> lock(listLock_);
191 for (uint32_t i = 0; i < processList_.size(); i++) {
192 if (processList_[i]->GetInnerCapState(innerCapId)) {
193 shouldBecapped = true;
194 break;
195 }
196 }
197 AUDIO_INFO_LOG("find endpoint inner-cap state: %{public}s", shouldBecapped ? "true" : "false");
198 return shouldBecapped;
199 }
200
201 AudioProcessConfig AudioEndpointInner::GetInnerCapConfig()
202 {
203 AudioProcessConfig processConfig;
204
205 processConfig.appInfo.appPid = static_cast<int32_t>(getpid());
206 processConfig.appInfo.appUid = static_cast<int32_t>(getuid());
207
208 processConfig.streamInfo = dstStreamInfo_;
209
210 processConfig.audioMode = AUDIO_MODE_PLAYBACK;
211
212 // processConfig.rendererInfo ?
213
214 processConfig.streamType = STREAM_MUSIC;
215
216 return processConfig;
217 }
218
219 int32_t AudioEndpointInner::InitDupStream(int32_t innerCapId)
220 {
221 std::lock_guard<std::mutex> lock(dupMutex_);
222 bool hasEnabled = (fastCaptureInfos_.count(innerCapId) && fastCaptureInfos_[innerCapId].isInnerCapEnabled);
223 CHECK_AND_RETURN_RET_LOG((hasEnabled == false), SUCCESS, "already enabled");
224
225 AudioProcessConfig processConfig = GetInnerCapConfig();
226 processConfig.innerCapId = innerCapId;
227 auto &captureInfo = fastCaptureInfos_[innerCapId];
228 int32_t ret = IStreamManager::GetDupPlaybackManager().CreateRender(processConfig, captureInfo.dupStream);
229 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS && captureInfo.dupStream != nullptr,
230 ERR_OPERATION_FAILED, "Failed: %{public}d", ret);
231 uint32_t dupStreamIndex = captureInfo.dupStream->GetStreamIndex();
232
233 innerCapIdToDupStreamCallbackMap_[innerCapId] = std::make_shared<MockCallbacks>(dupStreamIndex);
234 captureInfo.dupStream->RegisterStatusCallback(innerCapIdToDupStreamCallbackMap_[innerCapId]);
235 captureInfo.dupStream->RegisterWriteCallback(innerCapIdToDupStreamCallbackMap_[innerCapId]);
236
237 int32_t engineFlag = GetEngineFlag();
238 if (engineFlag == 1) {
239 ret = InitDupBuffer(processConfig, innerCapId, dupStreamIndex); // buffer init
240 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERROR, "InitProAudioDupBuffer failed");
241 }
242
243 // eg: /data/local/tmp/LocalDevice6_0_c2s_dup_48000_2_1.pcm
244 AudioStreamInfo tempInfo = processConfig.streamInfo;
245 dupDumpName_ = GetEndpointName() + "_c2s_dup_" + std::to_string(tempInfo.samplingRate) + "_" +
246 std::to_string(tempInfo.channels) + "_" + std::to_string(tempInfo.format) + ".pcm";
247 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dupDumpName_, &dumpC2SDup_);
248
249 AUDIO_INFO_LOG("Dup Renderer %{public}d with Endpoint status: %{public}s", dupStreamIndex,
250 GetStatusStr(endpointStatus_).c_str());
251 CHECK_AND_RETURN_RET_LOG(endpointStatus_ != INVALID, ERR_ILLEGAL_STATE, "Endpoint is invalid!");
252
253 // buffer init
254 dupBufferSize_ = dstSpanSizeInframe_ * dstByteSizePerFrame_; // bytes in one device span
255 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr, ERR_OPERATION_FAILED, "DstAudioBuffer is nullptr!");
256 CHECK_AND_RETURN_RET_LOG(dupBufferSize_ < dstAudioBuffer_->GetDataSize(), ERR_OPERATION_FAILED, "Init buffer fail");
257 dupBuffer_ = std::make_unique<uint8_t []>(dupBufferSize_);
258 ret = memset_s(reinterpret_cast<void *>(dupBuffer_.get()), dupBufferSize_, 0, dupBufferSize_);
259 if (ret != EOK) {
260 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d", ret);
261 }
262
263 if (endpointStatus_ == RUNNING || (endpointStatus_ == IDEL && isDeviceRunningInIdel_)) {
264 int32_t audioId = deviceInfo_.deviceId_;
265 AUDIO_INFO_LOG("Endpoint %{public}d is already running, let's start the dup stream", audioId);
266 captureInfo.dupStream->Start();
267 }
268 captureInfo.isInnerCapEnabled = true;
269 float clientVolume = dstAudioBuffer_->GetStreamVolume();
270 float duckFactor = dstAudioBuffer_->GetDuckFactor();
271 if (engineFlag == 1 && AudioVolume::GetInstance() != nullptr) {
272 AudioVolume::GetInstance()->SetStreamVolume(dupStreamIndex, clientVolume);
273 AudioVolume::GetInstance()->SetStreamVolumeDuckFactor(dupStreamIndex, duckFactor);
274 }
275
276 return SUCCESS;
277 }
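// Sizing note for dupBuffer_ above: it holds exactly one device span. With assumed values of
// a 48 kHz stream, a 240-frame span and stereo SAMPLE_S16LE (4 bytes per frame), that is
// 240 * 4 = 960 bytes, which must stay smaller than dstAudioBuffer_->GetDataSize().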
278
279 int32_t AudioEndpointInner::InitDupBuffer(AudioProcessConfig processConfig, int32_t innerCapId,
280 uint32_t dupStreamIndex)
281 {
282 int32_t ret = CreateDupBufferInner(innerCapId);
283
284 dumpDupInFileName_ = std::to_string(dupStreamIndex) + "_endpoint_dup_in_" + ".pcm";
285 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dumpDupInFileName_, &dumpDupIn_);
286 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERROR, "Config dup buffer failed");
287
288 bool isSystemApp = CheckoutSystemAppUtil::CheckoutSystemApp(processConfig.appInfo.appUid);
289 if (AudioVolume::GetInstance() != nullptr) {
290 StreamVolumeParams streamVolumeParams = { dupStreamIndex, processConfig.streamType,
291 processConfig.rendererInfo.streamUsage, processConfig.appInfo.appUid, processConfig.appInfo.appPid,
292 isSystemApp, processConfig.rendererInfo.volumeMode, processConfig.rendererInfo.isVirtualKeyboard };
293 AudioVolume::GetInstance()->AddStreamVolume(streamVolumeParams);
294 }
295
296 return SUCCESS;
297 }
298
299 int32_t AudioEndpointInner::EnableFastInnerCap(int32_t innerCapId)
300 {
301 CHECK_AND_RETURN_RET_LOG(deviceInfo_.deviceRole_ == OUTPUT_DEVICE, ERR_INVALID_OPERATION, "Not output device!");
302 int32_t ret = InitDupStream(innerCapId);
303 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_OPERATION_FAILED, "Init dup stream failed!");
304 return SUCCESS;
305 }
306
307 int32_t AudioEndpointInner::DisableFastInnerCap()
308 {
309 if (deviceInfo_.deviceRole_ != OUTPUT_DEVICE) {
310 return SUCCESS;
311 }
312 std::lock_guard<std::mutex> lock(dupMutex_);
313 for (auto &capInfo : fastCaptureInfos_) {
314 HandleDisableFastCap(capInfo.second);
315 }
316 fastCaptureInfos_.clear();
317 return SUCCESS;
318 }
319
320 int32_t AudioEndpointInner::DisableFastInnerCap(int32_t innerCapId)
321 {
322 if (deviceInfo_.deviceRole_ != OUTPUT_DEVICE) {
323 return SUCCESS;
324 }
325 std::lock_guard<std::mutex> lock(dupMutex_);
326 if (!fastCaptureInfos_.count(innerCapId)) {
327 AUDIO_INFO_LOG("InnerCap is already disabled.");
328 return SUCCESS;
329 }
330 HandleDisableFastCap(fastCaptureInfos_[innerCapId]);
331 fastCaptureInfos_.erase(innerCapId);
332 return SUCCESS;
333 }
334
335 int32_t AudioEndpointInner::HandleDisableFastCap(CaptureInfo &captureInfo)
336 {
337 if (!captureInfo.isInnerCapEnabled) {
338 captureInfo.dupStream = nullptr;
339 AUDIO_INFO_LOG("InnerCap is already disabled.");
340 return SUCCESS;
341 }
342 if (captureInfo.dupStream == nullptr) {
343 captureInfo.isInnerCapEnabled = false;
344 AUDIO_INFO_LOG("dupStream is nullptr.");
345 return SUCCESS;
346 }
347 captureInfo.isInnerCapEnabled = false;
348 AUDIO_INFO_LOG("Disable dup renderer %{public}d with Endpoint status: %{public}s",
349 captureInfo.dupStream->GetStreamIndex(), GetStatusStr(endpointStatus_).c_str());
350
351 if (GetEngineFlag() == 1) {
352 uint32_t dupStreamIndex = captureInfo.dupStream->GetStreamIndex();
353 if (AudioVolume::GetInstance() != nullptr) {
354 AudioVolume::GetInstance()->RemoveStreamVolume(dupStreamIndex);
355 }
356 }
357 IStreamManager::GetDupPlaybackManager().ReleaseRender(captureInfo.dupStream->GetStreamIndex());
358 captureInfo.dupStream = nullptr;
359 return SUCCESS;
360 }
361
362 AudioEndpoint::EndpointStatus AudioEndpointInner::GetStatus()
363 {
364 AUDIO_INFO_LOG("AudioEndpoint get status:%{public}s", GetStatusStr(endpointStatus_).c_str());
365 return endpointStatus_.load();
366 }
367
368 void AudioEndpointInner::Release()
369 {
370 // Wait for thread end and then clear other data to avoid using any cleared data in thread.
371 AUDIO_INFO_LOG("Release enter.");
372 if (!isInited_.load()) {
373 AUDIO_WARNING_LOG("already released");
374 return;
375 }
376
377 isInited_.store(false);
378 workThreadCV_.notify_all();
379 if (endpointWorkThread_.joinable()) {
380 AUDIO_DEBUG_LOG("AudioEndpoint join work thread start");
381 endpointWorkThread_.join();
382 AUDIO_DEBUG_LOG("AudioEndpoint join work thread end");
383 }
384 AudioPerformanceMonitor::GetInstance().DeleteOvertimeMonitor(ADAPTER_TYPE_FAST);
385
386 stopUpdateThread_.store(true);
387 updateThreadCV_.notify_all();
388 if (updatePosTimeThread_.joinable()) {
389 AUDIO_DEBUG_LOG("AudioEndpoint join update thread start");
390 updatePosTimeThread_.join();
391 AUDIO_DEBUG_LOG("AudioEndpoint join update thread end");
392 }
393
394 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
395 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
396 if (sink != nullptr) {
397 sink->DeInit();
398 }
399 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
400
401 if (source != nullptr) {
402 source->DeInit();
403 }
404 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
405
406 endpointStatus_.store(INVALID);
407
408 if (dstAudioBuffer_ != nullptr) {
409 AUDIO_INFO_LOG("Set device buffer null");
410 dstAudioBuffer_ = nullptr;
411 }
412
413 if (deviceInfo_.deviceRole_ == OUTPUT_DEVICE) {
414 DisableFastInnerCap();
415 }
416
417 DumpFileUtil::CloseDumpFile(&dumpHdi_);
418 }
419
420 AudioEndpointInner::~AudioEndpointInner()
421 {
422 if (isInited_.load()) {
423 AudioEndpointInner::Release();
424 }
425 AUDIO_INFO_LOG("~AudioEndpoint()");
426 }
427
428 bool AudioEndpointInner::ConfigInputPoint(const AudioDeviceDescriptor &deviceInfo)
429 {
430 AUDIO_INFO_LOG("ConfigInputPoint enter.");
431 IAudioSourceAttr attr = {};
432 attr.sampleRate = dstStreamInfo_.samplingRate;
433 attr.channel = dstStreamInfo_.channels;
434 attr.format = dstStreamInfo_.format;
435 attr.deviceNetworkId = deviceInfo.networkId_.c_str();
436 attr.deviceType = deviceInfo.deviceType_;
437 attr.audioStreamFlag = endpointType_ == TYPE_VOIP_MMAP ? AUDIO_FLAG_VOIP_FAST : AUDIO_FLAG_MMAP;
438
439 std::shared_ptr<IAudioCaptureSource> source = GetFastSource(deviceInfo.networkId_, endpointType_, attr);
440
441 if (deviceInfo.networkId_ == LOCAL_NETWORK_ID) {
442 attr.adapterName = "primary";
443 } else {
444 #ifdef DAUDIO_ENABLE
445 attr.adapterName = "remote";
446 #endif
447 }
448 if (source == nullptr) {
449 AUDIO_ERR_LOG("ConfigInputPoint GetInstance failed.");
450 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
451 return false;
452 }
453
454 if (!source->IsInited()) {
455 AUDIO_INFO_LOG("Source is not inited");
456 int32_t err = source->Init(attr);
457 if (err != SUCCESS || !source->IsInited()) {
458 AUDIO_ERR_LOG("init remote fast fail, err %{public}d.", err);
459 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
460 return false;
461 }
462 }
463
464 if (PrepareDeviceBuffer(deviceInfo) != SUCCESS) {
465 source->DeInit();
466 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
467 return false;
468 }
469
470 bool ret = writeTimeModel_.ConfigSampleRate(dstStreamInfo_.samplingRate);
471 CHECK_AND_RETURN_RET_LOG(ret != false, false, "Config LinearPosTimeModel failed.");
472
473 endpointStatus_ = UNLINKED;
474 isInited_.store(true);
475 endpointWorkThread_ = std::thread([this] { this->RecordEndpointWorkLoopFuc(); });
476 pthread_setname_np(endpointWorkThread_.native_handle(), "OS_AudioEpLoop");
477
478 updatePosTimeThread_ = std::thread([this] { this->AsyncGetPosTime(); });
479 pthread_setname_np(updatePosTimeThread_.native_handle(), "OS_AudioEpUpdate");
480
481 // eg: input_endpoint_hdi_audio_8_0_20240527202236189_48000_2_1.pcm
482 dumpHdiName_ = "input_endpoint_hdi_audio_" + std::to_string(attr.deviceType) + '_' +
483 std::to_string(endpointType_) + '_' + GetTime() + '_' + std::to_string(attr.sampleRate) + "_" +
484 std::to_string(attr.channel) + "_" + std::to_string(attr.format) + ".pcm";
485 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dumpHdiName_, &dumpHdi_);
486 return true;
487 }
488
489 static std::shared_ptr<IAudioCaptureSource> SwitchSource(uint32_t &id, HdiIdType type, const std::string &info)
490 {
491 if (id != HDI_INVALID_ID) {
492 HdiAdapterManager::GetInstance().ReleaseId(id);
493 }
494 id = HdiAdapterManager::GetInstance().GetId(HDI_ID_BASE_CAPTURE, type, info, true);
495 return HdiAdapterManager::GetInstance().GetCaptureSource(id, true);
496 }
497
498 std::shared_ptr<IAudioCaptureSource> AudioEndpointInner::GetFastSource(const std::string &networkId, EndpointType type,
499 IAudioSourceAttr &attr)
500 {
501 AUDIO_INFO_LOG("Network id %{public}s, endpoint type %{public}d", networkId.c_str(), type);
502 if (networkId != LOCAL_NETWORK_ID) {
503 attr.adapterName = "remote";
504 #ifdef DAUDIO_ENABLE
505 fastSourceType_ = type == AudioEndpoint::TYPE_MMAP ? FAST_SOURCE_TYPE_REMOTE : FAST_SOURCE_TYPE_VOIP;
506 // Distributed only requires a singleton because there won't be both voip and regular fast simultaneously
507 return SwitchSource(fastCaptureId_, HDI_ID_TYPE_REMOTE_FAST, networkId);
508 #endif
509 }
510
511 attr.adapterName = "primary";
512 if (type == AudioEndpoint::TYPE_MMAP) {
513 AUDIO_INFO_LOG("Use mmap");
514 fastSourceType_ = FAST_SOURCE_TYPE_NORMAL;
515 return SwitchSource(fastCaptureId_, HDI_ID_TYPE_FAST, HDI_ID_INFO_DEFAULT);
516 } else if (type == AudioEndpoint::TYPE_VOIP_MMAP) {
517 AUDIO_INFO_LOG("Use voip mmap");
518 fastSourceType_ = FAST_SOURCE_TYPE_VOIP;
519 return SwitchSource(fastCaptureId_, HDI_ID_TYPE_FAST, HDI_ID_INFO_VOIP);
520 }
521 return nullptr;
522 }
523
524 void AudioEndpointInner::StartThread(const IAudioSinkAttr &attr)
525 {
526 endpointStatus_ = UNLINKED;
527 isInited_.store(true);
528 endpointWorkThread_ = std::thread([this] { this->EndpointWorkLoopFuc(); });
529 pthread_setname_np(endpointWorkThread_.native_handle(), "OS_AudioEpLoop");
530
531 updatePosTimeThread_ = std::thread([this] { this->AsyncGetPosTime(); });
532 pthread_setname_np(updatePosTimeThread_.native_handle(), "OS_AudioEpUpdate");
533
534 // eg: endpoint_hdi_audio_8_0_20240527202236189_48000_2_1.pcm
535 dumpHdiName_ = "endpoint_hdi_audio_" + std::to_string(attr.deviceType) + '_' + std::to_string(endpointType_) +
536 '_' + GetTime() + '_' + std::to_string(attr.sampleRate) + "_" +
537 std::to_string(attr.channel) + "_" + std::to_string(attr.format) + ".pcm";
538 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dumpHdiName_, &dumpHdi_);
539 }
540
541 bool AudioEndpointInner::Config(const AudioDeviceDescriptor &deviceInfo, AudioStreamInfo &streamInfo)
542 {
543 AUDIO_INFO_LOG("Role %{public}d, format %{public}d", deviceInfo.deviceRole_,
544 streamInfo.format);
545 deviceInfo_ = deviceInfo;
546 dstStreamInfo_ = streamInfo;
547
548 if (deviceInfo.deviceRole_ == INPUT_DEVICE) {
549 return ConfigInputPoint(deviceInfo);
550 }
551
552 std::shared_ptr<IAudioRenderSink> sink = GetFastSink(deviceInfo, endpointType_);
553 if (sink == nullptr) {
554 AUDIO_ERR_LOG("Get fastSink instance failed");
555 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
556 return false;
557 }
558
559 IAudioSinkAttr attr = {};
560 InitSinkAttr(attr, deviceInfo);
561
562 if (!sink->IsInited()) {
563 AUDIO_INFO_LOG("Sink is not inited");
564 sink->Init(attr);
565 if (!sink->IsInited()) {
566 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
567 return false;
568 }
569 }
570
571 if (PrepareDeviceBuffer(deviceInfo) != SUCCESS) {
572 sink->DeInit();
573 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
574 return false;
575 }
576
577 Volume vol = {true, 1.0f, 0};
578 DeviceType deviceType = PolicyHandler::GetInstance().GetActiveOutPutDevice();
579 if ((clientConfig_.streamType == STREAM_VOICE_COMMUNICATION || clientConfig_.streamType == STREAM_VOICE_CALL) &&
580 endpointType_ == TYPE_VOIP_MMAP) {
581 PolicyHandler::GetInstance().GetSharedVolume(STREAM_VOICE_CALL, deviceType, vol);
582 sink->SetVolume(vol.volumeFloat, vol.volumeFloat);
583 AUDIO_INFO_LOG("Init Volume %{public}f with Device %{public}d", vol.volumeFloat, deviceType);
584 } else {
585 sink->SetVolume(1.0f, 1.0f);
586 AUDIO_INFO_LOG("Init Volume 1.0 with Device %{public}d", deviceType);
587 }
588
589 bool ret = readTimeModel_.ConfigSampleRate(dstStreamInfo_.samplingRate);
590 CHECK_AND_RETURN_RET_LOG(ret != false, false, "Config LinearPosTimeModel failed.");
591 StartThread(attr);
592 return true;
593 }
594
595 static std::shared_ptr<IAudioRenderSink> SwitchSink(uint32_t &id, HdiIdType type, const std::string &info)
596 {
597 AUDIO_INFO_LOG("Id: %{public}u", id);
598 if (id != HDI_INVALID_ID) {
599 HdiAdapterManager::GetInstance().ReleaseId(id);
600 }
601 id = HdiAdapterManager::GetInstance().GetId(HDI_ID_BASE_RENDER, type, info, true);
602 AUDIO_INFO_LOG("Id after process: %{public}u", id);
603 return HdiAdapterManager::GetInstance().GetRenderSink(id, true);
604 }
605
606 std::shared_ptr<IAudioRenderSink> AudioEndpointInner::GetFastSink(const AudioDeviceDescriptor &deviceInfo,
607 EndpointType type)
608 {
609 AUDIO_INFO_LOG("Network id %{public}s, endpoint type %{public}d", deviceInfo.networkId_.c_str(), type);
610 if (deviceInfo.networkId_ != LOCAL_NETWORK_ID) {
611 #ifdef DAUDIO_ENABLE
612 fastSinkType_ = type == AudioEndpoint::TYPE_MMAP ? FAST_SINK_TYPE_REMOTE : FAST_SINK_TYPE_VOIP;
613 // Distributed only requires a singleton because there won't be both voip and regular fast simultaneously
614 return SwitchSink(fastRenderId_, HDI_ID_TYPE_REMOTE_FAST, deviceInfo.networkId_);
615 #endif
616 }
617
618 if (deviceInfo.deviceType_ == DEVICE_TYPE_BLUETOOTH_A2DP && deviceInfo.a2dpOffloadFlag_ != A2DP_OFFLOAD) {
619 fastSinkType_ = FAST_SINK_TYPE_BLUETOOTH;
620 return SwitchSink(fastRenderId_, HDI_ID_TYPE_BLUETOOTH, HDI_ID_INFO_MMAP);
621 }
622
623 if (type == AudioEndpoint::TYPE_MMAP) {
624 fastSinkType_ = FAST_SINK_TYPE_NORMAL;
625 return SwitchSink(fastRenderId_, HDI_ID_TYPE_FAST, HDI_ID_INFO_DEFAULT);
626 } else if (type == AudioEndpoint::TYPE_VOIP_MMAP) {
627 fastSinkType_ = FAST_SINK_TYPE_VOIP;
628 return SwitchSink(fastRenderId_, HDI_ID_TYPE_FAST, HDI_ID_INFO_VOIP);
629 }
630 return nullptr;
631 }
632
633 void AudioEndpointInner::InitSinkAttr(IAudioSinkAttr &attr, const AudioDeviceDescriptor &deviceInfo)
634 {
635 bool isDefaultAdapterEnable = AudioService::GetInstance()->GetDefaultAdapterEnable();
636 if (isDefaultAdapterEnable) {
637 attr.adapterName = "dp";
638 } else {
639 attr.adapterName = deviceInfo.networkId_ == LOCAL_NETWORK_ID ? "primary" : "remote";
640 }
641 attr.sampleRate = dstStreamInfo_.samplingRate; // 48000hz
642 attr.channel = dstStreamInfo_.channels; // STEREO = 2
643 attr.format = dstStreamInfo_.format; // SAMPLE_S16LE = 1
644 attr.deviceNetworkId = deviceInfo.networkId_.c_str();
645 attr.deviceType = static_cast<int32_t>(deviceInfo.deviceType_);
646 attr.audioStreamFlag = endpointType_ == TYPE_VOIP_MMAP ? AUDIO_FLAG_VOIP_FAST : AUDIO_FLAG_MMAP;
647 }
648
649 int32_t AudioEndpointInner::GetAdapterBufferInfo(const AudioDeviceDescriptor &deviceInfo)
650 {
651 int32_t ret = 0;
652 AUDIO_INFO_LOG("GetAdapterBufferInfo enter, deviceRole %{public}d.", deviceInfo.deviceRole_);
653 if (deviceInfo.deviceRole_ == INPUT_DEVICE) {
654 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
655 CHECK_AND_RETURN_RET_LOG(source != nullptr, ERR_INVALID_HANDLE, "fast source is null.");
656 ret = source->GetMmapBufferInfo(dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_,
657 dstByteSizePerFrame_, syncInfoSize_);
658 } else {
659 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
660 CHECK_AND_RETURN_RET_LOG(sink != nullptr, ERR_INVALID_HANDLE, "fast sink is null.");
661 ret = sink->GetMmapBufferInfo(dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_,
662 dstByteSizePerFrame_, syncInfoSize_);
663 }
664
665 if (ret != SUCCESS || dstBufferFd_ == -1 || dstTotalSizeInframe_ == 0 || dstSpanSizeInframe_ == 0 ||
666 dstByteSizePerFrame_ == 0) {
667 AUDIO_ERR_LOG("get mmap buffer info fail, ret %{public}d, dstBufferFd %{public}d, \
668 dstTotalSizeInframe %{public}d, dstSpanSizeInframe %{public}d, dstByteSizePerFrame %{public}d.",
669 ret, dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_, dstByteSizePerFrame_);
670 return ERR_ILLEGAL_STATE;
671 }
672 AUDIO_INFO_LOG("mmap buffer info: dstTotalSizeInframe %{public}d, dstSpanSizeInframe %{public}d,"
673 "dstByteSizePerFrame %{public}d.", dstTotalSizeInframe_, dstSpanSizeInframe_, dstByteSizePerFrame_);
674 return SUCCESS;
675 }
676
677 int32_t AudioEndpointInner::PrepareDeviceBuffer(const AudioDeviceDescriptor &deviceInfo)
678 {
679 AUDIO_INFO_LOG("enter, deviceRole %{public}d.", deviceInfo.deviceRole_);
680 if (dstAudioBuffer_ != nullptr) {
681 AUDIO_INFO_LOG("endpoint buffer is preapred, fd:%{public}d", dstBufferFd_);
682 return SUCCESS;
683 }
684
685 int32_t ret = GetAdapterBufferInfo(deviceInfo);
686 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_OPERATION_FAILED,
687 "get adapter buffer Info fail, ret %{public}d.", ret);
688
689 // spanDuration_ may be less than the correct time of dstSpanSizeInframe_.
690 spanDuration_ = static_cast<int64_t>(dstSpanSizeInframe_) * AUDIO_NS_PER_SECOND /
691 static_cast<int64_t>(dstStreamInfo_.samplingRate);
692 int64_t temp = spanDuration_ / 5 * 3; // 3/5 spanDuration
693 int64_t setTime = -1;
694 int64_t maxSetTime = (static_cast<int64_t>(dstTotalSizeInframe_ - dstSpanSizeInframe_)) *
695 AUDIO_NS_PER_SECOND / static_cast<int64_t>(dstStreamInfo_.samplingRate);
696 GetSysPara("persist.multimedia.serveraheadreadtime", setTime);
697 temp = setTime > 0 && setTime < maxSetTime ? setTime : temp;
698 serverAheadReadTime_ = temp < ONE_MILLISECOND_DURATION ? ONE_MILLISECOND_DURATION : temp; // at least 1ms ahead.
699 AUDIO_INFO_LOG("spanDuration %{public}" PRIu64" ns, serverAheadReadTime %{public}" PRIu64" ns.",
700 spanDuration_, serverAheadReadTime_);
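// Worked example with assumed values: dstSpanSizeInframe_ = 240 at 48000 Hz gives
// spanDuration_ = 240 * 1e9 / 48000 = 5,000,000 ns (5 ms); the default ahead-read time is
// 3/5 of that (3 ms), unless persist.multimedia.serveraheadreadtime overrides it, and it is
// clamped to at least ONE_MILLISECOND_DURATION.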
701
702 CHECK_AND_RETURN_RET_LOG(spanDuration_ > 0 && spanDuration_ < MAX_SPAN_DURATION_NS,
703 ERR_INVALID_PARAM, "mmap span info error, spanDuration %{public}" PRIu64".", spanDuration_);
704 AudioBufferHolder holder = syncInfoSize_ != 0 ? AUDIO_SERVER_ONLY_WITH_SYNC : AUDIO_SERVER_ONLY;
705 dstAudioBuffer_ = OHAudioBuffer::CreateFromRemote(dstTotalSizeInframe_, dstSpanSizeInframe_, dstByteSizePerFrame_,
706 holder, dstBufferFd_, INVALID_BUFFER_FD);
707 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr && dstAudioBuffer_->GetBufferHolder() == holder,
708 ERR_ILLEGAL_STATE, "create buffer from remote fail.");
709
710 if (dstAudioBuffer_ == nullptr || dstAudioBuffer_->GetStreamStatus() == nullptr) {
711 AUDIO_ERR_LOG("The stream status is null!");
712 return ERR_INVALID_PARAM;
713 }
714
715 dstAudioBuffer_->GetStreamStatus()->store(StreamStatus::STREAM_IDEL);
716
717 // clear data buffer
718 ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0, dstAudioBuffer_->GetDataSize());
719 if (ret != EOK) {
720 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d, fd %{public}d.", ret, dstBufferFd_);
721 }
722 InitAudiobuffer(true);
723
724 AUDIO_DEBUG_LOG("end, fd %{public}d.", dstBufferFd_);
725 return SUCCESS;
726 }
727
728 void AudioEndpointInner::InitAudiobuffer(bool resetReadWritePos)
729 {
730 CHECK_AND_RETURN_LOG((dstAudioBuffer_ != nullptr), "dst audio buffer is null.");
731 if (resetReadWritePos) {
732 dstAudioBuffer_->ResetCurReadWritePos(0, 0, false);
733 }
734
735 uint32_t spanCount = dstAudioBuffer_->GetSpanCount();
736 for (uint32_t i = 0; i < spanCount; i++) {
737 SpanInfo *spanInfo = dstAudioBuffer_->GetSpanInfoByIndex(i);
738 CHECK_AND_RETURN_LOG(spanInfo != nullptr, "InitAudiobuffer failed.");
739 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
740 spanInfo->spanStatus = SPAN_WRITE_DONE;
741 } else {
742 spanInfo->spanStatus = SPAN_READ_DONE;
743 }
744 spanInfo->offsetInFrame = 0;
745
746 spanInfo->readStartTime = 0;
747 spanInfo->readDoneTime = 0;
748
749 spanInfo->writeStartTime = 0;
750 spanInfo->writeDoneTime = 0;
751
752 spanInfo->volumeStart = 1 << VOLUME_SHIFT_NUMBER; // 65536 for initialize
753 spanInfo->volumeEnd = 1 << VOLUME_SHIFT_NUMBER; // 65536 for initialize
754 spanInfo->isMute = false;
755 }
756 return;
757 }
758
759 int32_t AudioEndpointInner::GetPreferBufferInfo(uint32_t &totalSizeInframe, uint32_t &spanSizeInframe)
760 {
761 totalSizeInframe = dstTotalSizeInframe_;
762 spanSizeInframe = dstSpanSizeInframe_;
763 return SUCCESS;
764 }
765
766 bool AudioEndpointInner::IsAnyProcessRunning()
767 {
768 std::lock_guard<std::mutex> lock(listLock_);
769 return IsAnyProcessRunningInner();
770 }
771
772 // Should be called with AudioEndpointInner::listLock_ locked
773 bool AudioEndpointInner::IsAnyProcessRunningInner()
774 {
775 bool isRunning = false;
776 for (size_t i = 0; i < processBufferList_.size(); i++) {
777 if (processBufferList_[i]->GetStreamStatus() &&
778 processBufferList_[i]->GetStreamStatus()->load() == STREAM_RUNNING) {
779 isRunning = true;
780 break;
781 }
782 }
783 return isRunning;
784 }
785
786 void AudioEndpointInner::RecordReSyncPosition()
787 {
788 AUDIO_INFO_LOG("RecordReSyncPosition enter.");
789 uint64_t curHdiWritePos = 0;
790 int64_t writeTime = 0;
791 CHECK_AND_RETURN_LOG(GetDeviceHandleInfo(curHdiWritePos, writeTime), "get device handle info fail.");
792 AUDIO_DEBUG_LOG("get capturer info, curHdiWritePos %{public}" PRIu64", writeTime %{public}" PRId64".",
793 curHdiWritePos, writeTime);
794 int64_t temp = ClockTime::GetCurNano() - writeTime;
795 if (temp > spanDuration_) {
796 AUDIO_WARNING_LOG("GetDeviceHandleInfo cost long time %{public}" PRIu64".", temp);
797 }
798
799 writeTimeModel_.ResetFrameStamp(curHdiWritePos, writeTime);
800 uint64_t nextDstReadPos = curHdiWritePos;
801 uint64_t nextDstWritePos = curHdiWritePos;
802 InitAudiobuffer(false);
803 int32_t ret = dstAudioBuffer_->ResetCurReadWritePos(nextDstReadPos, nextDstWritePos, false);
804 CHECK_AND_RETURN_LOG(ret == SUCCESS, "ResetCurReadWritePos failed.");
805
806 SpanInfo *nextReadSpan = dstAudioBuffer_->GetSpanInfo(nextDstReadPos);
807 CHECK_AND_RETURN_LOG(nextReadSpan != nullptr, "GetSpanInfo failed.");
808 nextReadSpan->offsetInFrame = nextDstReadPos;
809 nextReadSpan->spanStatus = SpanStatus::SPAN_WRITE_DONE;
810 }
811
812 void AudioEndpointInner::ReSyncPosition()
813 {
814 Trace loopTrace("AudioEndpoint::ReSyncPosition");
815 uint64_t curHdiReadPos = 0;
816 int64_t readTime = 0;
817 bool res = GetDeviceHandleInfo(curHdiReadPos, readTime);
818 CHECK_AND_RETURN_LOG(res, "ReSyncPosition call GetDeviceHandleInfo failed.");
819 int64_t curTime = ClockTime::GetCurNano();
820 int64_t temp = curTime - readTime;
821 if (temp > spanDuration_) {
822 AUDIO_ERR_LOG("GetDeviceHandleInfo may cost long time.");
823 }
824
825 readTimeModel_.ResetFrameStamp(curHdiReadPos, readTime);
826 uint64_t nextDstWritePos = curHdiReadPos + dstSpanSizeInframe_;
827 InitAudiobuffer(false);
828 int32_t ret = dstAudioBuffer_->ResetCurReadWritePos(nextDstWritePos, nextDstWritePos, false);
829 CHECK_AND_RETURN_LOG(ret == SUCCESS, "ResetCurReadWritePos failed.");
830
831 SpanInfo *nextWriteSpan = dstAudioBuffer_->GetSpanInfo(nextDstWritePos);
832 CHECK_AND_RETURN_LOG(nextWriteSpan != nullptr, "GetSpanInfo failed.");
833 nextWriteSpan->offsetInFrame = nextDstWritePos;
834 nextWriteSpan->spanStatus = SpanStatus::SPAN_READ_DONE;
835 return;
836 }
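// Note on the two resync paths: for capture, RecordReSyncPosition() aligns both buffer
// positions to the current HDI write position; for playback, ReSyncPosition() above places
// the next server write one span (dstSpanSizeInframe_) ahead of the HDI read position so the
// sink never reads a span that is still being filled.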
837
838 bool AudioEndpointInner::StartDevice(EndpointStatus preferredState)
839 {
840 AUDIO_INFO_LOG("StartDevice enter.");
841 // how to modify the status while unlinked and started?
842
843 AudioXCollie audioXCollie("AudioEndpointInner::StartDevice", START_DEVICE_TIMEOUT,
844 [](void *) {
845 AUDIO_ERR_LOG("[xcollie] StartDevice timeout");
846 }, nullptr, AUDIO_XCOLLIE_FLAG_LOG | AUDIO_XCOLLIE_FLAG_RECOVERY);
847 // StartDevice and UnlinkProcessStream may be called concurrently
848 std::unique_lock<std::mutex> listLock(listLock_);
849
850 CHECK_AND_RETURN_RET_LOG(endpointStatus_ == IDEL, false, "Endpoint status is %{public}s",
851 GetStatusStr(endpointStatus_).c_str());
852 endpointStatus_ = STARTING;
853 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
854 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
855 if ((deviceInfo_.deviceRole_ == INPUT_DEVICE && (source == nullptr || source->Start() != SUCCESS)) ||
856 (deviceInfo_.deviceRole_ == OUTPUT_DEVICE && (sink == nullptr || sink->Start() != SUCCESS))) {
857 HandleStartDeviceFailed();
858 return false;
859 }
860 isStarted_ = true;
861 ResetZeroVolumeState();
862
863 Trace trace("AudioEndpointInner::StartDupStream");
864 {
865 std::lock_guard<std::mutex> lock(dupMutex_);
866 for (auto &capture : fastCaptureInfos_) {
867 if (capture.second.isInnerCapEnabled && capture.second.dupStream != nullptr) {
868 capture.second.dupStream->Start();
869 }
870 }
871 }
872
873 std::unique_lock<std::mutex> lock(loopThreadLock_);
874 needReSyncPosition_ = true;
875 endpointStatus_ = IsAnyProcessRunningInner() ? RUNNING : IDEL;
876 if (preferredState != INVALID) {
877 AUDIO_INFO_LOG("Preferred state: %{public}d, current: %{public}d", preferredState, endpointStatus_.load());
878 endpointStatus_ = preferredState;
879 }
880 workThreadCV_.notify_all();
881 AUDIO_DEBUG_LOG("StartDevice out, status is %{public}s", GetStatusStr(endpointStatus_).c_str());
882
883 listLock.unlock();
884 return true;
885 }
886
887 void AudioEndpointInner::HandleStartDeviceFailed()
888 {
889 AUDIO_ERR_LOG("Start failed for %{public}d, endpoint type %{public}u, process list size: %{public}zu.",
890 deviceInfo_.deviceRole_, endpointType_, processList_.size());
891 isStarted_ = false;
892 if (processList_.size() <= 1) { // The endpoint only has the current stream
893 endpointStatus_ = UNLINKED;
894 } else {
895 endpointStatus_ = IDEL;
896 }
897 workThreadCV_.notify_all();
898 }
899
900 // will not change state to stopped
901 bool AudioEndpointInner::DelayStopDevice()
902 {
903 AUDIO_INFO_LOG("Status:%{public}s", GetStatusStr(endpointStatus_).c_str());
904
905 // Clear data buffer to avoid noise in some case.
906 if (dstAudioBuffer_ != nullptr) {
907 int32_t ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0,
908 dstAudioBuffer_->GetDataSize());
909 if (ret != EOK) {
910 AUDIO_WARNING_LOG("reset buffer fail, ret %{public}d.", ret);
911 }
912 }
913
914 {
915 Trace trace("AudioEndpointInner::StopDupStreamInDelay");
916 std::lock_guard<std::mutex> lock(dupMutex_);
917 for (auto &capture : fastCaptureInfos_) {
918 if (capture.second.isInnerCapEnabled && capture.second.dupStream != nullptr) {
919 capture.second.dupStream->Stop();
920 }
921 }
922 }
923
924 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
925 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
926 CHECK_AND_RETURN_RET_LOG(source != nullptr && source->Stop() == SUCCESS,
927 false, "Source stop failed.");
928 } else {
929 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
930 CHECK_AND_RETURN_RET_LOG(endpointStatus_ == IDEL && sink != nullptr && sink->Stop() == SUCCESS,
931 false, "Sink stop failed.");
932 }
933 isStarted_ = false;
934 return true;
935 }
936
937 bool AudioEndpointInner::StopDevice()
938 {
939 DeinitLatencyMeasurement();
940
941 AUDIO_INFO_LOG("StopDevice with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
942 // todo
943 endpointStatus_ = STOPPING;
944 // Clear data buffer to avoid noise in some case.
945 if (dstAudioBuffer_ != nullptr) {
946 int32_t ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0,
947 dstAudioBuffer_->GetDataSize());
948 AUDIO_INFO_LOG("StopDevice clear buffer ret:%{public}d", ret);
949 }
950
951 {
952 Trace trace("AudioEndpointInner::StopDupStream");
953 std::lock_guard<std::mutex> lock(dupMutex_);
954 for (auto &capture : fastCaptureInfos_) {
955 if (capture.second.isInnerCapEnabled && capture.second.dupStream != nullptr) {
956 capture.second.dupStream->Stop();
957 }
958 }
959 }
960
961 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
962 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
963 CHECK_AND_RETURN_RET_LOG(source != nullptr && source->Stop() == SUCCESS,
964 false, "Source stop failed.");
965 } else {
966 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
967 CHECK_AND_RETURN_RET_LOG(sink != nullptr && sink->Stop() == SUCCESS, false, "Sink stop failed.");
968 }
969 endpointStatus_ = STOPPED;
970 isStarted_ = false;
971 return true;
972 }
973
974 int32_t AudioEndpointInner::OnStart(IAudioProcessStream *processStream)
975 {
976 InitLatencyMeasurement();
977 AUDIO_PRERELEASE_LOGI("OnStart endpoint status:%{public}s", GetStatusStr(endpointStatus_).c_str());
978 if (endpointStatus_ == RUNNING) {
979 AUDIO_INFO_LOG("OnStart find endpoint already in RUNNING.");
980 return SUCCESS;
981 }
982 if (endpointStatus_ == IDEL) {
983 // call sink start
984 if (!isStarted_) {
985 CHECK_AND_RETURN_RET_LOG(StartDevice(RUNNING), ERR_OPERATION_FAILED, "StartDevice failed");
986 }
987 }
988
989 endpointStatus_ = RUNNING;
990 delayStopTime_ = INT64_MAX;
991 return SUCCESS;
992 }
993
994 int32_t AudioEndpointInner::OnPause(IAudioProcessStream *processStream)
995 {
996 AUDIO_PRERELEASE_LOGI("OnPause endpoint status:%{public}s", GetStatusStr(endpointStatus_).c_str());
997 if (endpointStatus_ == RUNNING) {
998 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
999 }
1000 if (endpointStatus_ == IDEL) {
1001 // delay call sink stop when no process running
1002 AUDIO_PRERELEASE_LOGI("OnPause status is IDEL, need delay call stop");
1003 delayStopTime_ = ClockTime::GetCurNano() + ((clientConfig_.audioMode == AUDIO_MODE_PLAYBACK)
1004 ? PLAYBACK_DELAY_STOP_HDI_TIME_NS : RECORDER_DELAY_STOP_HDI_TIME_NS);
1005 }
1006 // todo
1007 return SUCCESS;
1008 }
1009
1010 int32_t AudioEndpointInner::OnUpdateHandleInfo(IAudioProcessStream *processStream)
1011 {
1012 Trace trace("AudioEndpoint::OnUpdateHandleInfo");
1013 bool isFind = false;
1014 std::lock_guard<std::mutex> lock(listLock_);
1015 auto processItr = processList_.begin();
1016 while (processItr != processList_.end()) {
1017 if (*processItr != processStream) {
1018 processItr++;
1019 continue;
1020 }
1021 std::shared_ptr<OHAudioBufferBase> processBuffer = (*processItr)->GetStreamBuffer();
1022 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_OPERATION_FAILED, "Process found but buffer is null");
1023 uint64_t proHandleFrame = 0;
1024 int64_t proHandleTime = 0;
1025 processBuffer->GetHandleInfo(proHandleFrame, proHandleTime);
1026
1027 isFind = true;
1028 break;
1029 }
1030 CHECK_AND_RETURN_RET_LOG(isFind, ERR_OPERATION_FAILED, "Can not find any process to UpdateHandleInfo");
1031 return SUCCESS;
1032 }
1033
1034 void AudioEndpointInner::AddProcessStreamToList(IAudioProcessStream *processStream,
1035 const std::shared_ptr<OHAudioBufferBase> &processBuffer)
1036 {
1037 std::lock_guard<std::mutex> lock(listLock_);
1038 processList_.push_back(processStream);
1039 processBufferList_.push_back(processBuffer);
1040 processTmpBufferList_.push_back({});
1041 }
1042
1043 int32_t AudioEndpointInner::LinkProcessStream(IAudioProcessStream *processStream, bool startWhenLinking)
1044 {
1045 CHECK_AND_RETURN_RET_LOG(processStream != nullptr, ERR_INVALID_PARAM, "IAudioProcessStream is null");
1046 std::shared_ptr<OHAudioBufferBase> processBuffer = processStream->GetStreamBuffer();
1047 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_PARAM, "processBuffer is null");
1048 CHECK_AND_RETURN_RET_LOG(processBuffer->GetStreamStatus() != nullptr, ERR_INVALID_PARAM, "stream status is null");
1049
1050 CHECK_AND_RETURN_RET_LOG(processList_.size() < MAX_LINKED_PROCESS, ERR_OPERATION_FAILED, "reach link limit.");
1051
1052 AUDIO_INFO_LOG("LinkProcessStream start status is:%{public}s.", GetStatusStr(endpointStatus_).c_str());
1053 processBuffer->SetSessionId(processStream->GetAudioSessionId());
1054 bool needEndpointRunning = processBuffer->GetStreamStatus()->load() == STREAM_RUNNING;
1055
1056 AddEndpointStreamVolume(processStream);
1057
1058 if (endpointStatus_ == STARTING) {
1059 AUDIO_INFO_LOG("LinkProcessStream wait start begin.");
1060 std::unique_lock<std::mutex> lock(loopThreadLock_);
1061 workThreadCV_.wait_for(lock, std::chrono::milliseconds(SLEEP_TIME_IN_DEFAULT), [this] {
1062 return endpointStatus_ != STARTING;
1063 });
1064 }
1065
1066 if (endpointStatus_ == RUNNING) {
1067 LinkProcessStreamExt(processStream, processBuffer);
1068 return SUCCESS;
1069 }
1070
1071 if (endpointStatus_ == UNLINKED) {
1072 endpointStatus_ = IDEL; // handle push_back in IDEL
1073 if (isDeviceRunningInIdel_) {
1074 delayStopTime_ = INT64_MAX;
1075 CHECK_AND_RETURN_RET_LOG(StartDevice(), ERR_OPERATION_FAILED, "StartDevice failed");
1076 delayStopTime_ = ClockTime::GetCurNano() + ((clientConfig_.audioMode == AUDIO_MODE_PLAYBACK)
1077 ? PLAYBACK_DELAY_STOP_HDI_TIME_NS : LINK_RECORDER_DELAY_STOP_HDI_TIME_NS);
1078 }
1079 }
1080
1081 if (endpointStatus_ == IDEL) {
1082 {
1083 AddProcessStreamToList(processStream, processBuffer);
1084 }
1085 if (!needEndpointRunning || !startWhenLinking) {
1086 AUDIO_INFO_LOG("LinkProcessStream success, process stream status is not running.");
1087 return SUCCESS;
1088 }
1089 // needEndpointRunning = true
1090 if (isDeviceRunningInIdel_) {
1091 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
1092 } else {
1093 // needEndpointRunning = true & isDeviceRunningInIdel_ = false
1094 // KeepWorkloopRunning will wait on IDEL
1095 CHECK_AND_RETURN_RET_LOG(StartDevice(), ERR_OPERATION_FAILED, "StartDevice failed");
1096 }
1097 AUDIO_INFO_LOG("LinkProcessStream success with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1098 return SUCCESS;
1099 }
1100
1101 AUDIO_INFO_LOG("LinkProcessStream success with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1102 return SUCCESS;
1103 }
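// State handling summary for LinkProcessStream(): STARTING waits (up to SLEEP_TIME_IN_DEFAULT
// ms) for the work loop to settle; RUNNING simply appends the stream; UNLINKED is promoted to
// IDEL (optionally pre-starting the device when isDeviceRunningInIdel_ is set); IDEL appends
// the stream and only starts the device if the client is already running and startWhenLinking
// is true.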
1104
1105 void AudioEndpointInner::AddEndpointStreamVolume(IAudioProcessStream *processStream)
1106 {
1107 Trace trace("AudioEndpointInner::AddEndpointStreamVolume");
1108 bool isSystemApp = CheckoutSystemAppUtil::CheckoutSystemApp(processStream->GetAppInfo().appUid);
1109 StreamVolumeParams streamVolumeParams = { processStream->GetAudioSessionId(),
1110 processStream->GetAudioProcessConfig().streamType,
1111 processStream->GetAudioProcessConfig().rendererInfo.streamUsage,
1112 processStream->GetAppInfo().appUid, processStream->GetAppInfo().appPid, isSystemApp,
1113 processStream->GetAudioProcessConfig().rendererInfo.volumeMode,
1114 processStream->GetAudioProcessConfig().rendererInfo.isVirtualKeyboard };
1115 AudioVolume::GetInstance()->AddStreamVolume(streamVolumeParams);
1116 AUDIO_INFO_LOG("when stream start, add streamVolume for this stream");
1117 }
1118
1119 void AudioEndpointInner::LinkProcessStreamExt(IAudioProcessStream *processStream,
1120 const std::shared_ptr<OHAudioBufferBase>& processBuffer)
1121 {
1122 AddProcessStreamToList(processStream, processBuffer);
1123 AUDIO_INFO_LOG("LinkProcessStream success in RUNNING.");
1124 }
1125
1126 int32_t AudioEndpointInner::UnlinkProcessStream(IAudioProcessStream *processStream)
1127 {
1128 AUDIO_INFO_LOG("UnlinkProcessStream in status:%{public}s.", GetStatusStr(endpointStatus_).c_str());
1129 CHECK_AND_RETURN_RET_LOG(processStream != nullptr, ERR_INVALID_PARAM, "IAudioProcessStream is null");
1130 std::shared_ptr<OHAudioBufferBase> processBuffer = processStream->GetStreamBuffer();
1131 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_PARAM, "processBuffer is null");
1132 AudioVolume::GetInstance()->RemoveStreamVolume(processStream->GetAudioSessionId());
1133
1134 bool isFind = false;
1135 std::lock_guard<std::mutex> lock(listLock_);
1136 auto processItr = processList_.begin();
1137 auto bufferItr = processBufferList_.begin();
1138 auto tmpBufferItr = processTmpBufferList_.begin();
1139 while (processItr != processList_.end()) {
1140 if (*processItr == processStream && *bufferItr == processBuffer) {
1141 processList_.erase(processItr);
1142 processBufferList_.erase(bufferItr);
1143 processTmpBufferList_.erase(tmpBufferItr);
1144 isFind = true;
1145 break;
1146 } else {
1147 processItr++;
1148 bufferItr++;
1149 tmpBufferItr++;
1150 }
1151 }
1152 if (processList_.size() == 0) {
1153 StopDevice();
1154 endpointStatus_ = UNLINKED;
1155 } else if (!IsAnyProcessRunningInner()) {
1156 endpointStatus_ = IDEL;
1157 delayStopTime_ = DELAY_STOP_HDI_TIME_WHEN_NO_RUNNING_NS;
1158 }
1159
1160 AUDIO_INFO_LOG("UnlinkProcessStream end, %{public}s the process.", (isFind ? "find and remove" : "not find"));
1161 return SUCCESS;
1162 }
1163
1164 bool AudioEndpointInner::IsBufferDataInsufficient(int32_t readableDataFrame, uint32_t spanSizeInFrame)
1165 {
1166 if (readableDataFrame < 0) {
1167 return false;
1168 }
1169
1170 if (static_cast<uint32_t>(readableDataFrame) >= spanSizeInFrame) {
1171 return false;
1172 }
1173
1174 return true;
1175 }
1176
1177 bool AudioEndpointInner::CheckAllBufferReady(int64_t checkTime, uint64_t curWritePos)
1178 {
1179 bool isAllReady = true;
1180 bool needCheckStandby = false;
1181 {
1182 // lock list without sleep
1183 std::lock_guard<std::mutex> lock(listLock_);
1184 for (size_t i = 0; i < processBufferList_.size(); i++) {
1185 std::shared_ptr<OHAudioBufferBase> tempBuffer = processBufferList_[i];
1186 uint64_t eachCurReadPos = processBufferList_[i]->GetCurReadFrame();
1187 lastHandleProcessTime_ = checkTime;
1188 processBufferList_[i]->SetHandleInfo(eachCurReadPos, lastHandleProcessTime_); // update handle info
1189 if (tempBuffer->GetStreamStatus() &&
1190 tempBuffer->GetStreamStatus()->load() != StreamStatus::STREAM_RUNNING) {
1191 // Process is not running, server will continue to check the same location in the next cycle.
1192 int64_t duration = 5000000; // 5ms
1193 processBufferList_[i]->SetHandleInfo(eachCurReadPos, lastHandleProcessTime_ + duration);
1194 continue; // process not running
1195 }
1196 // Status is RUNNING
1197 int64_t current = ClockTime::GetCurNano();
1198 int64_t lastWrittenTime = tempBuffer->GetLastWrittenTime();
1199 uint32_t sessionId = processList_[i]->GetAudioSessionId();
1200 if (current - lastWrittenTime > WAIT_CLIENT_STANDBY_TIME_NS) {
1201 Trace trace("AudioEndpoint::MarkClientStandby:" + std::to_string(sessionId));
1202 AUDIO_INFO_LOG("change the status to stand-by, session %{public}u", sessionId);
1203 processList_[i]->EnableStandby();
1204 needCheckStandby = true;
1205 continue;
1206 }
1207 int32_t readableDataFrame = tempBuffer->GetReadableDataFrames();
1208 uint32_t spanSizeInFrame = processList_[i]->GetSpanSizeInFrame();
1209 if (IsBufferDataInsufficient(readableDataFrame, spanSizeInFrame)) {
1210 isAllReady = false;
1211 AudioPerformanceMonitor::GetInstance().RecordSilenceState(sessionId, true, PIPE_TYPE_LOWLATENCY_OUT,
1212 processList_[i]->GetAppInfo().appUid);
1213 continue;
1214 } else {
1215 AudioPerformanceMonitor::GetInstance().RecordSilenceState(sessionId, false, PIPE_TYPE_LOWLATENCY_OUT,
1216 processList_[i]->GetAppInfo().appUid);
1217 }
1218 // process Status is RUNNING && buffer status is WRITE_DONE
1219 tempBuffer->SetLastWrittenTime(current);
1220 }
1221 }
1222
1223 if (needCheckStandby) {
1224 CheckStandBy();
1225 }
1226
1227 if (!isAllReady) {
1228 WaitAllProcessReady(curWritePos);
1229 }
1230 return isAllReady;
1231 }
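// Standby rule used above: a client whose stream status is STREAM_RUNNING but which has not
// written for more than WAIT_CLIENT_STANDBY_TIME_NS (1 s) is switched to standby via
// EnableStandby(); a client with less than one span (spanSizeInFrame) of readable data marks
// the cycle as "not all ready" and is reported as silent to the performance monitor.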
1232
1233 void AudioEndpointInner::WaitAllProcessReady(uint64_t curWritePos)
1234 {
1235 Trace trace("AudioEndpoint::WaitAllProcessReady");
1236 int64_t tempWakeupTime = readTimeModel_.GetTimeOfPos(curWritePos) + WRITE_TO_HDI_AHEAD_TIME;
1237 if (tempWakeupTime - ClockTime::GetCurNano() < ONE_MILLISECOND_DURATION) {
1238 ClockTime::RelativeSleep(ONE_MILLISECOND_DURATION);
1239 } else {
1240 ClockTime::AbsoluteSleep(tempWakeupTime); // sleep to hdi read time ahead 1ms.
1241 }
1242 }
1243
1244 void AudioEndpointInner::MixToDupStream(const std::vector<AudioStreamData> &srcDataList, int32_t innerCapId)
1245 {
1246 Trace trace("AudioEndpointInner::MixToDupStream");
1247 CHECK_AND_RETURN_LOG(fastCaptureInfos_.count(innerCapId) && fastCaptureInfos_[innerCapId].dupStream != nullptr,
1248 "captureInfo is errro");
1249 CHECK_AND_RETURN_LOG(dupBuffer_ != nullptr, "Buffer is not ready");
1250
1251 std::vector<AudioStreamData> tempList;
1252 for (size_t i = 0; i < srcDataList.size(); i++) {
1253 if (!srcDataList[i].isInnerCapeds.count(innerCapId) ||
1254 !srcDataList[i].isInnerCapeds.at(innerCapId)) {
1255 continue;
1256 }
1257 AudioStreamData cur = srcDataList[i];
1258 cur.volumeStart = cur.volumeHap;
1259 tempList.push_back(cur);
1260 }
1261 BufferDesc temp;
1262 temp.buffer = dupBuffer_.get();
1263 temp.bufLength = dupBufferSize_;
1264 temp.dataLength = dupBufferSize_;
1265 AudioStreamData dstStream;
1266 dstStream.streamInfo = dstStreamInfo_;
1267 dstStream.bufferDesc = temp;
1268 FormatConverter::DataAccumulationFromVolume(tempList, dstStream);
1269
1270 int32_t ret;
1271 if (GetEngineFlag() == 1) {
1272 WriteDupBufferInner(temp, innerCapId);
1273 } else {
1274 ret = fastCaptureInfos_[innerCapId].dupStream->EnqueueBuffer(temp);
1275 CHECK_AND_RETURN_LOG(ret == SUCCESS, "EnqueueBuffer failed:%{public}d", ret);
1276 }
1277
1278 ret = memset_s(reinterpret_cast<void *>(dupBuffer_.get()), dupBufferSize_, 0, dupBufferSize_);
1279 if (ret != EOK) {
1280 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d", ret);
1281 }
1282 }
1283
1284 void AudioEndpointInner::ProcessData(const std::vector<AudioStreamData> &srcDataList, const AudioStreamData &dstData)
1285 {
1286 bool ret = FormatConverter::DataAccumulationFromVolume(srcDataList, dstData);
1287 CHECK_AND_RETURN_LOG(ret, "Format may not match");
1288
1289 ChannelVolumes channelVolumes = VolumeTools::CountVolumeLevel(
1290 dstData.bufferDesc, dstData.streamInfo.format, dstData.streamInfo.channels);
1291 if (!isExistLoopback_) {
1292 ZeroVolumeCheck(std::accumulate(channelVolumes.volStart, channelVolumes.volStart +
1293 channelVolumes.channel, static_cast<int64_t>(0)) / channelVolumes.channel);
1294 }
1295 }
1296
1297 void AudioEndpointInner::HandleRendererDataParams(const AudioStreamData &srcData, const AudioStreamData &dstData,
1298 bool applyVol)
1299 {
1300 if (srcData.streamInfo.encoding != dstData.streamInfo.encoding) {
1301 AUDIO_ERR_LOG("Different encoding formats");
1302 return;
1303 }
1304 if (srcData.streamInfo.format == SAMPLE_S16LE && srcData.streamInfo.channels == STEREO) {
1305 return ProcessSingleData(srcData, dstData, applyVol);
1306 }
1307
1308 if (srcData.streamInfo.format == SAMPLE_S16LE || srcData.streamInfo.format == SAMPLE_F32LE) {
1309 CHECK_AND_RETURN_LOG(processList_.size() > 0 && processList_[0] != nullptr, "No available process");
1310 BufferDesc &convertedBuffer = processList_[0]->GetConvertedBuffer();
1311 int32_t ret = -1;
1312 if (srcData.streamInfo.format == SAMPLE_S16LE && srcData.streamInfo.channels == MONO) {
1313 ret = FormatConverter::S16MonoToS16Stereo(srcData.bufferDesc, convertedBuffer);
1314 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Convert channel from s16 mono to s16 stereo failed");
1315 } else if (srcData.streamInfo.format == SAMPLE_F32LE && srcData.streamInfo.channels == MONO) {
1316 ret = FormatConverter::F32MonoToS16Stereo(srcData.bufferDesc, convertedBuffer);
1317 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Convert channel from f32 mono to s16 stereo failed");
1318 } else if (srcData.streamInfo.format == SAMPLE_F32LE && srcData.streamInfo.channels == STEREO) {
1319 ret = FormatConverter::F32StereoToS16Stereo(srcData.bufferDesc, convertedBuffer);
1320 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Convert channel from f32 stereo to s16 stereo failed");
1321 } else {
1322 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Unsupport conversion");
1323 }
1324 AudioStreamData dataAfterProcess = srcData;
1325 dataAfterProcess.bufferDesc = convertedBuffer;
1326 ProcessSingleData(dataAfterProcess, dstData, applyVol);
1327 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1328 convertedBuffer.bufLength);
1329 CHECK_AND_RETURN_LOG(ret == EOK, "memset converted buffer to 0 failed");
1330 }
1331 }
1332
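// Volume is applied in Q16 fixed point: (1 << VOLUME_SHIFT_NUMBER) == 65536 represents unity gain.
// E.g. (hypothetical values): a source sample of 1000 with vol == 65536 gives
// sum = (1000 * 65536) >> 16 = 1000, which is then clamped to [INT16_MIN, INT16_MAX].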
1333 void AudioEndpointInner::ProcessSingleData(const AudioStreamData &srcData, const AudioStreamData &dstData,
1334 bool applyVol)
1335 {
1336 CHECK_AND_RETURN_LOG(dstData.streamInfo.format == SAMPLE_S16LE && dstData.streamInfo.channels == STEREO,
1337 "ProcessData failed, streamInfo are not support");
1338
1339 size_t dataLength = dstData.bufferDesc.dataLength;
1340 dataLength /= 2; // SAMPLE_S16LE --> 2 bytes per sample
1341 int16_t *dstPtr = reinterpret_cast<int16_t *>(dstData.bufferDesc.buffer);
1342 for (size_t offset = 0; dataLength > 0; dataLength--) {
1343 int32_t vol = 1 << VOLUME_SHIFT_NUMBER;
1344 int16_t *srcPtr = reinterpret_cast<int16_t *>(srcData.bufferDesc.buffer) + offset;
1345 int32_t sum = applyVol ? (*srcPtr * static_cast<int64_t>(vol)) >> VOLUME_SHIFT_NUMBER : *srcPtr; // 1/65536
1346 offset++;
1347 *dstPtr++ = sum > INT16_MAX ? INT16_MAX : (sum < INT16_MIN ? INT16_MIN : sum);
1348 }
1349 }
1350
1351 // call with listLock_ hold
GetAllReadyProcessData(std::vector<AudioStreamData> & audioDataList,std::function<void ()> & moveClientsIndex)1352 void AudioEndpointInner::GetAllReadyProcessData(std::vector<AudioStreamData> &audioDataList,
1353 std::function<void()> &moveClientsIndex)
1354 {
1355 isExistLoopback_ = false;
1356 audioHapticsSyncId_ = 0;
1357 std::vector<std::function<void()>> moveClientIndexVector;
1358 for (size_t i = 0; i < processBufferList_.size(); i++) {
1359 CHECK_AND_CONTINUE_LOG(processBufferList_[i] != nullptr, "this processBuffer is nullptr!");
1360 uint64_t curRead = processBufferList_[i]->GetCurReadFrame();
1361 Trace trace("AudioEndpoint::ReadProcessData->" + std::to_string(curRead));
1362 CHECK_AND_CONTINUE_LOG(processList_[i] != nullptr, "this process is nullptr!");
1363 auto processConfig = processList_[i]->GetAudioProcessConfig();
1364 if (processConfig.rendererInfo.isLoopback) {
1365 isExistLoopback_ = true;
1366 }
1367 // If the process carries a sync ID and this is its first frame (curRead == 0),
1368 // record the sync ID for this mixing cycle.
1369 if (processList_[i]->GetAudioHapticsSyncId() > 0 && curRead == 0) {
1370 audioHapticsSyncId_ = processList_[i]->GetAudioHapticsSyncId();
1371 }
1372 std::function<void()> moveClientIndexFunc;
1373 GetAllReadyProcessDataSub(i, audioDataList, curRead, moveClientIndexFunc);
1374 moveClientIndexVector.push_back(moveClientIndexFunc);
1375 }
1376
1377 moveClientsIndex =
1378 [moveClientIndexVec = std::move(moveClientIndexVector)] () {
1379 for (const auto& moveFunc : moveClientIndexVec) {
1380 if (moveFunc) {
1381 moveFunc();
1382 }
1383 }
1384 };
1385 }
1386
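// Gain composition for stream i:
//   volumeFromOhaudioBuffer = streamVolume * duckFactor * muteFactor * (1 << VOLUME_SHIFT_NUMBER)
//   baseVolume = volumeFromOhaudioBuffer * appVolume * doNotDisturbStatusVolume
// The shared system volume (vol.volumeFloat) is applied only for local, non-absolute-volume
// devices with a valid shared volume and a non-virtual-keyboard stream; in all other branches
// only baseVolume (or 0 when muted) is used.
// E.g. (hypothetical values): streamVolume 0.5, duck/mute/app 1.0, doNotDisturb 1 and system
// volume 0.8 on a local speaker -> volumeStart = 0.5 * 65536 * 0.8 = 26214.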
1387 AudioEndpointInner::VolumeResult AudioEndpointInner::CalculateVolume(size_t i)
1388 {
1389 Volume vol = {true, 1.0f, 0};
1390
1391 AudioStreamType streamType = processList_[i]->GetAudioStreamType();
1392 AudioVolumeType volumeType = VolumeUtils::GetVolumeTypeFromStreamType(streamType);
1393 DeviceType deviceType = PolicyHandler::GetInstance().GetActiveOutPutDevice();
1394 bool getVolumeRet = PolicyHandler::GetInstance().GetSharedVolume(volumeType, deviceType, vol);
1395 int32_t doNotDisturbStatusVolume = static_cast<int32_t>(AudioVolume::GetInstance()->GetDoNotDisturbStatusVolume(
1396 streamType, clientConfig_.appInfo.appUid, processList_[i]->GetAudioSessionId()));
1397 float appVolume = AudioVolume::GetInstance()->GetAppVolume(clientConfig_.appInfo.appUid,
1398 clientConfig_.rendererInfo.volumeMode);
1399 int32_t volumeFromOhaudioBuffer = processBufferList_[i]->GetStreamVolume() *
1400 processBufferList_[i]->GetDuckFactor() * processBufferList_[i]->GetMuteFactor() * (1 << VOLUME_SHIFT_NUMBER);
1401 float baseVolume = volumeFromOhaudioBuffer * appVolume * doNotDisturbStatusVolume;
1402
1403 VolumeResult result;
1404 if (deviceInfo_.networkId_ != LOCAL_NETWORK_ID || (deviceInfo_.deviceType_ == DEVICE_TYPE_BLUETOOTH_A2DP
1405 && volumeType == STREAM_MUSIC && PolicyHandler::GetInstance().IsAbsVolumeSupported()) || !getVolumeRet ||
1406 IsNearlinkAbsVolSupportStream(deviceInfo_.deviceType_, volumeType)) {
1407 result.volumeStart = vol.isMute ? 0 : static_cast<int32_t>(baseVolume);
1408 } else if (clientConfig_.rendererInfo.isVirtualKeyboard) {
1409 result.volumeStart = vol.isMute ? 0 : static_cast<int32_t>(baseVolume);
1410 } else {
1411 result.volumeStart = vol.isMute ? 0 : static_cast<int32_t>(baseVolume * vol.volumeFloat);
1412 }
1413
1414 result.muteFlag = processList_[i]->GetMuteState();
1415 result.volumeEnd = volumeFromOhaudioBuffer;
1416 result.volumeHap = result.muteFlag ? 0 : volumeFromOhaudioBuffer;
1417 AudioStreamMonitor::GetInstance().UpdateMonitorVolume(processList_[i]->GetAudioSessionId(), result.volumeStart);
1418
1419 return result;
1420 }
1421
1422 bool AudioEndpointInner::PrepareRingBuffer(size_t i, uint64_t curRead, RingBufferWrapper& ringBuffer)
1423 {
1424 int32_t ret = processBufferList_[i]->GetAllReadableBufferFromPosFrame(curRead, ringBuffer);
1425 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS && ringBuffer.dataLength > 0, false,
1426 "getBuffer failed ret: %{public}d lenth: %{public}zu",
1427 ret, ringBuffer.dataLength);
1428
1429 auto byteSizePerFrame = processList_[i]->GetByteSizePerFrame();
1430 CHECK_AND_RETURN_RET_LOG(byteSizePerFrame != 0, false, "byteSizePerFrame is 0");
1431
1432 size_t spanSizeInByte = processList_[i]->GetSpanSizeInFrame() * byteSizePerFrame;
1433 if (ringBuffer.dataLength > spanSizeInByte) {
1434 ringBuffer.dataLength = spanSizeInByte;
1435 }
1436
1437 return true;
1438 }
1439
1440 void AudioEndpointInner::SetupMoveCallback(size_t i, uint64_t curRead, const RingBufferWrapper& ringBuffer,
1441 std::function<void()>& moveClientIndex
1442 )
1443 {
1444 auto byteSizePerFrame = processList_[i]->GetByteSizePerFrame();
1445 CHECK_AND_RETURN_LOG(byteSizePerFrame != 0, "byteSizePerFrame is 0");
1446 uint64_t readFramePosAfterRead = curRead + (ringBuffer.dataLength / byteSizePerFrame);
1447 auto ohAudioBuffer = processBufferList_[i];
1448 moveClientIndex = [readFramePosAfterRead, ringBuffer, ohAudioBuffer] () mutable {
1449 ohAudioBuffer->SetCurReadFrame(readFramePosAfterRead);
1450 ringBuffer.SetBuffersValueWithSpecifyDataLen(0);
1451 };
1452 }
1453
1454 bool AudioEndpointInner::IsNearlinkAbsVolSupportStream(DeviceType deviceType, AudioVolumeType volumeType)
1455 {
1456 bool isNearlink = deviceType == DEVICE_TYPE_NEARLINK;
1457 bool isMusicStream = volumeType == STREAM_MUSIC;
1458 bool isVoiceCallStream = volumeType == STREAM_VOICE_CALL;
1459 return isNearlink && (isMusicStream || isVoiceCallStream);
1460 }
1461
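// A temporary linear buffer is required when the readable data wraps past the first contiguous
// segment of the ring buffer, or when less than one full span is available, so downstream mixing
// always receives one contiguous span-sized buffer (see PrepareStreamDataBuffer below).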
1462 bool AudioEndpointInner::NeedUseTempBuffer(const RingBufferWrapper &ringBuffer, size_t spanSizeInByte)
1463 {
1464 if (ringBuffer.dataLength > ringBuffer.basicBufferDescs[0].bufLength) {
1465 return true;
1466 }
1467
1468 if (ringBuffer.dataLength < spanSizeInByte) {
1469 return true;
1470 }
1471
1472 return false;
1473 }
1474
1475 void AudioEndpointInner::PrepareStreamDataBuffer(size_t i, size_t spanSizeInByte,
1476 RingBufferWrapper &ringBuffer, AudioStreamData &streamData)
1477 {
1478 if (NeedUseTempBuffer(ringBuffer, spanSizeInByte)) {
1479 processTmpBufferList_[i].resize(0);
1480 processTmpBufferList_[i].resize(spanSizeInByte);
1481 RingBufferWrapper ringBufferDescForContinueData;
1482 ringBufferDescForContinueData.dataLength = ringBuffer.dataLength;
1483 ringBufferDescForContinueData.basicBufferDescs[0].buffer = processTmpBufferList_[i].data();
1484 ringBufferDescForContinueData.basicBufferDescs[0].bufLength = ringBuffer.dataLength;
1485 ringBufferDescForContinueData.CopyInputBufferValueToCurBuffer(ringBuffer);
1486 streamData.bufferDesc.buffer = processTmpBufferList_[i].data();
1487 streamData.bufferDesc.bufLength = spanSizeInByte;
1488 streamData.bufferDesc.dataLength = spanSizeInByte;
1489 } else {
1490 streamData.bufferDesc.buffer = ringBuffer.basicBufferDescs[0].buffer;
1491 streamData.bufferDesc.bufLength = ringBuffer.dataLength;
1492 streamData.bufferDesc.dataLength = ringBuffer.dataLength;
1493 }
1494 }
1495
1496 void AudioEndpointInner::GetAllReadyProcessDataSub(size_t i,
1497 std::vector<AudioStreamData> &audioDataList, uint64_t curRead, std::function<void()> &moveClientIndex)
1498 {
1499 VolumeResult volResult = CalculateVolume(i);
1500 AudioStreamData streamData;
1501 streamData.volumeStart = volResult.volumeStart;
1502 streamData.volumeEnd = volResult.volumeEnd;
1503 streamData.volumeHap = volResult.volumeHap;
1504
1505 streamData.streamInfo = processList_[i]->GetStreamInfo();
1506 streamData.isInnerCapeds = processList_[i]->GetInnerCapState();
1507
1508 Trace traceVol("VolumeProcess " + std::to_string(volResult.volumeStart) +
1509 " sessionid:" + std::to_string(processList_[i]->GetAudioSessionId()) +
1510 (volResult.muteFlag ? " muted" : " unmuted"));
1511
1512 RingBufferWrapper ringBuffer;
1513 if (!PrepareRingBuffer(i, curRead, ringBuffer)) {
1514 auto tempProcess = processList_[i];
1515 CHECK_AND_RETURN_LOG(tempProcess, "tempProcess is nullptr!");
1516 if (tempProcess->GetStreamStatus() == STREAM_RUNNING) {
1517 tempProcess->AddNoDataFrameSize();
1518 }
1519 return;
1520 }
1521
1522 SetupMoveCallback(i, curRead, ringBuffer, moveClientIndex);
1523
1524 if (volResult.muteFlag) {
1525 ringBuffer.SetBuffersValueWithSpecifyDataLen(0);
1526 }
1527 size_t spanSizeInByte = processList_[i]->GetSpanSizeInFrame() * processList_[i]->GetByteSizePerFrame();
1528 PrepareStreamDataBuffer(i, spanSizeInByte, ringBuffer, streamData);
1529 CheckPlaySignal(streamData.bufferDesc.buffer, streamData.bufferDesc.bufLength);
1530 audioDataList.push_back(streamData);
1531 processList_[i]->WriteDumpFile(static_cast<void *>(streamData.bufferDesc.buffer),
1532 streamData.bufferDesc.bufLength);
1533 WriteMuteDataSysEvent(streamData.bufferDesc.buffer, streamData.bufferDesc.bufLength, i);
1534 HandleMuteWriteData(streamData.bufferDesc, i);
1535 }
1536
1537 void AudioEndpointInner::HandleMuteWriteData(BufferDesc &bufferDesc, int32_t index)
1538 {
1539 CHECK_AND_RETURN_LOG(static_cast<size_t>(index + 1) <= processList_.size(), "invalid index");
1540 auto tempProcess = processList_[index];
1541 CHECK_AND_RETURN_LOG(tempProcess, "tempProcess is nullptr");
1542 tempProcess->AddNormalFrameSize();
1543 int64_t muteFrameCnt = 0;
1544 VolumeTools::CalcMuteFrame(bufferDesc, dstStreamInfo_, logUtilsTag_, volumeDataCount_, muteFrameCnt);
1545 tempProcess->AddMuteWriteFrameCnt(muteFrameCnt);
1546 tempProcess->AddMuteFrameSize(volumeDataCount_);
1547 }
1548
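// Per-cycle playback path (holds listLock_): gather every ready client span, mix them (or run the
// single-stream VoIP conversion) into the HDI write span at curWritePos, feed any enabled
// inner-capture dup streams, and dump the mixed span in beta builds.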
1549 bool AudioEndpointInner::ProcessToEndpointDataHandle(uint64_t curWritePos, std::function<void()> &moveClientIndex)
1550 {
1551 std::lock_guard<std::mutex> lock(listLock_);
1552
1553 std::vector<AudioStreamData> audioDataList;
1554 GetAllReadyProcessData(audioDataList, moveClientIndex);
1555 CheckAudioHapticsSync(curWritePos);
1556
1557 AudioStreamData dstStreamData;
1558 dstStreamData.streamInfo = dstStreamInfo_;
1559 int32_t ret = dstAudioBuffer_->GetWriteBuffer(curWritePos, dstStreamData.bufferDesc);
1560 CHECK_AND_RETURN_RET_LOG(((ret == SUCCESS && dstStreamData.bufferDesc.buffer != nullptr)), false,
1561 "GetWriteBuffer failed, ret:%{public}d", ret);
1562
1563 Trace trace("AudioEndpoint::WriteDstBuffer=>" + std::to_string(curWritePos));
1564 // do write work
1565 if (audioDataList.size() == 0) {
1566 memset_s(dstStreamData.bufferDesc.buffer, dstStreamData.bufferDesc.bufLength, 0,
1567 dstStreamData.bufferDesc.bufLength);
1568 } else {
1569 if (endpointType_ == TYPE_VOIP_MMAP && audioDataList.size() == 1) {
1570 HandleRendererDataParams(audioDataList[0], dstStreamData);
1571 } else {
1572 ProcessData(audioDataList, dstStreamData);
1573 }
1574 }
1575 if (syncInfoSize_ != 0) {
1576 CheckSyncInfo(curWritePos);
1577 lastWriteTime_ = ClockTime::GetCurNano();
1578 }
1579 AdapterType type = endpointType_ == TYPE_VOIP_MMAP ? ADAPTER_TYPE_VOIP_FAST : ADAPTER_TYPE_FAST;
1580 AudioPerformanceMonitor::GetInstance().RecordTimeStamp(type, ClockTime::GetCurNano());
1581 {
1582 std::lock_guard<std::mutex> captureLock(dupMutex_);
1583 for (auto &capture: fastCaptureInfos_) {
1584 if (capture.second.isInnerCapEnabled) {
1585 ProcessToDupStream(audioDataList, dstStreamData, capture.first);
1586 }
1587 }
1588 }
1589 if (AudioDump::GetInstance().GetVersionType() == DumpFileUtil::BETA_VERSION) {
1590 DumpFileUtil::WriteDumpFile(dumpHdi_, static_cast<void *>(dstStreamData.bufferDesc.buffer),
1591 dstStreamData.bufferDesc.bufLength);
1592 AudioCacheMgr::GetInstance().CacheData(dumpHdiName_,
1593 static_cast<void *>(dstStreamData.bufferDesc.buffer), dstStreamData.bufferDesc.bufLength);
1594 }
1595
1596 CheckUpdateState(reinterpret_cast<char *>(dstStreamData.bufferDesc.buffer),
1597 dstStreamData.bufferDesc.bufLength);
1598
1599 return true;
1600 }
1601
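// Publishes the server write index and compares it with the client read index; a reader that is
// ahead of the writer means this span was produced too late, so the gap since the previous write
// is reported to the performance monitor as a slow-write event.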
1602 void AudioEndpointInner::CheckSyncInfo(uint64_t curWritePos)
1603 {
1604 if (dstSpanSizeInframe_ == 0) {
1605 return;
1606 }
1607 uint32_t curWriteFrame = curWritePos / dstSpanSizeInframe_;
1608 dstAudioBuffer_->SetSyncWriteFrame(curWriteFrame);
1609 uint32_t curReadFrame = dstAudioBuffer_->GetSyncReadFrame();
1610 Trace trace("Sync: writeIndex:" + std::to_string(curWriteFrame) + " readIndex:" + std::to_string(curReadFrame));
1611
1612 if (curWriteFrame >= curReadFrame) {
1613 // seems running ok.
1614 return;
1615 }
1616 AUDIO_WARNING_LOG("write %{public}d is slower than read %{public}d ", curWriteFrame, curReadFrame);
1617 AdapterType type = endpointType_ == TYPE_VOIP_MMAP ? ADAPTER_TYPE_VOIP_FAST : ADAPTER_TYPE_FAST;
1618 int64_t cost = (ClockTime::GetCurNano() - lastWriteTime_) / AUDIO_US_PER_SECOND;
1619 AudioPerformanceMonitor::GetInstance().ReportWriteSlow(type, cost);
1620 return;
1621 }
1622
1623 void AudioEndpointInner::ProcessToDupStream(const std::vector<AudioStreamData> &audioDataList,
1624 AudioStreamData &dstStreamData, int32_t innerCapId)
1625 {
1626 if (!fastCaptureInfos_.count(innerCapId) || fastCaptureInfos_[innerCapId].dupStream == nullptr) {
1627 AUDIO_ERR_LOG("innerCapId error or dupStream error");
1628 return;
1629 }
1630 Trace trace("AudioEndpointInner::ProcessToDupStream");
1631 if (endpointType_ == TYPE_VOIP_MMAP) {
1632 if (audioDataList.size() == 1 && audioDataList[0].isInnerCapeds.count(innerCapId)
1633 && audioDataList[0].isInnerCapeds.at(innerCapId)) {
1634 BufferDesc temp;
1635 temp.buffer = dupBuffer_.get();
1636 temp.bufLength = dupBufferSize_;
1637 temp.dataLength = dupBufferSize_;
1638
1639 dstStreamData.bufferDesc = temp;
1640 HandleRendererDataParams(audioDataList[0], dstStreamData, false);
1641 if (GetEngineFlag() == 1) {
1642 WriteDupBufferInner(temp, innerCapId);
1643 } else {
1644 fastCaptureInfos_[innerCapId].dupStream->EnqueueBuffer(temp);
1645 }
1646 }
1647 } else {
1648 MixToDupStream(audioDataList, innerCapId);
1649 }
1650 }
1651
1652 void AudioEndpointInner::CheckUpdateState(char *frame, uint64_t replyBytes)
1653 {
1654 if (startUpdate_) {
1655 if (renderFrameNum_ == 0) {
1656 last10FrameStartTime_ = ClockTime::GetCurNano();
1657 }
1658 renderFrameNum_++;
1659 maxAmplitude_ = UpdateMaxAmplitude(static_cast<ConvertHdiFormat>(dstStreamInfo_.format),
1660 frame, replyBytes);
1661 if (renderFrameNum_ == GET_MAX_AMPLITUDE_FRAMES_THRESHOLD) {
1662 renderFrameNum_ = 0;
1663 if (last10FrameStartTime_ > lastGetMaxAmplitudeTime_) {
1664 startUpdate_ = false;
1665 maxAmplitude_ = 0;
1666 }
1667 }
1668 }
1669 }
1670
1671 float AudioEndpointInner::GetMaxAmplitude()
1672 {
1673 lastGetMaxAmplitudeTime_ = ClockTime::GetCurNano();
1674 startUpdate_ = true;
1675 return maxAmplitude_;
1676 }
1677
1678 AudioMode AudioEndpointInner::GetAudioMode() const
1679 {
1680 return clientConfig_.audioMode;
1681 }
1682
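// readTimeModel_ maps frame positions to predicted HDI read times. The cached (posInFrame_,
// timeInNano_) pair from the async position thread is fed back into the model here; CHECK_FAILED
// asks that thread for a fresh position, while NEED_MODIFY schedules a full position resync.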
1683 int64_t AudioEndpointInner::GetPredictNextReadTime(uint64_t posInFrame)
1684 {
1685 Trace trace("AudioEndpoint::GetPredictNextRead");
1686 uint64_t handleSpanCnt = posInFrame / dstSpanSizeInframe_;
1687 uint32_t startPeriodCnt = 20; // sync each time when start
1688 uint32_t oneBigPeriodCnt = 40; // 200ms
1689 if (handleSpanCnt < startPeriodCnt || handleSpanCnt % oneBigPeriodCnt == 0) {
1690 updateThreadCV_.notify_all();
1691 }
1692 uint64_t readFrame = 0;
1693 int64_t readtime = 0;
1694 if (readTimeModel_.GetFrameStamp(readFrame, readtime)) {
1695 if (readFrame != posInFrame_) {
1696 CheckPosTimeRes res = readTimeModel_.UpdataFrameStamp(posInFrame_, timeInNano_);
1697 if (res == CHECK_FAILED) {
1698 updateThreadCV_.notify_all();
1699 } else if (res == NEED_MODIFY) {
1700 needReSyncPosition_ = true;
1701 }
1702 }
1703 }
1704
1705 int64_t nextHdiReadTime = readTimeModel_.GetTimeOfPos(posInFrame);
1706 return nextHdiReadTime;
1707 }
1708
1709 int64_t AudioEndpointInner::GetPredictNextWriteTime(uint64_t posInFrame)
1710 {
1711 uint64_t handleSpanCnt = posInFrame / dstSpanSizeInframe_;
1712 uint32_t startPeriodCnt = 20;
1713 uint32_t oneBigPeriodCnt = 40;
1714 if (handleSpanCnt < startPeriodCnt || handleSpanCnt % oneBigPeriodCnt == 0) {
1715 updateThreadCV_.notify_all();
1716 }
1717 uint64_t writeFrame = 0;
1718 int64_t writetime = 0;
1719 if (writeTimeModel_.GetFrameStamp(writeFrame, writetime)) {
1720 if (writeFrame != posInFrame_) {
1721 CheckPosTimeRes res = writeTimeModel_.UpdataFrameStamp(posInFrame_, timeInNano_);
1722 if (res == CHECK_FAILED) {
1723 updateThreadCV_.notify_all();
1724 } else if (res == NEED_MODIFY) {
1725 needReSyncPosition_ = true;
1726 }
1727 }
1728 }
1729 int64_t nextHdiWriteTime = writeTimeModel_.GetTimeOfPos(posInFrame);
1730 return nextHdiWriteTime;
1731 }
1732
1733 bool AudioEndpointInner::RecordPrepareNextLoop(uint64_t curReadPos, int64_t &wakeUpTime)
1734 {
1735 uint64_t nextHandlePos = curReadPos + dstSpanSizeInframe_;
1736 int64_t nextHdiWriteTime = GetPredictNextWriteTime(nextHandlePos);
1737 int64_t tempDelay = endpointType_ == TYPE_VOIP_MMAP ? RECORD_VOIP_DELAY_TIME_NS : RECORD_DELAY_TIME_NS;
1738 int64_t predictWakeupTime = nextHdiWriteTime + tempDelay;
1739 if (predictWakeupTime <= ClockTime::GetCurNano()) {
1740 wakeUpTime = ClockTime::GetCurNano() + ONE_MILLISECOND_DURATION;
1741 AUDIO_ERR_LOG("hdi send wrong position time");
1742 } else {
1743 wakeUpTime = predictWakeupTime;
1744 }
1745
1746 int32_t ret = dstAudioBuffer_->SetCurWriteFrame(nextHandlePos, false);
1747 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "set dst buffer write frame fail, ret %{public}d.", ret);
1748 ret = dstAudioBuffer_->SetCurReadFrame(nextHandlePos, false);
1749 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "set dst buffer read frame fail, ret %{public}d.", ret);
1750
1751 return true;
1752 }
1753
1754 bool AudioEndpointInner::PrepareNextLoop(uint64_t curWritePos, int64_t &wakeUpTime,
1755 const std::function<void()> &moveClientIndex)
1756 {
1757 uint64_t nextHandlePos = curWritePos + dstSpanSizeInframe_;
1758 Trace prepareTrace("AudioEndpoint::PrepareNextLoop " + std::to_string(nextHandlePos));
1759 int64_t nextHdiReadTime = GetPredictNextReadTime(nextHandlePos);
1760 int64_t predictWakeupTime = nextHdiReadTime - serverAheadReadTime_;
1761 if (predictWakeupTime <= ClockTime::GetCurNano()) {
1762 wakeUpTime = ClockTime::GetCurNano() + ONE_MILLISECOND_DURATION;
1763 AUDIO_ERR_LOG("hdi send wrong position time");
1764 } else {
1765 wakeUpTime = predictWakeupTime;
1766 }
1767
1768 SpanInfo *nextWriteSpan = dstAudioBuffer_->GetSpanInfo(nextHandlePos);
1769 CHECK_AND_RETURN_RET_LOG(nextWriteSpan != nullptr, false, "GetSpanInfo failed, can not get next write span");
1770
1771 int32_t ret1 = dstAudioBuffer_->SetCurWriteFrame(nextHandlePos, false);
1772 int32_t ret2 = dstAudioBuffer_->SetCurReadFrame(nextHandlePos, false);
1773 CHECK_AND_RETURN_RET_LOG(ret1 == SUCCESS && ret2 == SUCCESS, false,
1774 "SetCurWriteFrame or SetCurReadFrame failed, ret1:%{public}d ret2:%{public}d", ret1, ret2);
1775 std::lock_guard<std::mutex> lock(listLock_);
1776 if (moveClientIndex) {
1777 moveClientIndex();
1778 }
1779 return true;
1780 }
1781
1782 bool AudioEndpointInner::GetDeviceHandleInfo(uint64_t &frames, int64_t &nanoTime)
1783 {
1784 Trace trace("AudioEndpoint::GetMmapHandlePosition");
1785 int64_t timeSec = 0;
1786 int64_t timeNanoSec = 0;
1787 int32_t ret = 0;
1788 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
1789 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
1790 CHECK_AND_RETURN_RET_LOG(source != nullptr && source->IsInited(),
1791 false, "Source start failed.");
1792 // GetMmapHandlePosition is called via IPC.
1793 ret = source->GetMmapHandlePosition(frames, timeSec, timeNanoSec);
1794 } else {
1795 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
1796 CHECK_AND_RETURN_RET_LOG(sink != nullptr && sink->IsInited(),
1797 false, "GetDeviceHandleInfo failed: sink is not inited.");
1798 // GetMmapHandlePosition is called via IPC.
1799 ret = sink->GetMmapHandlePosition(frames, timeSec, timeNanoSec);
1800 }
1801 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "Call adapter GetMmapHandlePosition failed: %{public}d", ret);
1802 trace.End();
1803 nanoTime = timeNanoSec + timeSec * AUDIO_NS_PER_SECOND;
1804 Trace infoTrace("AudioEndpoint::GetDeviceHandleInfo frames=>" + std::to_string(frames) + " " +
1805 std::to_string(nanoTime) + " at " + std::to_string(ClockTime::GetCurNano()));
1806 nanoTime += DELTA_TO_REAL_READ_START_TIME; // global delay in server
1807 return true;
1808 }
1809
1810 void AudioEndpointInner::AsyncGetPosTime()
1811 {
1812 AUDIO_INFO_LOG("AsyncGetPosTime thread start.");
1813 while (!stopUpdateThread_) {
1814 std::unique_lock<std::mutex> lock(updateThreadLock_);
1815 updateThreadCV_.wait_for(lock, std::chrono::milliseconds(UPDATE_THREAD_TIMEOUT));
1816 if (stopUpdateThread_) {
1817 break;
1818 }
1819 if (endpointStatus_ == IDEL && isStarted_ && ClockTime::GetCurNano() > delayStopTime_) {
1820 AUDIO_INFO_LOG("IDEL for too long, let's call hdi stop");
1821 DelayStopDevice();
1822 continue;
1823 }
1824 if (!isStarted_) {
1825 continue;
1826 }
1827 // get signaled, call get pos-time
1828 uint64_t curHdiHandlePos = posInFrame_;
1829 int64_t handleTime = timeInNano_;
1830 if (!GetDeviceHandleInfo(curHdiHandlePos, handleTime)) {
1831 AUDIO_WARNING_LOG("AsyncGetPosTime call GetDeviceHandleInfo failed.");
1832 continue;
1833 }
1834 // cache the latest position-time pair only when the position has advanced
1835 if (posInFrame_ != curHdiHandlePos) {
1836 posInFrame_ = curHdiHandlePos;
1837 timeInNano_ = handleTime;
1838 }
1839 }
1840 }
1841
1842 std::string AudioEndpointInner::GetStatusStr(EndpointStatus status)
1843 {
1844 switch (status) {
1845 case INVALID:
1846 return "INVALID";
1847 case UNLINKED:
1848 return "UNLINKED";
1849 case IDEL:
1850 return "IDEL";
1851 case STARTING:
1852 return "STARTING";
1853 case RUNNING:
1854 return "RUNNING";
1855 case STOPPING:
1856 return "STOPPING";
1857 case STOPPED:
1858 return "STOPPED";
1859 default:
1860 break;
1861 }
1862 return "NO_SUCH_STATUS";
1863 }
1864
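// The worker keeps spinning only in RUNNING, or in IDEL while isDeviceRunningInIdel_ is set and
// the delayed stop time has not expired. Every other state (UNLINKED/STARTING/STOPPING/...) parks
// the thread on workThreadCV_ for up to SLEEP_TIME_IN_DEFAULT ms and then re-evaluates the status.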
1865 bool AudioEndpointInner::KeepWorkloopRunning()
1866 {
1867 EndpointStatus targetStatus = INVALID;
1868 switch (endpointStatus_.load()) {
1869 case RUNNING:
1870 return true;
1871 case IDEL:
1872 if (ClockTime::GetCurNano() > delayStopTime_) {
1873 targetStatus = RUNNING;
1874 updateThreadCV_.notify_all();
1875 break;
1876 }
1877 if (isDeviceRunningInIdel_) {
1878 return true;
1879 }
1880 break;
1881 case UNLINKED:
1882 targetStatus = IDEL;
1883 break;
1884 case STARTING:
1885 targetStatus = RUNNING;
1886 break;
1887 case STOPPING:
1888 targetStatus = STOPPED;
1889 break;
1890 default:
1891 break;
1892 }
1893
1894 // When returning false, EndpointWorkLoopFuc continues the loop immediately, so wait here to avoid an infinite busy loop.
1895 std::unique_lock<std::mutex> lock(loopThreadLock_);
1896 AUDIO_PRERELEASE_LOGI("%{public}s now, wait for %{public}s...", GetStatusStr(endpointStatus_).c_str(),
1897 GetStatusStr(targetStatus).c_str());
1898 threadStatus_ = WAITTING;
1899 workThreadCV_.wait_for(lock, std::chrono::milliseconds(SLEEP_TIME_IN_DEFAULT));
1900 AUDIO_DEBUG_LOG("Wait end. Cur is %{public}s now, target is %{public}s...", GetStatusStr(endpointStatus_).c_str(),
1901 GetStatusStr(targetStatus).c_str());
1902
1903 return false;
1904 }
1905
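// Record path: one span read from the HDI buffer is copied (format-converted when needed) into a
// single client's process buffer at that client's own write position, then the buffer is advanced
// by dstSpanSizeInframe_. If the client reads too slowly, the span is skipped with a warning.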
1906 int32_t AudioEndpointInner::WriteToSpecialProcBuf(const std::shared_ptr<OHAudioBufferBase> &procBuf,
1907 const BufferDesc &readBuf, const BufferDesc &convertedBuffer, bool muteFlag)
1908 {
1909 CHECK_AND_RETURN_RET_LOG(procBuf != nullptr, ERR_INVALID_HANDLE, "process buffer is null.");
1910 uint64_t curWritePos = procBuf->GetCurWriteFrame();
1911 Trace trace("AudioEndpoint::WriteProcessData-<" + std::to_string(curWritePos));
1912
1913 int32_t writeAbleSize = procBuf->GetWritableDataFrames();
1914 if (writeAbleSize <= 0 || static_cast<uint32_t>(writeAbleSize) <= dstSpanSizeInframe_) {
1915 AUDIO_WARNING_LOG("client read too slow: curWritePos:%{public}" PRIu64" writeAbleSize:%{public}d",
1916 curWritePos, writeAbleSize);
1917 return ERR_OPERATION_FAILED;
1918 }
1919
1920 RingBufferWrapper ringBuffer;
1921 int32_t ret = procBuf->GetAllWritableBufferFromPosFrame(curWritePos, ringBuffer);
1922 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret, "get write buffer fail, ret %{public}d.", ret);
1923
1924 uint32_t totalSizeInFrame;
1925 uint32_t byteSizePerFrame;
1926 procBuf->GetSizeParameter(totalSizeInFrame, byteSizePerFrame);
1927 CHECK_AND_RETURN_RET_LOG(byteSizePerFrame > 0, ERR_OPERATION_FAILED, "byteSizePerFrame is 0");
1928 uint32_t writeableSizeInFrame = ringBuffer.dataLength / byteSizePerFrame;
1929 if (writeableSizeInFrame > dstSpanSizeInframe_) {
1930 ringBuffer.dataLength = dstSpanSizeInframe_ * byteSizePerFrame;
1931 }
1932
1933 if (muteFlag) {
1934 ringBuffer.SetBuffersValueWithSpecifyDataLen(0);
1935 } else {
1936 ret = HandleCapturerDataParams(ringBuffer, readBuf, convertedBuffer);
1937 }
1938
1939 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memcpy data to process buffer fail, "
1940 "curWritePos %{public}" PRIu64", ret %{public}d.", curWritePos, ret);
1941
1942 procBuf->SetHandleInfo(curWritePos, ClockTime::GetCurNano());
1943 ret = procBuf->SetCurWriteFrame(curWritePos + dstSpanSizeInframe_);
1944 if (ret != SUCCESS) {
1945 AUDIO_WARNING_LOG("set procBuf next write frame fail, ret %{public}d.", ret);
1946 return ERR_OPERATION_FAILED;
1947 }
1948 return SUCCESS;
1949 }
1950
1951 int32_t AudioEndpointInner::WriteToRingBuffer(RingBufferWrapper &writeBuf, const BufferDesc &buffer)
1952 {
1953 CHECK_AND_RETURN_RET_LOG(buffer.buffer != nullptr && buffer.bufLength > 0, ERR_WRITE_FAILED, "failed");
1954 return writeBuf.CopyInputBufferValueToCurBuffer(RingBufferWrapper{
1955 .basicBufferDescs = {{
1956 {.buffer = buffer.buffer, .bufLength = buffer.bufLength},
1957 {.buffer = nullptr, .bufLength = 0}}},
1958 .dataLength = buffer.bufLength
1959 });
1960 }
1961
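// The endpoint captures S16 stereo; depending on the client's requested streamInfo the span is
// copied as-is (S16 stereo), downmixed to S16 mono, or converted to F32 stereo/mono through the
// process's converted buffer, which is zeroed again after each write.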
1962 int32_t AudioEndpointInner::HandleCapturerDataParams(RingBufferWrapper &writeBuf, const BufferDesc &readBuf,
1963 const BufferDesc &convertedBuffer)
1964 {
1965 if (clientConfig_.streamInfo.format == SAMPLE_S16LE && clientConfig_.streamInfo.channels == STEREO) {
1966 return WriteToRingBuffer(writeBuf, readBuf);
1967 }
1968 if (clientConfig_.streamInfo.format == SAMPLE_S16LE && clientConfig_.streamInfo.channels == MONO) {
1969 int32_t ret = FormatConverter::S16StereoToS16Mono(readBuf, convertedBuffer);
1970 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED, "Convert channel from stereo to mono failed");
1971 ret = WriteToRingBuffer(writeBuf, convertedBuffer);
1972 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED, "memcpy_s failed");
1973 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1974 convertedBuffer.bufLength);
1975 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memset converted buffer to 0 failed");
1976 return EOK;
1977 }
1978 if (clientConfig_.streamInfo.format == SAMPLE_F32LE) {
1979 int32_t ret = 0;
1980 if (clientConfig_.streamInfo.channels == STEREO) {
1981 ret = FormatConverter::S16StereoToF32Stereo(readBuf, convertedBuffer);
1982 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED,
1983 "Convert channel from s16 stereo to f32 stereo failed");
1984 } else if (clientConfig_.streamInfo.channels == MONO) {
1985 ret = FormatConverter::S16StereoToF32Mono(readBuf, convertedBuffer);
1986 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED,
1987 "Convert channel from s16 stereo to f32 mono failed");
1988 } else {
1989 return ERR_NOT_SUPPORTED;
1990 }
1991 ret = WriteToRingBuffer(writeBuf, convertedBuffer);
1992 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED, "memcpy_s failed");
1993 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1994 convertedBuffer.bufLength);
1995 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memset converted buffer to 0 failed");
1996 return EOK;
1997 }
1998
1999 return ERR_NOT_SUPPORTED;
2000 }
2001
2002 void AudioEndpointInner::WriteToProcessBuffers(const BufferDesc &readBuf)
2003 {
2004 CheckRecordSignal(readBuf.buffer, readBuf.bufLength);
2005 std::lock_guard<std::mutex> lock(listLock_);
2006 for (size_t i = 0; i < processBufferList_.size(); i++) {
2007 CHECK_AND_CONTINUE_LOG(processBufferList_[i] != nullptr, "process buffer %{public}zu is null.", i);
2008 if (processBufferList_[i]->GetStreamStatus() &&
2009 processBufferList_[i]->GetStreamStatus()->load() != STREAM_RUNNING) {
2010 AUDIO_WARNING_LOG("process buffer %{public}zu not running, stream status %{public}d.",
2011 i, processBufferList_[i]->GetStreamStatus()->load());
2012 continue;
2013 }
2014
2015 int32_t ret = WriteToSpecialProcBuf(processBufferList_[i], readBuf, processList_[i]->GetConvertedBuffer(),
2016 processList_[i]->GetMuteState());
2017 CHECK_AND_CONTINUE_LOG(ret == SUCCESS,
2018 "endpoint write to process buffer %{public}zu fail, ret %{public}d.", i, ret);
2019 AUDIO_DEBUG_LOG("endpoint process buffer %{public}zu write success.", i);
2020 }
2021 }
2022
2023 int32_t AudioEndpointInner::ReadFromEndpoint(uint64_t curReadPos)
2024 {
2025 Trace trace("AudioEndpoint::ReadDstBuffer=<" + std::to_string(curReadPos));
2026 AUDIO_DEBUG_LOG("ReadFromEndpoint enter, dstAudioBuffer curReadPos %{public}" PRIu64".", curReadPos);
2027 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr, ERR_INVALID_HANDLE,
2028 "dst audio buffer is null.");
2029 SpanInfo *curReadSpan = dstAudioBuffer_->GetSpanInfo(curReadPos);
2030 CHECK_AND_RETURN_RET_LOG(curReadSpan != nullptr, ERR_INVALID_HANDLE,
2031 "get source read span info of source adapter fail.");
2032 curReadSpan->readStartTime = ClockTime::GetCurNano();
2033 curReadSpan->spanStatus.store(SpanStatus::SPAN_READING);
2034 BufferDesc readBuf;
2035 int32_t ret = dstAudioBuffer_->GetReadbuffer(curReadPos, readBuf);
2036 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret, "get read buffer fail, ret %{public}d.", ret);
2037 VolumeTools::DfxOperation(readBuf, dstStreamInfo_, logUtilsTag_, volumeDataCount_);
2038 if (AudioDump::GetInstance().GetVersionType() == DumpFileUtil::BETA_VERSION) {
2039 DumpFileUtil::WriteDumpFile(dumpHdi_, static_cast<void *>(readBuf.buffer), readBuf.bufLength);
2040 AudioCacheMgr::GetInstance().CacheData(dumpHdiName_,
2041 static_cast<void *>(readBuf.buffer), readBuf.bufLength);
2042 }
2043 WriteToProcessBuffers(readBuf);
2044 ret = memset_s(readBuf.buffer, readBuf.bufLength, 0, readBuf.bufLength);
2045 if (ret != EOK) {
2046 AUDIO_WARNING_LOG("reset buffer fail, ret %{public}d.", ret);
2047 }
2048 curReadSpan->readDoneTime = ClockTime::GetCurNano();
2049 curReadSpan->spanStatus.store(SpanStatus::SPAN_READ_DONE);
2050 return SUCCESS;
2051 }
2052
2053 void AudioEndpointInner::RecordEndpointWorkLoopFuc()
2054 {
2055 SetThreadQosLevel();
2056 int64_t curTime = 0;
2057 uint64_t curReadPos = 0;
2058 int64_t wakeUpTime = ClockTime::GetCurNano();
2059 AUDIO_INFO_LOG("Record endpoint work loop fuc start.");
2060 while (isInited_.load()) {
2061 if (!KeepWorkloopRunning()) {
2062 continue;
2063 }
2064 threadStatus_ = INRUNNING;
2065 if (needReSyncPosition_) {
2066 RecordReSyncPosition();
2067 wakeUpTime = ClockTime::GetCurNano();
2068 needReSyncPosition_ = false;
2069 continue;
2070 }
2071 curTime = ClockTime::GetCurNano();
2072 Trace loopTrace("Record_loop_trace");
2073 if (curTime - wakeUpTime > THREE_MILLISECOND_DURATION) {
2074 AUDIO_WARNING_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
2075 } else if (curTime - wakeUpTime > ONE_MILLISECOND_DURATION) {
2076 AUDIO_DEBUG_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
2077 }
2078
2079 curReadPos = dstAudioBuffer_->GetCurReadFrame();
2080 CHECK_AND_BREAK_LOG(ReadFromEndpoint(curReadPos) == SUCCESS, "read from endpoint to process service fail.");
2081
2082 bool ret = RecordPrepareNextLoop(curReadPos, wakeUpTime);
2083 CHECK_AND_BREAK_LOG(ret, "PrepareNextLoop failed!");
2084
2085 ProcessUpdateAppsUidForRecord();
2086
2087 loopTrace.End();
2088 threadStatus_ = SLEEPING;
2089 CheckWakeUpTime(wakeUpTime);
2090 ClockTime::AbsoluteSleep(wakeUpTime);
2091 }
2092 ResetThreadQosLevel();
2093 }
2094
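// Pins the current fast-mixer thread to CPUs [CPU_INDEX, cpuNum); a failed sched_setaffinity call
// is only logged, and the binding is attempted once per endpoint (coreBinded_).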
2095 void AudioEndpointInner::BindCore()
2096 {
2097 if (coreBinded_) {
2098 return;
2099 }
2100 // bind cpu cores 2-7 for fast mixer
2101 cpu_set_t targetCpus;
2102 CPU_ZERO(&targetCpus);
2103 int32_t cpuNum = sysconf(_SC_NPROCESSORS_CONF);
2104 for (int32_t i = CPU_INDEX; i < cpuNum; i++) {
2105 CPU_SET(i, &targetCpus);
2106 }
2107
2108 int32_t ret = sched_setaffinity(gettid(), sizeof(cpu_set_t), &targetCpus);
2109 if (ret != 0) {
2110 AUDIO_ERR_LOG("set target cpu failed, set ret: %{public}d", ret);
2111 }
2112 AUDIO_INFO_LOG("set pid: %{public}d, tid: %{public}d cpus", getpid(), gettid());
2113 coreBinded_ = true;
2114 }
2115
2116 void AudioEndpointInner::CheckTimeAndBufferReady(uint64_t &curWritePos, int64_t &wakeUpTime, int64_t &curTime)
2117 {
2118 int64_t deltaTime = curTime - wakeUpTime;
2119 if (deltaTime > THREE_MILLISECOND_DURATION) {
2120 AUDIO_WARNING_LOG("Wake up cost %{public}" PRId64" ms!", deltaTime / AUDIO_US_PER_SECOND);
2121 } else if (deltaTime > ONE_MILLISECOND_DURATION) {
2122 AUDIO_DEBUG_LOG("Wake up cost %{public}" PRId64" ms!", deltaTime / AUDIO_US_PER_SECOND);
2123 }
2124
2125 // First, wake up at client may-write-done time, and check if all process write done.
2126 // If not, do another sleep to the possible latest write time.
2127 curWritePos = dstAudioBuffer_->GetCurWriteFrame();
2128 if (!CheckAllBufferReady(wakeUpTime, curWritePos)) { curTime = ClockTime::GetCurNano(); }
2129 }
2130
2131 void AudioEndpointInner::CheckWakeUpTime(int64_t &wakeUpTime)
2132 {
2133 int64_t curTime = ClockTime::GetCurNano();
2134 if (wakeUpTime - curTime > MAX_WAKEUP_TIME_NS) {
2135 wakeUpTime = curTime + RELATIVE_SLEEP_TIME_NS;
2136 }
2137 }
2138
2139 void AudioEndpointInner::EndpointWorkLoopFuc()
2140 {
2141 BindCore();
2142 bool setPriorityResult = SetEndpointThreadPriority();
2143 if (!setPriorityResult) {
2144 SetThreadQosLevel();
2145 }
2146 int64_t curTime = 0;
2147 uint64_t curWritePos = 0;
2148 int64_t wakeUpTime = ClockTime::GetCurNano();
2149 AUDIO_INFO_LOG("Endpoint work loop fuc start");
2150 while (isInited_.load()) {
2151 if (!KeepWorkloopRunning()) {
2152 continue;
2153 }
2154 threadStatus_ = INRUNNING;
2155 curTime = ClockTime::GetCurNano();
2156 Trace loopTrace("AudioEndpoint::loop_trace " + std::to_string(wakeUpTime));
2157 if (needReSyncPosition_) {
2158 ReSyncPosition();
2159 wakeUpTime = curTime;
2160 needReSyncPosition_ = false;
2161 continue;
2162 }
2163
2164 CheckTimeAndBufferReady(curWritePos, wakeUpTime, curTime);
2165
2166 std::function<void()> moveClientIndex;
2167 // then do mix & write to hdi buffer and prepare next loop
2168 CHECK_AND_BREAK_LOG(ProcessToEndpointDataHandle(curWritePos, moveClientIndex),
2169 "ProcessToEndpointDataHandle failed!");
2170
2171 // prepare info of next loop
2172 CHECK_AND_BREAK_LOG(PrepareNextLoop(curWritePos, wakeUpTime, moveClientIndex),
2173 "ProcessToEndpointDataHandle failed!");
2174
2175 ProcessUpdateAppsUidForPlayback();
2176
2177 loopTrace.End();
2178 // start sleep
2179 threadStatus_ = SLEEPING;
2180 CheckWakeUpTime(wakeUpTime);
2181 ClockTime::AbsoluteSleep(wakeUpTime);
2182 }
2183 AUDIO_DEBUG_LOG("Endpoint work loop fuc end");
2184 if (setPriorityResult) {
2185 ResetEndpointThreadPriority();
2186 } else {
2187 ResetThreadQosLevel();
2188 }
2189 }
2190
2191 void AudioEndpointInner::ProcessUpdateAppsUidForPlayback()
2192 {
2193 std::vector<int32_t> appsUid;
2194 {
2195 std::lock_guard<std::mutex> lock(listLock_);
2196
2197 appsUid.reserve(processList_.size());
2198 for (auto iProccessStream : processList_) {
2199 appsUid.push_back(iProccessStream->GetAppInfo().appUid);
2200 }
2201 }
2202 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
2203 CHECK_AND_RETURN_LOG(sink, "fastSink_ is nullptr");
2204 sink->UpdateAppsUid(appsUid);
2205 }
2206
2207 void AudioEndpointInner::ProcessUpdateAppsUidForRecord()
2208 {
2209 std::vector<int32_t> appsUid;
2210 {
2211 std::lock_guard<std::mutex> lock(listLock_);
2212
2213 appsUid.reserve(processList_.size());
2214 for (auto iProccessStream : processList_) {
2215 appsUid.push_back(iProccessStream->GetAppInfo().appUid);
2216 }
2217 }
2218 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
2219 CHECK_AND_RETURN_LOG(source, "fastSource_ is nullptr");
2220 source->UpdateAppsUid(appsUid);
2221 }
2222
2223 uint32_t AudioEndpointInner::GetLinkedProcessCount()
2224 {
2225 std::lock_guard<std::mutex> lock(listLock_);
2226 return processList_.size();
2227 }
2228
2229 bool AudioEndpointInner::IsInvalidBuffer(uint8_t *buffer, size_t bufferSize, AudioSampleFormat format)
2230 {
2231 bool isInvalid = false;
2232 uint8_t ui8Data = 0;
2233 int16_t i16Data = 0;
2234 switch (format) {
2235 case SAMPLE_U8:
2236 CHECK_AND_RETURN_RET_LOG(bufferSize > 0, false, "buffer size is too small");
2237 ui8Data = *buffer;
2238 isInvalid = ui8Data == 0;
2239 break;
2240 case SAMPLE_S16LE:
2241 CHECK_AND_RETURN_RET_LOG(bufferSize > 1, false, "buffer size is too small");
2242 i16Data = *(reinterpret_cast<const int16_t*>(buffer));
2243 isInvalid = i16Data == 0;
2244 break;
2245 default:
2246 break;
2247 }
2248 return isInvalid;
2249 }
2250
2251 void AudioEndpointInner::WriteMuteDataSysEvent(uint8_t *buffer, size_t bufferSize, int32_t index)
2252 {
2253 CHECK_AND_RETURN_LOG(static_cast<size_t>(index + 1) <= processList_.size(), "invalid index");
2254 auto tempProcess = processList_[index];
2255 CHECK_AND_RETURN_LOG(tempProcess, "tempProcess is nullptr");
2256 if (IsInvalidBuffer(buffer, bufferSize, processList_[index]->GetStreamInfo().format)) {
2257 if (tempProcess->GetStartMuteTime() == 0) {
2258 tempProcess->SetStartMuteTime(std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()));
2259 }
2260 std::time_t currentTime = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
2261 if ((currentTime - tempProcess->GetStartMuteTime() >= ONE_MINUTE) && !tempProcess->GetSilentState()) {
2262 tempProcess->SetSilentState(true);
2263 AUDIO_WARNING_LOG("write invalid data for some time in server");
2264
2265 std::unordered_map<std::string, std::string> payload;
2266 payload["uid"] = std::to_string(tempProcess->GetAppInfo().appUid);
2267 payload["sessionId"] = std::to_string(tempProcess->GetAudioSessionId());
2268 payload["isSilent"] = std::to_string(true);
2269 #ifdef RESSCHE_ENABLE
2270 ReportDataToResSched(payload, ResourceSchedule::ResType::RES_TYPE_AUDIO_RENDERER_SILENT_PLAYBACK);
2271 #endif
2272 }
2273 } else {
2274 if (tempProcess->GetStartMuteTime() != 0) {
2275 tempProcess->SetStartMuteTime(0);
2276 }
2277 if (tempProcess->GetSilentState()) {
2278 AUDIO_WARNING_LOG("begin write valid data in server");
2279 tempProcess->SetSilentState(false);
2280
2281 std::unordered_map<std::string, std::string> payload;
2282 payload["uid"] = std::to_string(tempProcess->GetAppInfo().appUid);
2283 payload["sessionId"] = std::to_string(tempProcess->GetAudioSessionId());
2284 payload["isSilent"] = std::to_string(false);
2285 #ifdef RESSCHE_ENABLE
2286 ReportDataToResSched(payload, ResourceSchedule::ResType::RES_TYPE_AUDIO_RENDERER_SILENT_PLAYBACK);
2287 #endif
2288 }
2289 }
2290 }
2291
ReportDataToResSched(std::unordered_map<std::string,std::string> payload,uint32_t type)2292 void AudioEndpointInner::ReportDataToResSched(std::unordered_map<std::string, std::string> payload, uint32_t type)
2293 {
2294 #ifdef RESSCHE_ENABLE
2295 AUDIO_INFO_LOG("report event to ResSched ,event type : %{public}d", type);
2296 ResourceSchedule::ResSchedClient::GetInstance().ReportData(type, 0, payload);
2297 #endif
2298 }
2299
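// Dup (inner-capture) ring buffer sizing: DUP_COMMON_LEN / DUP_DEFAULT_LEN = 40ms / 20ms = 2, so
// the ring holds two spans of dupSpanSizeInFrame_ frames. One zeroed span is pre-queued so the dup
// stream has data available for its first callback.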
2300 int32_t AudioEndpointInner::CreateDupBufferInner(int32_t innerCapId)
2301 {
2302 // todo dynamic
2303 if (innerCapIdToDupStreamCallbackMap_[innerCapId] == nullptr ||
2304 innerCapIdToDupStreamCallbackMap_[innerCapId]->GetDupRingBuffer() != nullptr) {
2305 AUDIO_INFO_LOG("dup buffer already configed!");
2306 return SUCCESS;
2307 }
2308
2309 auto &capInfo = fastCaptureInfos_[innerCapId];
2310
2311 capInfo.dupStream->GetSpanSizePerFrame(dupSpanSizeInFrame_);
2312 dupTotalSizeInFrame_ = dupSpanSizeInFrame_ * (DUP_COMMON_LEN/DUP_DEFAULT_LEN);
2313 capInfo.dupStream->GetByteSizePerFrame(dupByteSizePerFrame_);
2314 if (dupSpanSizeInFrame_ == 0 || dupByteSizePerFrame_ == 0) {
2315 AUDIO_ERR_LOG("ERR_INVALID_PARAM");
2316 return ERR_INVALID_PARAM;
2317 }
2318 dupSpanSizeInByte_ = dupSpanSizeInFrame_ * dupByteSizePerFrame_;
2319 CHECK_AND_RETURN_RET_LOG(dupSpanSizeInByte_ != 0, ERR_OPERATION_FAILED, "Config dup buffer failed");
2320 AUDIO_INFO_LOG("dupTotalSizeInFrame_: %{public}zu, dupSpanSizeInFrame_: %{public}zu,"
2321 "dupByteSizePerFrame_:%{public}zu dupSpanSizeInByte_: %{public}zu,",
2322 dupTotalSizeInFrame_, dupSpanSizeInFrame_, dupByteSizePerFrame_, dupSpanSizeInByte_);
2323
2324 // create dupBuffer in server
2325 innerCapIdToDupStreamCallbackMap_[innerCapId]->GetDupRingBuffer() =
2326 AudioRingCache::Create(dupTotalSizeInFrame_ * dupByteSizePerFrame_);
2327 CHECK_AND_RETURN_RET_LOG(innerCapIdToDupStreamCallbackMap_[innerCapId]->GetDupRingBuffer() != nullptr,
2328 ERR_OPERATION_FAILED, "Create dup buffer failed");
2329 size_t emptyBufferSize = static_cast<size_t>(dupSpanSizeInFrame_) * dupByteSizePerFrame_;
2330 auto buffer = std::make_unique<uint8_t []>(emptyBufferSize);
2331 BufferDesc emptyBufferDesc = {buffer.get(), emptyBufferSize, emptyBufferSize};
2332 memset_s(emptyBufferDesc.buffer, emptyBufferDesc.bufLength, 0, emptyBufferDesc.bufLength);
2333 WriteDupBufferInner(emptyBufferDesc, innerCapId);
2334 return SUCCESS;
2335 }
2336
2337 int32_t AudioEndpointInner::WriteDupBufferInner(const BufferDesc &bufferDesc, int32_t innerCapId)
2338 {
2339 size_t targetSize = bufferDesc.bufLength;
2340
2341 if (innerCapIdToDupStreamCallbackMap_[innerCapId]->GetDupRingBuffer() == nullptr) {
2342 AUDIO_INFO_LOG("dup buffer is nnullptr, failed WriteDupBuffer!");
2343 return ERROR;
2344 }
2345 OptResult result = innerCapIdToDupStreamCallbackMap_[innerCapId]->GetDupRingBuffer()->GetWritableSize();
2346 // TODO: handle failure to get the writable size
2347 CHECK_AND_RETURN_RET_LOG(result.ret == OPERATION_SUCCESS, ERROR,
2348 "DupRingBuffer GetWritableSize failed, size:%{public}zu", result.size);
2349 size_t writableSize = result.size;
2350 AUDIO_DEBUG_LOG("targetSize: %{public}zu, writableSize: %{public}zu", targetSize, writableSize);
2351 size_t writeSize = std::min(writableSize, targetSize);
2352 BufferWrap bufferWrap = {bufferDesc.buffer, writeSize};
2353
2354 if (writeSize > 0) {
2355 result = innerCapIdToDupStreamCallbackMap_[innerCapId]->GetDupRingBuffer()->Enqueue(bufferWrap);
2356 if (result.ret != OPERATION_SUCCESS) {
2357 AUDIO_ERR_LOG("RingCache Enqueue failed ret:%{public}d size:%{public}zu", result.ret, result.size);
2358 }
2359 DumpFileUtil::WriteDumpFile(dumpDupIn_, static_cast<void *>(bufferDesc.buffer), writeSize);
2360 }
2361 return SUCCESS;
2362 }
2363
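// When a haptics sync ID was captured for this cycle, the sink is notified once through the
// "AudioHapticsSync" parameter ("haptic_sessionid=<id>;haptic_offset=<offset>", offset derived
// from the current write position), after which the ID is cleared.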
2364 void AudioEndpointInner::CheckAudioHapticsSync(uint64_t curWritePos)
2365 {
2366 if (audioHapticsSyncId_ > 0) {
2367 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
2368 if (sink != nullptr) {
2369 uint64_t offset = dstSpanSizeInframe_ * curWritePos;
2370 std::string condition = "AudioHapticsSync";
2371 std::string value = "haptic_sessionid=" + std::to_string(audioHapticsSyncId_) +
2372 ";haptic_offset=" + std::to_string(offset);
2373 sink->SetAudioParameter(AudioParamKey::NONE, condition, value);
2374 }
2375 audioHapticsSyncId_ = 0;
2376 }
2377 }
2378 } // namespace AudioStandard
2379 } // namespace OHOS
2380