1 /*
2 * Copyright (c) 2023-2025 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #ifndef LOG_TAG
16 #define LOG_TAG "AudioEndpointInner"
17 #endif
18
19 #include "audio_endpoint.h"
20 #include "audio_endpoint_private.h"
21
22 #include <atomic>
23 #include <cinttypes>
24 #include <condition_variable>
25 #include <thread>
26 #include <vector>
27 #include <mutex>
28 #include <numeric>
29
30 #include "securec.h"
31 #include "xcollie/watchdog.h"
32
33 #include "audio_errors.h"
34 #include "audio_service_log.h"
35 #include "audio_schedule.h"
36 #include "audio_qosmanager.h"
37 #include "manager/hdi_adapter_manager.h"
38 #include "sink/i_audio_render_sink.h"
39 #include "source/i_audio_capture_source.h"
40 #include "format_converter.h"
41 #include "linear_pos_time_model.h"
42 #include "policy_handler.h"
43 #include "media_monitor_manager.h"
44 #include "volume_tools.h"
45 #include "audio_dump_pcm.h"
46 #include "audio_performance_monitor.h"
47 #include "audio_service.h"
48 #ifdef RESSCHE_ENABLE
49 #include "res_type.h"
50 #include "res_sched_client.h"
51 #endif
52 #include "audio_volume.h"
53
54 namespace OHOS {
55 namespace AudioStandard {
56 namespace {
static constexpr int32_t VOLUME_SHIFT_NUMBER = 16; // 1 << 16 = 65536, max volume
58 static constexpr int64_t RECORD_DELAY_TIME_NS = 4000000; // 4ms = 4 * 1000 * 1000ns
59 static constexpr int64_t RECORD_VOIP_DELAY_TIME_NS = 20000000; // 20ms = 20 * 1000 * 1000ns
60 static constexpr int64_t MAX_SPAN_DURATION_NS = 100000000; // 100ms = 100 * 1000 * 1000ns
61 static constexpr int64_t PLAYBACK_DELAY_STOP_HDI_TIME_NS = 3000000000; // 3s = 3 * 1000 * 1000 * 1000ns
62 static constexpr int64_t RECORDER_DELAY_STOP_HDI_TIME_NS = 200000000; // 200ms = 200 * 1000 * 1000ns
63 static constexpr int64_t LINK_RECORDER_DELAY_STOP_HDI_TIME_NS = 1000000000; // 1000ms = 1000 * 1000 * 1000ns
64 static constexpr int64_t WAIT_CLIENT_STANDBY_TIME_NS = 1000000000; // 1s = 1000 * 1000 * 1000ns
65 static constexpr int64_t DELAY_STOP_HDI_TIME_WHEN_NO_RUNNING_NS = 1000000000; // 1s
66 static constexpr int32_t SLEEP_TIME_IN_DEFAULT = 400; // 400ms
67 static constexpr int64_t DELTA_TO_REAL_READ_START_TIME = 0; // 0ms
68 const uint16_t GET_MAX_AMPLITUDE_FRAMES_THRESHOLD = 40;
69 constexpr int32_t WATCHDOG_INTERVAL_TIME_MS = 3000; // 3000ms
70 constexpr int32_t WATCHDOG_DELAY_TIME_MS = 10 * 1000; // 10000ms
71 static const int32_t ONE_MINUTE = 60;
72 static constexpr int64_t MAX_WAKEUP_TIME_NS = 2000000000; // 2s
73 static constexpr int64_t RELATIVE_SLEEP_TIME_NS = 5000000; // 5ms
74 }
75
AudioSampleFormat ConvertToHdiAdapterFormat(AudioSampleFormat format)
77 {
78 AudioSampleFormat adapterFormat;
79 switch (format) {
80 case AudioSampleFormat::SAMPLE_U8:
81 adapterFormat = AudioSampleFormat::SAMPLE_U8;
82 break;
83 case AudioSampleFormat::SAMPLE_S16LE:
84 adapterFormat = AudioSampleFormat::SAMPLE_S16LE;
85 break;
86 case AudioSampleFormat::SAMPLE_S24LE:
87 adapterFormat = AudioSampleFormat::SAMPLE_S24LE;
88 break;
89 case AudioSampleFormat::SAMPLE_S32LE:
90 adapterFormat = AudioSampleFormat::SAMPLE_S32LE;
91 break;
92 case AudioSampleFormat::SAMPLE_F32LE:
93 adapterFormat = AudioSampleFormat::SAMPLE_F32LE;
94 break;
95 default:
96 adapterFormat = AudioSampleFormat::INVALID_WIDTH;
97 break;
98 }
99
100 return adapterFormat;
101 }
102
std::string AudioEndpoint::GenerateEndpointKey(AudioDeviceDescriptor &deviceInfo, int32_t endpointFlag)
104 {
105 // All primary sinks share one endpoint
106 int32_t endpointId = 0;
107 if (deviceInfo.deviceType_ == DEVICE_TYPE_BLUETOOTH_A2DP) {
108 endpointId = deviceInfo.deviceId_;
109 }
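    // Key format: <networkId>_<deviceId|0>_<deviceRole>_<endpointFlag>; deviceId is used only for A2DP devices.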
110 return deviceInfo.networkId_ + "_" + std::to_string(endpointId) + "_" +
111 std::to_string(deviceInfo.deviceRole_) + "_" + std::to_string(endpointFlag);
112 }
113
std::shared_ptr<AudioEndpoint> AudioEndpoint::CreateEndpoint(EndpointType type, uint64_t id,
115 const AudioProcessConfig &clientConfig, const AudioDeviceDescriptor &deviceInfo)
116 {
117 std::shared_ptr<AudioEndpoint> audioEndpoint = nullptr;
118 if (type == EndpointType::TYPE_INDEPENDENT && deviceInfo.deviceRole_ != INPUT_DEVICE &&
119 deviceInfo.networkId_ == LOCAL_NETWORK_ID) {
120 audioEndpoint = std::make_shared<AudioEndpointSeparate>(type, id, clientConfig.streamType);
121 } else {
122 audioEndpoint = std::make_shared<AudioEndpointInner>(type, id, clientConfig);
123 }
124 CHECK_AND_RETURN_RET_LOG(audioEndpoint != nullptr, nullptr, "Create AudioEndpoint failed.");
125
126 if (!audioEndpoint->Config(deviceInfo)) {
127 AUDIO_ERR_LOG("Config AudioEndpoint failed.");
128 audioEndpoint = nullptr;
129 }
130 return audioEndpoint;
131 }
132
AudioEndpointInner::AudioEndpointInner(EndpointType type, uint64_t id,
134 const AudioProcessConfig &clientConfig) : endpointType_(type), id_(id), clientConfig_(clientConfig)
135 {
136 AUDIO_INFO_LOG("AudioEndpoint type:%{public}d", endpointType_);
137 if (clientConfig_.audioMode == AUDIO_MODE_PLAYBACK) {
138 logUtilsTag_ = "AudioEndpoint::Play";
139 } else {
140 logUtilsTag_ = "AudioEndpoint::Rec";
141 }
142 }
143
std::string AudioEndpointInner::GetEndpointName()
145 {
146 return GenerateEndpointKey(deviceInfo_, id_);
147 }
148
int32_t AudioEndpointInner::SetVolume(AudioStreamType streamType, float volume)
150 {
151 if (streamType == AudioStreamType::STREAM_VOICE_CALL && endpointType_ == TYPE_VOIP_MMAP) {
152 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
153 if (sink != nullptr) {
154 AUDIO_INFO_LOG("SetVolume:%{public}f, streamType:%{public}d", volume, streamType);
155 sink->SetVolume(volume, volume);
156 }
157 }
158 return SUCCESS;
159 }
160
int32_t AudioEndpointInner::ResolveBuffer(std::shared_ptr<OHAudioBuffer> &buffer)
162 {
163 return SUCCESS;
164 }
165
MockCallbacks::MockCallbacks(uint32_t streamIndex) : streamIndex_(streamIndex)
167 {
168 AUDIO_INFO_LOG("DupStream %{public}u create MockCallbacks", streamIndex_);
169 }
170
void MockCallbacks::OnStatusUpdate(IOperation operation)
172 {
173 AUDIO_INFO_LOG("DupStream %{public}u recv operation: %{public}d", streamIndex_, operation);
174 }
175
int32_t MockCallbacks::OnWriteData(size_t length)
177 {
178 Trace trace("DupStream::OnWriteData length " + std::to_string(length));
179 return SUCCESS;
180 }
181
bool AudioEndpointInner::ShouldInnerCap(int32_t innerCapId)
183 {
    bool shouldBeCapped = false;
    std::lock_guard<std::mutex> lock(listLock_);
    for (uint32_t i = 0; i < processList_.size(); i++) {
        if (processList_[i]->GetInnerCapState(innerCapId)) {
            shouldBeCapped = true;
            break;
        }
    }
    AUDIO_INFO_LOG("endpoint inner-cap state: %{public}s", shouldBeCapped ? "true" : "false");
    return shouldBeCapped;
194 }
195
AudioProcessConfig AudioEndpointInner::GetInnerCapConfig()
197 {
198 AudioProcessConfig processConfig;
199
200 processConfig.appInfo.appPid = static_cast<int32_t>(getpid());
201 processConfig.appInfo.appUid = static_cast<int32_t>(getuid());
202
203 processConfig.streamInfo = dstStreamInfo_;
204
205 processConfig.audioMode = AUDIO_MODE_PLAYBACK;
206
207 // processConfig.rendererInfo ?
208
209 processConfig.streamType = STREAM_MUSIC;
210
211 return processConfig;
212 }
213
int32_t AudioEndpointInner::InitDupStream(int32_t innerCapId)
215 {
216 std::lock_guard<std::mutex> lock(dupMutex_);
217 bool hasEnabled = (fastCaptureInfos_.count(innerCapId) && fastCaptureInfos_[innerCapId].isInnerCapEnabled);
218 CHECK_AND_RETURN_RET_LOG((hasEnabled == false), SUCCESS, "already enabled");
219
220 AudioProcessConfig processConfig = GetInnerCapConfig();
221 processConfig.innerCapId = innerCapId;
222 auto &captureInfo = fastCaptureInfos_[innerCapId];
223 int32_t ret = IStreamManager::GetDupPlaybackManager().CreateRender(processConfig, captureInfo.dupStream);
224 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS && captureInfo.dupStream != nullptr,
225 ERR_OPERATION_FAILED, "Failed: %{public}d", ret);
226 uint32_t dupStreamIndex = captureInfo.dupStream->GetStreamIndex();
227
228 dupStreamCallback_ = std::make_shared<MockCallbacks>(dupStreamIndex);
229 captureInfo.dupStream->RegisterStatusCallback(dupStreamCallback_);
230 captureInfo.dupStream->RegisterWriteCallback(dupStreamCallback_);
231
232 // eg: /data/local/tmp/LocalDevice6_0_c2s_dup_48000_2_1.pcm
233 AudioStreamInfo tempInfo = processConfig.streamInfo;
234 dupDumpName_ = GetEndpointName() + "_c2s_dup_" + std::to_string(tempInfo.samplingRate) + "_" +
235 std::to_string(tempInfo.channels) + "_" + std::to_string(tempInfo.format) + ".pcm";
236 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dupDumpName_, &dumpC2SDup_);
237
238 AUDIO_INFO_LOG("Dup Renderer %{public}d with Endpoint status: %{public}s", dupStreamIndex,
239 GetStatusStr(endpointStatus_).c_str());
240 CHECK_AND_RETURN_RET_LOG(endpointStatus_ != INVALID, ERR_ILLEGAL_STATE, "Endpoint is invalid");
241
242 // buffer init
    dupBufferSize_ = dstSpanSizeInframe_ * dstByteSizePerFrame_; // bytes in one span
244 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr, ERR_OPERATION_FAILED, "DstAudioBuffer is nullptr");
245 CHECK_AND_RETURN_RET_LOG(dupBufferSize_ < dstAudioBuffer_->GetDataSize(), ERR_OPERATION_FAILED, "Init buffer fail");
246 dupBuffer_ = std::make_unique<uint8_t []>(dupBufferSize_);
247 ret = memset_s(reinterpret_cast<void *>(dupBuffer_.get()), dupBufferSize_, 0, dupBufferSize_);
248 if (ret != EOK) {
249 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d", ret);
250 }
251
252 if (endpointStatus_ == RUNNING || (endpointStatus_ == IDEL && isDeviceRunningInIdel_)) {
253 int32_t audioId = deviceInfo_.deviceId_;
254 AUDIO_INFO_LOG("Endpoint %{public}d is already running, let's start the dup stream", audioId);
255 captureInfo.dupStream->Start();
256 }
257 captureInfo.isInnerCapEnabled = true;
258 return SUCCESS;
259 }
260
int32_t AudioEndpointInner::EnableFastInnerCap(int32_t innerCapId)
262 {
263 if (fastCaptureInfos_.count(innerCapId) && fastCaptureInfos_[innerCapId].isInnerCapEnabled) {
264 AUDIO_INFO_LOG("InnerCap is already enabled");
265 return SUCCESS;
266 }
267
268 CHECK_AND_RETURN_RET_LOG(deviceInfo_.deviceRole_ == OUTPUT_DEVICE, ERR_INVALID_OPERATION, "Not output device!");
269 int32_t ret = InitDupStream(innerCapId);
270 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_OPERATION_FAILED, "Init dup stream failed");
271 return SUCCESS;
272 }
273
int32_t AudioEndpointInner::DisableFastInnerCap()
275 {
276 if (deviceInfo_.deviceRole_ != OUTPUT_DEVICE) {
277 return SUCCESS;
278 }
279 std::lock_guard<std::mutex> lock(dupMutex_);
280 for (auto &capInfo : fastCaptureInfos_) {
281 HandleDisableFastCap(capInfo.second);
282 }
283 fastCaptureInfos_.clear();
284 return SUCCESS;
285 }
286
int32_t AudioEndpointInner::DisableFastInnerCap(int32_t innerCapId)
288 {
289 if (deviceInfo_.deviceRole_ != OUTPUT_DEVICE) {
290 return SUCCESS;
291 }
292 std::lock_guard<std::mutex> lock(dupMutex_);
293 if (!fastCaptureInfos_.count(innerCapId)) {
294 AUDIO_INFO_LOG("InnerCap is already disabled.");
295 return SUCCESS;
296 }
297 HandleDisableFastCap(fastCaptureInfos_[innerCapId]);
298 fastCaptureInfos_.erase(innerCapId);
299 return SUCCESS;
300 }
301
int32_t AudioEndpointInner::HandleDisableFastCap(CaptureInfo &captureInfo)
303 {
304 if (!captureInfo.isInnerCapEnabled) {
305 captureInfo.dupStream = nullptr;
306 AUDIO_INFO_LOG("InnerCap is already disabled.");
307 return SUCCESS;
308 }
309 if (captureInfo.dupStream == nullptr) {
310 captureInfo.isInnerCapEnabled = false;
311 AUDIO_INFO_LOG("dupStream is nullptr");
312 return SUCCESS;
313 }
314 captureInfo.isInnerCapEnabled = false;
315 AUDIO_INFO_LOG("Disable dup renderer %{public}d with Endpoint status: %{public}s",
316 captureInfo.dupStream->GetStreamIndex(), GetStatusStr(endpointStatus_).c_str());
317 IStreamManager::GetDupPlaybackManager().ReleaseRender(captureInfo.dupStream->GetStreamIndex());
318 captureInfo.dupStream = nullptr;
319 return SUCCESS;
320 }
321
AudioEndpoint::EndpointStatus AudioEndpointInner::GetStatus()
323 {
324 AUDIO_INFO_LOG("AudioEndpoint get status:%{public}s", GetStatusStr(endpointStatus_).c_str());
325 return endpointStatus_.load();
326 }
327
void AudioEndpointInner::Release()
329 {
330 // Wait for thread end and then clear other data to avoid using any cleared data in thread.
331 AUDIO_INFO_LOG("Release enter.");
332 if (!isInited_.load()) {
333 AUDIO_WARNING_LOG("already released");
334 return;
335 }
336
337 isInited_.store(false);
338 workThreadCV_.notify_all();
339 if (endpointWorkThread_.joinable()) {
340 AUDIO_DEBUG_LOG("AudioEndpoint join work thread start");
341 endpointWorkThread_.join();
342 AUDIO_DEBUG_LOG("AudioEndpoint join work thread end");
343 }
344 AudioPerformanceMonitor::GetInstance().DeleteOvertimeMonitor(ADAPTER_TYPE_FAST);
345
346 stopUpdateThread_.store(true);
347 updateThreadCV_.notify_all();
348 if (updatePosTimeThread_.joinable()) {
349 AUDIO_DEBUG_LOG("AudioEndpoint join update thread start");
350 updatePosTimeThread_.join();
351 AUDIO_DEBUG_LOG("AudioEndpoint join update thread end");
352 }
353
354 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
355 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
356 if (sink != nullptr) {
357 sink->DeInit();
358 }
359 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
360
361 if (source != nullptr) {
362 source->DeInit();
363 }
364 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
365
366 endpointStatus_.store(INVALID);
367
368 if (dstAudioBuffer_ != nullptr) {
369 AUDIO_INFO_LOG("Set device buffer null");
370 dstAudioBuffer_ = nullptr;
371 }
372
373 if (deviceInfo_.deviceRole_ == OUTPUT_DEVICE) {
374 DisableFastInnerCap();
375 }
376
377 DumpFileUtil::CloseDumpFile(&dumpHdi_);
378 }
379
AudioEndpointInner::~AudioEndpointInner()
381 {
382 if (isInited_.load()) {
383 AudioEndpointInner::Release();
384 }
385 AUDIO_INFO_LOG("~AudioEndpoint()");
386 }
387
bool AudioEndpointInner::ConfigInputPoint(const AudioDeviceDescriptor &deviceInfo)
389 {
390 AUDIO_INFO_LOG("ConfigInputPoint enter.");
391 IAudioSourceAttr attr = {};
392 attr.sampleRate = dstStreamInfo_.samplingRate;
393 attr.channel = dstStreamInfo_.channels;
394 attr.format = ConvertToHdiAdapterFormat(dstStreamInfo_.format);
395 attr.deviceNetworkId = deviceInfo.networkId_.c_str();
396 attr.deviceType = deviceInfo.deviceType_;
397 attr.audioStreamFlag = endpointType_ == TYPE_VOIP_MMAP ? AUDIO_FLAG_VOIP_FAST : AUDIO_FLAG_MMAP;
398
399 std::shared_ptr<IAudioCaptureSource> source = GetFastSource(deviceInfo.networkId_, endpointType_, attr);
400
401 if (deviceInfo.networkId_ == LOCAL_NETWORK_ID) {
402 attr.adapterName = "primary";
403 } else {
404 #ifdef DAUDIO_ENABLE
405 attr.adapterName = "remote";
406 #endif
407 }
408 if (source == nullptr) {
409 AUDIO_ERR_LOG("ConfigInputPoint GetInstance failed.");
410 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
411 return false;
412 }
413
414 int32_t err = source->Init(attr);
415 if (err != SUCCESS || !source->IsInited()) {
416 AUDIO_ERR_LOG("init remote fast fail, err %{public}d.", err);
417 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
418 return false;
419 }
420 if (PrepareDeviceBuffer(deviceInfo) != SUCCESS) {
421 source->DeInit();
422 HdiAdapterManager::GetInstance().ReleaseId(fastCaptureId_);
423 return false;
424 }
425
426 bool ret = writeTimeModel_.ConfigSampleRate(dstStreamInfo_.samplingRate);
427 CHECK_AND_RETURN_RET_LOG(ret != false, false, "Config LinearPosTimeModel failed.");
428
429 endpointStatus_ = UNLINKED;
430 isInited_.store(true);
431 endpointWorkThread_ = std::thread([this] { this->RecordEndpointWorkLoopFuc(); });
432 pthread_setname_np(endpointWorkThread_.native_handle(), "OS_AudioEpLoop");
433
434 updatePosTimeThread_ = std::thread([this] { this->AsyncGetPosTime(); });
435 pthread_setname_np(updatePosTimeThread_.native_handle(), "OS_AudioEpUpdate");
436
437 // eg: input_endpoint_hdi_audio_8_0_20240527202236189_48000_2_1.pcm
438 dumpHdiName_ = "input_endpoint_hdi_audio_" + std::to_string(attr.deviceType) + '_' +
439 std::to_string(endpointType_) + '_' + GetTime() + '_' + std::to_string(attr.sampleRate) + "_" +
440 std::to_string(attr.channel) + "_" + std::to_string(attr.format) + ".pcm";
441 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dumpHdiName_, &dumpHdi_);
442 return true;
443 }
444
static std::shared_ptr<IAudioCaptureSource> SwitchSource(uint32_t &id, HdiIdType type, const std::string &info)
446 {
447 if (id != HDI_INVALID_ID) {
448 HdiAdapterManager::GetInstance().ReleaseId(id);
449 }
450 id = HdiAdapterManager::GetInstance().GetId(HDI_ID_BASE_CAPTURE, type, info, true);
451 return HdiAdapterManager::GetInstance().GetCaptureSource(id, true);
452 }
453
std::shared_ptr<IAudioCaptureSource> AudioEndpointInner::GetFastSource(const std::string &networkId, EndpointType type,
455 IAudioSourceAttr &attr)
456 {
457 AUDIO_INFO_LOG("Network id %{public}s, endpoint type %{public}d", networkId.c_str(), type);
458 if (networkId != LOCAL_NETWORK_ID) {
459 attr.adapterName = "remote";
460 #ifdef DAUDIO_ENABLE
461 fastSourceType_ = type == AudioEndpoint::TYPE_MMAP ? FAST_SOURCE_TYPE_REMOTE : FAST_SOURCE_TYPE_VOIP;
462 // Distributed only requires a singleton because there won't be both voip and regular fast simultaneously
463 return SwitchSource(fastCaptureId_, HDI_ID_TYPE_REMOTE_FAST, networkId);
464 #endif
465 }
466
467 attr.adapterName = "primary";
468 if (type == AudioEndpoint::TYPE_MMAP) {
469 fastSourceType_ = FAST_SOURCE_TYPE_NORMAL;
470 } else if (type == AudioEndpoint::TYPE_VOIP_MMAP) {
471 fastSourceType_ = FAST_SOURCE_TYPE_VOIP;
472 }
473 // voip delete, maybe need fix
474 return SwitchSource(fastCaptureId_, HDI_ID_TYPE_FAST, HDI_ID_INFO_DEFAULT);
475 }
476
void AudioEndpointInner::StartThread(const IAudioSinkAttr &attr)
478 {
479 endpointStatus_ = UNLINKED;
480 isInited_.store(true);
481 endpointWorkThread_ = std::thread([this] { this->EndpointWorkLoopFuc(); });
482 pthread_setname_np(endpointWorkThread_.native_handle(), "OS_AudioEpLoop");
483
484 updatePosTimeThread_ = std::thread([this] { this->AsyncGetPosTime(); });
485 pthread_setname_np(updatePosTimeThread_.native_handle(), "OS_AudioEpUpdate");
486
487 // eg: endpoint_hdi_audio_8_0_20240527202236189_48000_2_1.pcm
488 dumpHdiName_ = "endpoint_hdi_audio_" + std::to_string(attr.deviceType) + '_' + std::to_string(endpointType_) +
489 '_' + GetTime() + '_' + std::to_string(attr.sampleRate) + "_" +
490 std::to_string(attr.channel) + "_" + std::to_string(attr.format) + ".pcm";
491 DumpFileUtil::OpenDumpFile(DumpFileUtil::DUMP_SERVER_PARA, dumpHdiName_, &dumpHdi_);
492 }
493
bool AudioEndpointInner::Config(const AudioDeviceDescriptor &deviceInfo)
495 {
496 AUDIO_INFO_LOG("Config enter, deviceRole %{public}d.", deviceInfo.deviceRole_);
497 deviceInfo_ = deviceInfo;
498 bool res = deviceInfo_.audioStreamInfo_.CheckParams();
499 CHECK_AND_RETURN_RET_LOG(res, false, "samplingRate or channels size is 0");
500
501 dstStreamInfo_ = {
502 *deviceInfo.audioStreamInfo_.samplingRate.rbegin(),
503 deviceInfo.audioStreamInfo_.encoding,
504 deviceInfo.audioStreamInfo_.format,
505 *deviceInfo.audioStreamInfo_.channels.rbegin()
506 };
507 dstStreamInfo_.channelLayout = deviceInfo.audioStreamInfo_.channelLayout;
508
509 if (deviceInfo.deviceRole_ == INPUT_DEVICE) {
510 return ConfigInputPoint(deviceInfo);
511 }
512
513 std::shared_ptr<IAudioRenderSink> sink = GetFastSink(deviceInfo, endpointType_);
514 if (sink == nullptr) {
515 AUDIO_ERR_LOG("Get fastSink instance failed");
516 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
517 return false;
518 }
519
520 IAudioSinkAttr attr = {};
521 InitSinkAttr(attr, deviceInfo);
522
523 sink->Init(attr);
524 if (!sink->IsInited()) {
525 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
526 return false;
527 }
528 if (PrepareDeviceBuffer(deviceInfo) != SUCCESS) {
529 sink->DeInit();
530 HdiAdapterManager::GetInstance().ReleaseId(fastRenderId_);
531 return false;
532 }
533
534 float initVolume = 1.0; // init volume to 1.0
535 sink->SetVolume(initVolume, initVolume);
536
537 bool ret = readTimeModel_.ConfigSampleRate(dstStreamInfo_.samplingRate);
538 CHECK_AND_RETURN_RET_LOG(ret != false, false, "Config LinearPosTimeModel failed.");
539 StartThread(attr);
540 return true;
541 }
542
static std::shared_ptr<IAudioRenderSink> SwitchSink(uint32_t &id, HdiIdType type, const std::string &info)
544 {
545 if (id != HDI_INVALID_ID) {
546 HdiAdapterManager::GetInstance().ReleaseId(id);
547 }
548 id = HdiAdapterManager::GetInstance().GetId(HDI_ID_BASE_RENDER, type, info, true);
549 return HdiAdapterManager::GetInstance().GetRenderSink(id, true);
550 }
551
std::shared_ptr<IAudioRenderSink> AudioEndpointInner::GetFastSink(const AudioDeviceDescriptor &deviceInfo,
553 EndpointType type)
554 {
555 AUDIO_INFO_LOG("Network id %{public}s, endpoint type %{public}d", deviceInfo.networkId_.c_str(), type);
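    // Sink selection: remote networks use the remote fast sink; non-offload A2DP uses the bluetooth mmap sink;
    // otherwise the local fast or voip-fast sink is used.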
556 if (deviceInfo.networkId_ != LOCAL_NETWORK_ID) {
557 #ifdef DAUDIO_ENABLE
558 fastSinkType_ = type == AudioEndpoint::TYPE_MMAP ? FAST_SINK_TYPE_REMOTE : FAST_SINK_TYPE_VOIP;
559 // Distributed only requires a singleton because there won't be both voip and regular fast simultaneously
560 return SwitchSink(fastRenderId_, HDI_ID_TYPE_REMOTE_FAST, deviceInfo.networkId_);
561 #endif
562 }
563
564 if (deviceInfo.deviceType_ == DEVICE_TYPE_BLUETOOTH_A2DP && deviceInfo.a2dpOffloadFlag_ != A2DP_OFFLOAD) {
565 fastSinkType_ = FAST_SINK_TYPE_BLUETOOTH;
566 return SwitchSink(fastRenderId_, HDI_ID_TYPE_BLUETOOTH, HDI_ID_INFO_MMAP);
567 }
568
569 if (type == AudioEndpoint::TYPE_MMAP) {
570 fastSinkType_ = FAST_SINK_TYPE_NORMAL;
571 return SwitchSink(fastRenderId_, HDI_ID_TYPE_FAST, HDI_ID_INFO_DEFAULT);
572 } else if (type == AudioEndpoint::TYPE_VOIP_MMAP) {
573 fastSinkType_ = FAST_SINK_TYPE_VOIP;
574 return SwitchSink(fastRenderId_, HDI_ID_TYPE_FAST, HDI_ID_INFO_VOIP);
575 }
576 return nullptr;
577 }
578
void AudioEndpointInner::InitSinkAttr(IAudioSinkAttr &attr, const AudioDeviceDescriptor &deviceInfo)
580 {
581 bool isDefaultAdapterEnable = AudioService::GetInstance()->GetDefaultAdapterEnable();
582 if (isDefaultAdapterEnable) {
583 attr.adapterName = "dp";
584 } else {
585 attr.adapterName = deviceInfo.networkId_ == LOCAL_NETWORK_ID ? "primary" : "remote";
586 }
587 attr.sampleRate = dstStreamInfo_.samplingRate; // 48000hz
588 attr.channel = dstStreamInfo_.channels; // STEREO = 2
589 attr.format = ConvertToHdiAdapterFormat(dstStreamInfo_.format); // SAMPLE_S16LE = 1
590 attr.deviceNetworkId = deviceInfo.networkId_.c_str();
591 attr.deviceType = static_cast<int32_t>(deviceInfo.deviceType_);
592 attr.audioStreamFlag = endpointType_ == TYPE_VOIP_MMAP ? AUDIO_FLAG_VOIP_FAST : AUDIO_FLAG_MMAP;
593 }
594
int32_t AudioEndpointInner::GetAdapterBufferInfo(const AudioDeviceDescriptor &deviceInfo)
596 {
597 int32_t ret = 0;
598 AUDIO_INFO_LOG("GetAdapterBufferInfo enter, deviceRole %{public}d.", deviceInfo.deviceRole_);
599 if (deviceInfo.deviceRole_ == INPUT_DEVICE) {
600 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
601 CHECK_AND_RETURN_RET_LOG(source != nullptr, ERR_INVALID_HANDLE, "fast source is null.");
602 ret = source->GetMmapBufferInfo(dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_,
603 dstByteSizePerFrame_);
604 } else {
605 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
606 CHECK_AND_RETURN_RET_LOG(sink != nullptr, ERR_INVALID_HANDLE, "fast sink is null.");
607 ret = sink->GetMmapBufferInfo(dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_,
608 dstByteSizePerFrame_);
609 }
610
611 if (ret != SUCCESS || dstBufferFd_ == -1 || dstTotalSizeInframe_ == 0 || dstSpanSizeInframe_ == 0 ||
612 dstByteSizePerFrame_ == 0) {
613 AUDIO_ERR_LOG("get mmap buffer info fail, ret %{public}d, dstBufferFd %{public}d, \
614 dstTotalSizeInframe %{public}d, dstSpanSizeInframe %{public}d, dstByteSizePerFrame %{public}d.",
615 ret, dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_, dstByteSizePerFrame_);
616 return ERR_ILLEGAL_STATE;
617 }
618 AUDIO_DEBUG_LOG("end, fd %{public}d.", dstBufferFd_);
619 return SUCCESS;
620 }
621
int32_t AudioEndpointInner::PrepareDeviceBuffer(const AudioDeviceDescriptor &deviceInfo)
623 {
624 AUDIO_INFO_LOG("enter, deviceRole %{public}d.", deviceInfo.deviceRole_);
625 if (dstAudioBuffer_ != nullptr) {
        AUDIO_INFO_LOG("endpoint buffer is prepared, fd:%{public}d", dstBufferFd_);
627 return SUCCESS;
628 }
629
630 int32_t ret = GetAdapterBufferInfo(deviceInfo);
631 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_OPERATION_FAILED,
632 "get adapter buffer Info fail, ret %{public}d.", ret);
633
    // spanDuration_ may be slightly less than the exact duration of dstSpanSizeInframe_ frames due to integer division.
635 spanDuration_ = static_cast<int64_t>(dstSpanSizeInframe_) * AUDIO_NS_PER_SECOND /
636 static_cast<int64_t>(dstStreamInfo_.samplingRate);
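    // Ahead-read time defaults to 3/5 of one span; a sysparam may override it, but it must stay below
    // (total - one span) worth of frames so the server never reads past the writable region, and is floored at 1ms.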
637 int64_t temp = spanDuration_ / 5 * 3; // 3/5 spanDuration
638 int64_t setTime = -1;
639 int64_t maxSetTime = (static_cast<int64_t>(dstTotalSizeInframe_ - dstSpanSizeInframe_)) *
640 AUDIO_NS_PER_SECOND / static_cast<int64_t>(dstStreamInfo_.samplingRate);
641 GetSysPara("persist.multimedia.serveraheadreadtime", setTime);
642 temp = setTime > 0 && setTime < maxSetTime ? setTime : temp;
643 serverAheadReadTime_ = temp < ONE_MILLISECOND_DURATION ? ONE_MILLISECOND_DURATION : temp; // at least 1ms ahead.
    AUDIO_INFO_LOG("spanDuration %{public}" PRId64" ns, serverAheadReadTime %{public}" PRId64" ns.",
        spanDuration_, serverAheadReadTime_);
646
    CHECK_AND_RETURN_RET_LOG(spanDuration_ > 0 && spanDuration_ < MAX_SPAN_DURATION_NS,
        ERR_INVALID_PARAM, "mmap span info error, spanDuration %{public}" PRId64".", spanDuration_);
649 dstAudioBuffer_ = OHAudioBuffer::CreateFromRemote(dstTotalSizeInframe_, dstSpanSizeInframe_, dstByteSizePerFrame_,
650 AUDIO_SERVER_ONLY, dstBufferFd_, OHAudioBuffer::INVALID_BUFFER_FD);
651 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr && dstAudioBuffer_->GetBufferHolder() ==
652 AudioBufferHolder::AUDIO_SERVER_ONLY, ERR_ILLEGAL_STATE, "create buffer from remote fail.");
653
654 if (dstAudioBuffer_ == nullptr || dstAudioBuffer_->GetStreamStatus() == nullptr) {
655 AUDIO_ERR_LOG("The stream status is null!");
656 return ERR_INVALID_PARAM;
657 }
658
659 dstAudioBuffer_->GetStreamStatus()->store(StreamStatus::STREAM_IDEL);
660
661 // clear data buffer
662 ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0, dstAudioBuffer_->GetDataSize());
663 if (ret != EOK) {
664 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d, fd %{public}d.", ret, dstBufferFd_);
665 }
666 InitAudiobuffer(true);
667
668 AUDIO_DEBUG_LOG("end, fd %{public}d.", dstBufferFd_);
669 return SUCCESS;
670 }
671
void AudioEndpointInner::InitAudiobuffer(bool resetReadWritePos)
673 {
674 CHECK_AND_RETURN_LOG((dstAudioBuffer_ != nullptr), "dst audio buffer is null.");
675 if (resetReadWritePos) {
676 dstAudioBuffer_->ResetCurReadWritePos(0, 0);
677 }
678
679 uint32_t spanCount = dstAudioBuffer_->GetSpanCount();
680 for (uint32_t i = 0; i < spanCount; i++) {
681 SpanInfo *spanInfo = dstAudioBuffer_->GetSpanInfoByIndex(i);
682 CHECK_AND_RETURN_LOG(spanInfo != nullptr, "InitAudiobuffer failed.");
683 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
684 spanInfo->spanStatus = SPAN_WRITE_DONE;
685 } else {
686 spanInfo->spanStatus = SPAN_READ_DONE;
687 }
688 spanInfo->offsetInFrame = 0;
689
690 spanInfo->readStartTime = 0;
691 spanInfo->readDoneTime = 0;
692
693 spanInfo->writeStartTime = 0;
694 spanInfo->writeDoneTime = 0;
695
696 spanInfo->volumeStart = 1 << VOLUME_SHIFT_NUMBER; // 65536 for initialize
697 spanInfo->volumeEnd = 1 << VOLUME_SHIFT_NUMBER; // 65536 for initialize
698 spanInfo->isMute = false;
699 }
700 return;
701 }
702
int32_t AudioEndpointInner::GetPreferBufferInfo(uint32_t &totalSizeInframe, uint32_t &spanSizeInframe)
704 {
705 totalSizeInframe = dstTotalSizeInframe_;
706 spanSizeInframe = dstSpanSizeInframe_;
707 return SUCCESS;
708 }
709
bool AudioEndpointInner::IsAnyProcessRunning()
711 {
712 std::lock_guard<std::mutex> lock(listLock_);
713 return IsAnyProcessRunningInner();
714 }
715
716 // Should be called with AudioEndpointInner::listLock_ locked
bool AudioEndpointInner::IsAnyProcessRunningInner()
718 {
719 bool isRunning = false;
720 for (size_t i = 0; i < processBufferList_.size(); i++) {
721 if (processBufferList_[i]->GetStreamStatus() &&
722 processBufferList_[i]->GetStreamStatus()->load() == STREAM_RUNNING) {
723 isRunning = true;
724 break;
725 }
726 }
727 return isRunning;
728 }
729
void AudioEndpointInner::RecordReSyncPosition()
731 {
732 AUDIO_INFO_LOG("RecordReSyncPosition enter.");
733 uint64_t curHdiWritePos = 0;
734 int64_t writeTime = 0;
735 CHECK_AND_RETURN_LOG(GetDeviceHandleInfo(curHdiWritePos, writeTime), "get device handle info fail.");
736 AUDIO_DEBUG_LOG("get capturer info, curHdiWritePos %{public}" PRIu64", writeTime %{public}" PRId64".",
737 curHdiWritePos, writeTime);
738 int64_t temp = ClockTime::GetCurNano() - writeTime;
739 if (temp > spanDuration_) {
        AUDIO_WARNING_LOG("GetDeviceHandleInfo cost a long time %{public}" PRId64".", temp);
741 }
742
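    // Re-anchor the frame/time model at the current HDI write position and restart the ring buffer
    // read/write positions from that same frame.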
743 writeTimeModel_.ResetFrameStamp(curHdiWritePos, writeTime);
744 uint64_t nextDstReadPos = curHdiWritePos;
745 uint64_t nextDstWritePos = curHdiWritePos;
746 InitAudiobuffer(false);
747 int32_t ret = dstAudioBuffer_->ResetCurReadWritePos(nextDstReadPos, nextDstWritePos);
748 CHECK_AND_RETURN_LOG(ret == SUCCESS, "ResetCurReadWritePos failed.");
749
    SpanInfo *nextReadSpan = dstAudioBuffer_->GetSpanInfo(nextDstReadPos);
    CHECK_AND_RETURN_LOG(nextReadSpan != nullptr, "GetSpanInfo failed.");
    nextReadSpan->offsetInFrame = nextDstReadPos;
    nextReadSpan->spanStatus = SpanStatus::SPAN_WRITE_DONE;
754 }
755
void AudioEndpointInner::ReSyncPosition()
757 {
758 Trace loopTrace("AudioEndpoint::ReSyncPosition");
759 uint64_t curHdiReadPos = 0;
760 int64_t readTime = 0;
761 bool res = GetDeviceHandleInfo(curHdiReadPos, readTime);
762 CHECK_AND_RETURN_LOG(res, "ReSyncPosition call GetDeviceHandleInfo failed.");
763 int64_t curTime = ClockTime::GetCurNano();
764 int64_t temp = curTime - readTime;
765 if (temp > spanDuration_) {
766 AUDIO_ERR_LOG("GetDeviceHandleInfo may cost long time.");
767 }
768
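    // For playback, resume writing one span ahead of the current HDI read position so the sink
    // only ever reads spans that have already been written.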
769 readTimeModel_.ResetFrameStamp(curHdiReadPos, readTime);
770 uint64_t nextDstWritePos = curHdiReadPos + dstSpanSizeInframe_;
771 InitAudiobuffer(false);
772 int32_t ret = dstAudioBuffer_->ResetCurReadWritePos(nextDstWritePos, nextDstWritePos);
773 CHECK_AND_RETURN_LOG(ret == SUCCESS, "ResetCurReadWritePos failed.");
774
    SpanInfo *nextWriteSpan = dstAudioBuffer_->GetSpanInfo(nextDstWritePos);
    CHECK_AND_RETURN_LOG(nextWriteSpan != nullptr, "GetSpanInfo failed.");
    nextWriteSpan->offsetInFrame = nextDstWritePos;
    nextWriteSpan->spanStatus = SpanStatus::SPAN_READ_DONE;
779 return;
780 }
781
bool AudioEndpointInner::StartDevice(EndpointStatus preferredState)
783 {
784 AUDIO_INFO_LOG("StartDevice enter.");
785 // how to modify the status while unlinked and started?
786 CHECK_AND_RETURN_RET_LOG(endpointStatus_ == IDEL, false, "Endpoint status is %{public}s",
787 GetStatusStr(endpointStatus_).c_str());
788 endpointStatus_ = STARTING;
789 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
790 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
791 if ((deviceInfo_.deviceRole_ == INPUT_DEVICE && (source == nullptr || source->Start() != SUCCESS)) ||
792 (deviceInfo_.deviceRole_ == OUTPUT_DEVICE && (sink == nullptr || sink->Start() != SUCCESS))) {
793 HandleStartDeviceFailed();
794 return false;
795 }
796 isStarted_ = true;
797 ResetZeroVolumeState();
798
799 Trace trace("AudioEndpointInner::StartDupStream");
800 {
801 std::lock_guard<std::mutex> lock(dupMutex_);
802 for (auto &capture : fastCaptureInfos_) {
803 if (capture.second.isInnerCapEnabled && capture.second.dupStream != nullptr) {
804 capture.second.dupStream->Start();
805 }
806 }
807 }
808
809 std::unique_lock<std::mutex> lock(loopThreadLock_);
810 needReSyncPosition_ = true;
811 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
812 if (preferredState != INVALID) {
813 AUDIO_INFO_LOG("Preferred state: %{public}d, current: %{public}d", preferredState, endpointStatus_.load());
814 endpointStatus_ = preferredState;
815 }
816 workThreadCV_.notify_all();
817 AUDIO_DEBUG_LOG("StartDevice out, status is %{public}s", GetStatusStr(endpointStatus_).c_str());
818 return true;
819 }
820
void AudioEndpointInner::HandleStartDeviceFailed()
822 {
823 AUDIO_ERR_LOG("Start failed for %{public}d, endpoint type %{public}u, process list size: %{public}zu.",
824 deviceInfo_.deviceRole_, endpointType_, processList_.size());
825 std::lock_guard<std::mutex> lock(listLock_);
826 isStarted_ = false;
827 if (processList_.size() <= 1) { // The endpoint only has the current stream
828 endpointStatus_ = UNLINKED;
829 } else {
830 endpointStatus_ = IDEL;
831 }
832 workThreadCV_.notify_all();
833 }
834
835 // will not change state to stopped
bool AudioEndpointInner::DelayStopDevice()
837 {
838 AUDIO_INFO_LOG("Status:%{public}s", GetStatusStr(endpointStatus_).c_str());
839
840 // Clear data buffer to avoid noise in some case.
841 if (dstAudioBuffer_ != nullptr) {
842 int32_t ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0,
843 dstAudioBuffer_->GetDataSize());
844 if (ret != EOK) {
845 AUDIO_WARNING_LOG("reset buffer fail, ret %{public}d.", ret);
846 }
847 }
848
849 {
850 Trace trace("AudioEndpointInner::StopDupStreamInDelay");
851 std::lock_guard<std::mutex> lock(dupMutex_);
852 for (auto &capture : fastCaptureInfos_) {
853 if (capture.second.isInnerCapEnabled && capture.second.dupStream != nullptr) {
854 capture.second.dupStream->Stop();
855 }
856 }
857 }
858
859 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
860 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
861 CHECK_AND_RETURN_RET_LOG(source != nullptr && source->Stop() == SUCCESS,
862 false, "Source stop failed.");
863 } else {
864 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
865 CHECK_AND_RETURN_RET_LOG(endpointStatus_ == IDEL && sink != nullptr && sink->Stop() == SUCCESS,
866 false, "Sink stop failed.");
867 }
868 isStarted_ = false;
869 return true;
870 }
871
bool AudioEndpointInner::StopDevice()
873 {
874 DeinitLatencyMeasurement();
875
876 AUDIO_INFO_LOG("StopDevice with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
877 // todo
878 endpointStatus_ = STOPPING;
879 // Clear data buffer to avoid noise in some case.
880 if (dstAudioBuffer_ != nullptr) {
881 int32_t ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0,
882 dstAudioBuffer_->GetDataSize());
883 AUDIO_INFO_LOG("StopDevice clear buffer ret:%{public}d", ret);
884 }
885
886 {
887 Trace trace("AudioEndpointInner::StopDupStream");
888 std::lock_guard<std::mutex> lock(dupMutex_);
889 for (auto &capture : fastCaptureInfos_) {
890 if (capture.second.isInnerCapEnabled && capture.second.dupStream != nullptr) {
891 capture.second.dupStream->Stop();
892 }
893 }
894 }
895
896 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
897 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
898 CHECK_AND_RETURN_RET_LOG(source != nullptr && source->Stop() == SUCCESS,
899 false, "Source stop failed.");
900 } else {
901 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
902 CHECK_AND_RETURN_RET_LOG(sink != nullptr && sink->Stop() == SUCCESS, false, "Sink stop failed.");
903 }
904 endpointStatus_ = STOPPED;
905 isStarted_ = false;
906 return true;
907 }
908
int32_t AudioEndpointInner::OnStart(IAudioProcessStream *processStream)
910 {
911 InitLatencyMeasurement();
912 AUDIO_PRERELEASE_LOGI("OnStart endpoint status:%{public}s", GetStatusStr(endpointStatus_).c_str());
913 if (endpointStatus_ == RUNNING) {
914 AUDIO_INFO_LOG("OnStart find endpoint already in RUNNING.");
915 return SUCCESS;
916 }
917 if (endpointStatus_ == IDEL) {
918 // call sink start
919 if (!isStarted_) {
920 CHECK_AND_RETURN_RET_LOG(StartDevice(RUNNING), ERR_OPERATION_FAILED, "StartDevice failed");
921 }
922 }
923
924 endpointStatus_ = RUNNING;
925 delayStopTime_ = INT64_MAX;
926 return SUCCESS;
927 }
928
int32_t AudioEndpointInner::OnPause(IAudioProcessStream *processStream)
930 {
931 AUDIO_PRERELEASE_LOGI("OnPause endpoint status:%{public}s", GetStatusStr(endpointStatus_).c_str());
932 if (endpointStatus_ == RUNNING) {
933 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
934 }
935 if (endpointStatus_ == IDEL) {
936 // delay call sink stop when no process running
937 AUDIO_PRERELEASE_LOGI("OnPause status is IDEL, need delay call stop");
938 delayStopTime_ = ClockTime::GetCurNano() + ((clientConfig_.audioMode == AUDIO_MODE_PLAYBACK)
939 ? PLAYBACK_DELAY_STOP_HDI_TIME_NS : RECORDER_DELAY_STOP_HDI_TIME_NS);
940 }
941 // todo
942 return SUCCESS;
943 }
944
int32_t AudioEndpointInner::GetProcLastWriteDoneInfo(const std::shared_ptr<OHAudioBuffer> processBuffer,
946 uint64_t curWriteFrame, uint64_t &proHandleFrame, int64_t &proHandleTime)
947 {
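    // Walk back from curWriteFrame one span at a time until a fully written span (or the read
    // position / buffer start) is found, and report that span's frame and write-done time.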
948 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_HANDLE, "Process found but buffer is null");
949 uint64_t curReadFrame = processBuffer->GetCurReadFrame();
950 SpanInfo *curWriteSpan = processBuffer->GetSpanInfo(curWriteFrame);
951 CHECK_AND_RETURN_RET_LOG(curWriteSpan != nullptr, ERR_INVALID_HANDLE,
952 "curWriteSpan of curWriteFrame %{public}" PRIu64" is null", curWriteFrame);
953 if (curWriteSpan->spanStatus == SpanStatus::SPAN_WRITE_DONE || curWriteFrame < dstSpanSizeInframe_ ||
954 curWriteFrame < curReadFrame) {
955 proHandleFrame = curWriteFrame;
956 proHandleTime = curWriteSpan->writeDoneTime;
957 } else {
958 int32_t ret = GetProcLastWriteDoneInfo(processBuffer, curWriteFrame - dstSpanSizeInframe_,
959 proHandleFrame, proHandleTime);
960 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret,
961 "get process last write done info fail, ret %{public}d.", ret);
962 }
963
964 AUDIO_INFO_LOG("GetProcLastWriteDoneInfo end, curWriteFrame %{public}" PRIu64", proHandleFrame %{public}" PRIu64", "
965 "proHandleTime %{public}" PRId64".", curWriteFrame, proHandleFrame, proHandleTime);
966 return SUCCESS;
967 }
968
int32_t AudioEndpointInner::OnUpdateHandleInfo(IAudioProcessStream *processStream)
970 {
971 Trace trace("AudioEndpoint::OnUpdateHandleInfo");
972 bool isFind = false;
973 std::lock_guard<std::mutex> lock(listLock_);
974 auto processItr = processList_.begin();
975 while (processItr != processList_.end()) {
976 if (*processItr != processStream) {
977 processItr++;
978 continue;
979 }
980 std::shared_ptr<OHAudioBuffer> processBuffer = (*processItr)->GetStreamBuffer();
981 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_OPERATION_FAILED, "Process found but buffer is null");
982 uint64_t proHandleFrame = 0;
983 int64_t proHandleTime = 0;
984 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
985 uint64_t curWriteFrame = processBuffer->GetCurWriteFrame();
986 int32_t ret = GetProcLastWriteDoneInfo(processBuffer, curWriteFrame, proHandleFrame, proHandleTime);
987 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret,
988 "get process last write done info fail, ret %{public}d.", ret);
989 processBuffer->SetHandleInfo(proHandleFrame, proHandleTime);
990 } else {
991 // For output device, handle info is updated in CheckAllBufferReady
992 processBuffer->GetHandleInfo(proHandleFrame, proHandleTime);
993 }
994
995 isFind = true;
996 break;
997 }
998 CHECK_AND_RETURN_RET_LOG(isFind, ERR_OPERATION_FAILED, "Can not find any process to UpdateHandleInfo");
999 return SUCCESS;
1000 }
1001
int32_t AudioEndpointInner::LinkProcessStream(IAudioProcessStream *processStream, bool startWhenLinking)
1003 {
1004 CHECK_AND_RETURN_RET_LOG(processStream != nullptr, ERR_INVALID_PARAM, "IAudioProcessStream is null");
1005 std::shared_ptr<OHAudioBuffer> processBuffer = processStream->GetStreamBuffer();
1006 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_PARAM, "processBuffer is null");
1007 CHECK_AND_RETURN_RET_LOG(processBuffer->GetStreamStatus() != nullptr, ERR_INVALID_PARAM, "stream status is null");
1008
1009 CHECK_AND_RETURN_RET_LOG(processList_.size() < MAX_LINKED_PROCESS, ERR_OPERATION_FAILED, "reach link limit.");
1010
1011 AUDIO_INFO_LOG("LinkProcessStream start status is:%{public}s.", GetStatusStr(endpointStatus_).c_str());
1012 processBuffer->SetSessionId(processStream->GetAudioSessionId());
1013 bool needEndpointRunning = processBuffer->GetStreamStatus()->load() == STREAM_RUNNING;
1014
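    // If the endpoint is still STARTING, wait briefly (up to SLEEP_TIME_IN_DEFAULT ms) for the worker
    // thread to leave that state before deciding how to link the stream.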
1015 if (endpointStatus_ == STARTING) {
1016 AUDIO_INFO_LOG("LinkProcessStream wait start begin.");
1017 std::unique_lock<std::mutex> lock(loopThreadLock_);
1018 workThreadCV_.wait_for(lock, std::chrono::milliseconds(SLEEP_TIME_IN_DEFAULT), [this] {
1019 return endpointStatus_ != STARTING;
1020 });
1021 }
1022
1023 if (endpointStatus_ == RUNNING) {
1024 LinkProcessStreamExt(processStream, processBuffer);
1025 return SUCCESS;
1026 }
1027
1028 if (endpointStatus_ == UNLINKED) {
1029 endpointStatus_ = IDEL; // handle push_back in IDEL
1030 if (isDeviceRunningInIdel_) {
1031 delayStopTime_ = INT64_MAX;
1032 CHECK_AND_RETURN_RET_LOG(StartDevice(), ERR_OPERATION_FAILED, "StartDevice failed");
1033 delayStopTime_ = ClockTime::GetCurNano() + ((clientConfig_.audioMode == AUDIO_MODE_PLAYBACK)
1034 ? PLAYBACK_DELAY_STOP_HDI_TIME_NS : LINK_RECORDER_DELAY_STOP_HDI_TIME_NS);
1035 }
1036 }
1037
1038 if (endpointStatus_ == IDEL) {
1039 {
1040 std::lock_guard<std::mutex> lock(listLock_);
1041 processList_.push_back(processStream);
1042 processBufferList_.push_back(processBuffer);
1043 }
1044 if (!needEndpointRunning || !startWhenLinking) {
1045 AUDIO_INFO_LOG("LinkProcessStream success, process stream status is not running.");
1046 return SUCCESS;
1047 }
1048 // needEndpointRunning = true
1049 if (isDeviceRunningInIdel_) {
1050 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
1051 } else {
1052 // needEndpointRunning = true & isDeviceRunningInIdel_ = false
1053 // KeepWorkloopRunning will wait on IDEL
1054 CHECK_AND_RETURN_RET_LOG(StartDevice(), ERR_OPERATION_FAILED, "StartDevice failed");
1055 }
1056 AUDIO_INFO_LOG("LinkProcessStream success with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1057 return SUCCESS;
1058 }
1059
1060 AUDIO_INFO_LOG("LinkProcessStream success with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1061 return SUCCESS;
1062 }
1063
void AudioEndpointInner::LinkProcessStreamExt(IAudioProcessStream *processStream,
1065 const std::shared_ptr<OHAudioBuffer>& processBuffer)
1066 {
1067 std::lock_guard<std::mutex> lock(listLock_);
1068 processList_.push_back(processStream);
1069 processBufferList_.push_back(processBuffer);
1070 AUDIO_INFO_LOG("LinkProcessStream success in RUNNING.");
1071 }
1072
int32_t AudioEndpointInner::UnlinkProcessStream(IAudioProcessStream *processStream)
1074 {
1075 AUDIO_INFO_LOG("UnlinkProcessStream in status:%{public}s.", GetStatusStr(endpointStatus_).c_str());
1076 CHECK_AND_RETURN_RET_LOG(processStream != nullptr, ERR_INVALID_PARAM, "IAudioProcessStream is null");
1077 std::shared_ptr<OHAudioBuffer> processBuffer = processStream->GetStreamBuffer();
1078 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_PARAM, "processBuffer is null");
1079
1080 bool isFind = false;
1081 std::lock_guard<std::mutex> lock(listLock_);
1082 auto processItr = processList_.begin();
1083 auto bufferItr = processBufferList_.begin();
1084 while (processItr != processList_.end()) {
1085 if (*processItr == processStream && *bufferItr == processBuffer) {
1086 processList_.erase(processItr);
1087 processBufferList_.erase(bufferItr);
1088 isFind = true;
1089 break;
1090 } else {
1091 processItr++;
1092 bufferItr++;
1093 }
1094 }
1095 if (processList_.size() == 0) {
1096 StopDevice();
1097 endpointStatus_ = UNLINKED;
1098 } else if (!IsAnyProcessRunningInner()) {
1099 endpointStatus_ = IDEL;
1100 delayStopTime_ = DELAY_STOP_HDI_TIME_WHEN_NO_RUNNING_NS;
1101 }
1102
1103 AUDIO_INFO_LOG("UnlinkProcessStream end, %{public}s the process.", (isFind ? "find and remove" : "not find"));
1104 return SUCCESS;
1105 }
1106
bool AudioEndpointInner::CheckAllBufferReady(int64_t checkTime, uint64_t curWritePos)
1108 {
1109 bool isAllReady = true;
1110 bool needCheckStandby = false;
1111 {
1112 // lock list without sleep
1113 std::lock_guard<std::mutex> lock(listLock_);
1114 for (size_t i = 0; i < processBufferList_.size(); i++) {
1115 std::shared_ptr<OHAudioBuffer> tempBuffer = processBufferList_[i];
1116 uint64_t eachCurReadPos = processBufferList_[i]->GetCurReadFrame();
1117 lastHandleProcessTime_ = checkTime;
1118 processBufferList_[i]->SetHandleInfo(eachCurReadPos, lastHandleProcessTime_); // update handle info
1119 if (tempBuffer->GetStreamStatus() &&
1120 tempBuffer->GetStreamStatus()->load() != StreamStatus::STREAM_RUNNING) {
1121 // Process is not running, server will continue to check the same location in the next cycle.
1122 int64_t duration = 5000000; // 5ms
1123 processBufferList_[i]->SetHandleInfo(eachCurReadPos, lastHandleProcessTime_ + duration);
1124 continue; // process not running
1125 }
1126 // Status is RUNNING
1127 int64_t current = ClockTime::GetCurNano();
1128 int64_t lastWrittenTime = tempBuffer->GetLastWrittenTime();
1129 uint32_t sessionId = processList_[i]->GetAudioSessionId();
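            // If the client has not written for WAIT_CLIENT_STANDBY_TIME_NS, move the stream into
            // standby instead of treating it as not ready.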
1130 if (current - lastWrittenTime > WAIT_CLIENT_STANDBY_TIME_NS) {
1131 Trace trace("AudioEndpoint::MarkClientStandby:" + std::to_string(sessionId));
1132 AUDIO_INFO_LOG("change the status to stand-by, session %{public}u", sessionId);
1133 processList_[i]->EnableStandby();
1134 needCheckStandby = true;
1135 continue;
1136 }
1137 uint64_t curRead = tempBuffer->GetCurReadFrame();
1138 SpanInfo *curReadSpan = tempBuffer->GetSpanInfo(curRead);
1139 if (curReadSpan == nullptr || curReadSpan->spanStatus != SpanStatus::SPAN_WRITE_DONE) {
1140 AUDIO_DEBUG_LOG("Find one process not ready"); // print uid of the process?
1141 isAllReady = false;
1142 AudioPerformanceMonitor::GetInstance().RecordSilenceState(sessionId, true, PIPE_TYPE_LOWLATENCY_OUT);
1143 continue;
1144 } else {
1145 AudioPerformanceMonitor::GetInstance().RecordSilenceState(sessionId, false, PIPE_TYPE_LOWLATENCY_OUT);
1146 }
1147 // process Status is RUNNING && buffer status is WRITE_DONE
1148 tempBuffer->SetLastWrittenTime(current);
1149 }
1150 }
1151
1152 if (needCheckStandby) {
1153 CheckStandBy();
1154 }
1155
1156 if (!isAllReady) {
1157 WaitAllProcessReady(curWritePos);
1158 }
1159 return isAllReady;
1160 }
1161
void AudioEndpointInner::WaitAllProcessReady(uint64_t curWritePos)
1163 {
1164 Trace trace("AudioEndpoint::WaitAllProcessReady");
1165 int64_t tempWakeupTime = readTimeModel_.GetTimeOfPos(curWritePos) + WRITE_TO_HDI_AHEAD_TIME;
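    // Sleep until shortly before the HDI reads curWritePos; if that moment is less than 1ms away,
    // sleep a fixed 1ms instead.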
1166 if (tempWakeupTime - ClockTime::GetCurNano() < ONE_MILLISECOND_DURATION) {
1167 ClockTime::RelativeSleep(ONE_MILLISECOND_DURATION);
1168 } else {
1169 ClockTime::AbsoluteSleep(tempWakeupTime); // sleep to hdi read time ahead 1ms.
1170 }
1171 }
1172
void AudioEndpointInner::MixToDupStream(const std::vector<AudioStreamData> &srcDataList, int32_t innerCapId)
1174 {
1175 Trace trace("AudioEndpointInner::MixToDupStream");
1176 std::lock_guard<std::mutex> lock(dupMutex_);
1177 CHECK_AND_RETURN_LOG(fastCaptureInfos_.count(innerCapId) && fastCaptureInfos_[innerCapId].dupStream != nullptr,
        "captureInfo is invalid");
1179 CHECK_AND_RETURN_LOG(dupBuffer_ != nullptr, "Buffer is not ready");
1180
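    // Mix every stream marked for this innerCapId into dupBuffer_ as S16 samples, clamping the
    // 32-bit sum to the int16 range to avoid wrap-around on overflow.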
1181 for (size_t i = 0; i < srcDataList.size(); i++) {
1182 if (!srcDataList[i].isInnerCapeds.count(innerCapId) ||
1183 !srcDataList[i].isInnerCapeds.at(innerCapId)) {
1184 continue;
1185 }
1186 size_t dataLength = dupBufferSize_;
1187 dataLength /= 2; // SAMPLE_S16LE--> 2 byte
1188 int16_t *dstPtr = reinterpret_cast<int16_t *>(dupBuffer_.get());
1189
1190 for (size_t offset = 0; dataLength > 0; dataLength--) {
1191 int32_t sum = *dstPtr;
1192 sum += *(reinterpret_cast<int16_t *>(srcDataList[i].bufferDesc.buffer) + offset);
1193 *dstPtr = sum > INT16_MAX ? INT16_MAX : (sum < INT16_MIN ? INT16_MIN : sum);
1194 dstPtr++;
1195 offset++;
1196 }
1197 }
1198 BufferDesc temp;
1199 temp.buffer = dupBuffer_.get();
1200 temp.bufLength = dupBufferSize_;
1201 temp.dataLength = dupBufferSize_;
1202
1203 int32_t ret = fastCaptureInfos_[innerCapId].dupStream->EnqueueBuffer(temp);
1204 CHECK_AND_RETURN_LOG(ret == SUCCESS, "EnqueueBuffer failed:%{public}d", ret);
1205
1206 ret = memset_s(reinterpret_cast<void *>(dupBuffer_.get()), dupBufferSize_, 0, dupBufferSize_);
1207 if (ret != EOK) {
1208 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d", ret);
1209 }
1210 }
1211
void AudioEndpointInner::ProcessData(const std::vector<AudioStreamData> &srcDataList, const AudioStreamData &dstData)
1213 {
1214 size_t srcListSize = srcDataList.size();
1215 for (size_t i = 0; i < srcListSize; i++) {
1216 if (srcDataList[i].streamInfo.format != SAMPLE_S16LE || srcDataList[i].streamInfo.channels != STEREO ||
1217 srcDataList[i].bufferDesc.bufLength != dstData.bufferDesc.bufLength ||
1218 srcDataList[i].bufferDesc.dataLength != dstData.bufferDesc.dataLength) {
            AUDIO_ERR_LOG("ProcessData failed, stream info mismatch");
1220 return;
1221 }
1222 }
    // Assume all streams use the same format and size
    CHECK_AND_RETURN_LOG(dstData.streamInfo.format == SAMPLE_S16LE && dstData.streamInfo.channels == STEREO,
        "ProcessData failed, streamInfo is not supported");
1226
1227 FormatConverter::DataAccumulationFromVolume(srcDataList, dstData);
1228
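    // Accumulate all ready source buffers into the device buffer, then use the average per-channel
    // volume level to track the zero-volume (silence) state.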
1229 ChannelVolumes channelVolumes = VolumeTools::CountVolumeLevel(
1230 dstData.bufferDesc, dstData.streamInfo.format, dstData.streamInfo.channels);
1231 ZeroVolumeCheck(std::accumulate(channelVolumes.volStart, channelVolumes.volStart + channelVolumes.channel, 0) /
1232 channelVolumes.channel);
1233 }
1234
void AudioEndpointInner::HandleRendererDataParams(const AudioStreamData &srcData, const AudioStreamData &dstData,
1236 bool applyVol)
1237 {
1238 if (srcData.streamInfo.encoding != dstData.streamInfo.encoding) {
1239 AUDIO_ERR_LOG("Different encoding formats");
1240 return;
1241 }
1242 if (srcData.streamInfo.format == SAMPLE_S16LE && srcData.streamInfo.channels == STEREO) {
1243 return ProcessSingleData(srcData, dstData, applyVol);
1244 }
1245
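    // Other supported inputs (S16 mono, F32 mono/stereo) are first converted into the process's
    // conversion buffer and then mixed as S16 stereo.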
1246 if (srcData.streamInfo.format == SAMPLE_S16LE || srcData.streamInfo.format == SAMPLE_F32LE) {
        CHECK_AND_RETURN_LOG(processList_.size() > 0 && processList_[0] != nullptr, "No available process");
1248 BufferDesc &convertedBuffer = processList_[0]->GetConvertedBuffer();
1249 int32_t ret = -1;
1250 if (srcData.streamInfo.format == SAMPLE_S16LE && srcData.streamInfo.channels == MONO) {
1251 ret = FormatConverter::S16MonoToS16Stereo(srcData.bufferDesc, convertedBuffer);
1252 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Convert channel from s16 mono to s16 stereo failed");
1253 } else if (srcData.streamInfo.format == SAMPLE_F32LE && srcData.streamInfo.channels == MONO) {
1254 ret = FormatConverter::F32MonoToS16Stereo(srcData.bufferDesc, convertedBuffer);
1255 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Convert channel from f32 mono to s16 stereo failed");
1256 } else if (srcData.streamInfo.format == SAMPLE_F32LE && srcData.streamInfo.channels == STEREO) {
1257 ret = FormatConverter::F32StereoToS16Stereo(srcData.bufferDesc, convertedBuffer);
1258 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Convert channel from f32 stereo to s16 stereo failed");
1259 } else {
            CHECK_AND_RETURN_LOG(ret == SUCCESS, "Unsupported conversion");
1261 }
1262 AudioStreamData dataAfterProcess = srcData;
1263 dataAfterProcess.bufferDesc = convertedBuffer;
1264 ProcessSingleData(dataAfterProcess, dstData, applyVol);
1265 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1266 convertedBuffer.bufLength);
1267 CHECK_AND_RETURN_LOG(ret == EOK, "memset converted buffer to 0 failed");
1268 }
1269 }
1270
void AudioEndpointInner::ProcessSingleData(const AudioStreamData &srcData, const AudioStreamData &dstData,
1272 bool applyVol)
1273 {
1274 CHECK_AND_RETURN_LOG(dstData.streamInfo.format == SAMPLE_S16LE && dstData.streamInfo.channels == STEREO,
        "ProcessSingleData failed, streamInfo is not supported");
1276
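    // vol is a Q16 fixed-point gain (1 << VOLUME_SHIFT_NUMBER is unity); when applyVol is set, the
    // product is shifted back by VOLUME_SHIFT_NUMBER and the result is clamped to the int16 range.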
1277 size_t dataLength = dstData.bufferDesc.dataLength;
1278 dataLength /= 2; // SAMPLE_S16LE--> 2 byte
1279 int16_t *dstPtr = reinterpret_cast<int16_t *>(dstData.bufferDesc.buffer);
1280 for (size_t offset = 0; dataLength > 0; dataLength--) {
1281 int32_t vol = 1 << VOLUME_SHIFT_NUMBER;
1282 int16_t *srcPtr = reinterpret_cast<int16_t *>(srcData.bufferDesc.buffer) + offset;
1283 int32_t sum = applyVol ? (*srcPtr * static_cast<int64_t>(vol)) >> VOLUME_SHIFT_NUMBER : *srcPtr; // 1/65536
1284 offset++;
1285 *dstPtr++ = sum > INT16_MAX ? INT16_MAX : (sum < INT16_MIN ? INT16_MIN : sum);
1286 }
1287 }
1288
1289 // call with listLock_ hold
void AudioEndpointInner::GetAllReadyProcessData(std::vector<AudioStreamData> &audioDataList)
1291 {
1292 for (size_t i = 0; i < processBufferList_.size(); i++) {
1293 uint64_t curRead = processBufferList_[i]->GetCurReadFrame();
1294 Trace trace("AudioEndpoint::ReadProcessData->" + std::to_string(curRead));
1295 SpanInfo *curReadSpan = processBufferList_[i]->GetSpanInfo(curRead);
1296 CHECK_AND_CONTINUE_LOG(curReadSpan != nullptr, "GetSpanInfo failed, can not get client curReadSpan");
1297 AudioStreamData streamData;
1298 Volume vol = {true, 1.0f, 0};
1299 AudioStreamType streamType = processList_[i]->GetAudioStreamType();
1300 AudioVolumeType volumeType = VolumeUtils::GetVolumeTypeFromStreamType(streamType);
1301 DeviceType deviceType = PolicyHandler::GetInstance().GetActiveOutPutDevice();
1302 bool muteFlag = processList_[i]->GetMuteState();
1303 bool getVolumeRet = PolicyHandler::GetInstance().GetSharedVolume(volumeType, deviceType, vol);
1304 if (deviceInfo_.networkId_ == LOCAL_NETWORK_ID &&
1305 !(deviceInfo_.deviceType_ == DEVICE_TYPE_BLUETOOTH_A2DP && volumeType == STREAM_MUSIC &&
1306 PolicyHandler::GetInstance().IsAbsVolumeSupported()) && getVolumeRet) {
1307 streamData.volumeStart = vol.isMute ? 0 : static_cast<int32_t>(curReadSpan->volumeStart * vol.volumeFloat *
1308 AudioVolume::GetInstance()->GetAppVolume(clientConfig_.appInfo.appUid,
1309 clientConfig_.rendererInfo.volumeMode));
1310 } else {
1311 streamData.volumeStart = vol.isMute ? 0 : static_cast<int32_t>(curReadSpan->volumeStart *
1312 AudioVolume::GetInstance()->GetAppVolume(clientConfig_.appInfo.appUid,
1313 clientConfig_.rendererInfo.volumeMode));
1314 }
1315 Trace traceVol("VolumeProcess " + std::to_string(streamData.volumeStart) +
1316 " sessionid:" + std::to_string(processList_[i]->GetAudioSessionId()));
1317 streamData.volumeEnd = curReadSpan->volumeEnd;
1318 streamData.streamInfo = processList_[i]->GetStreamInfo();
1319 streamData.isInnerCapeds = processList_[i]->GetInnerCapState();
1320 SpanStatus targetStatus = SpanStatus::SPAN_WRITE_DONE;
1321 if (curReadSpan->spanStatus.compare_exchange_strong(targetStatus, SpanStatus::SPAN_READING)) {
1322 processBufferList_[i]->GetReadbuffer(curRead, streamData.bufferDesc); // check return?
1323 if (muteFlag) {
1324 memset_s(static_cast<void *>(streamData.bufferDesc.buffer), streamData.bufferDesc.bufLength,
1325 0, streamData.bufferDesc.bufLength);
1326 }
1327 CheckPlaySignal(streamData.bufferDesc.buffer, streamData.bufferDesc.bufLength);
1328 audioDataList.push_back(streamData);
1329 curReadSpan->readStartTime = ClockTime::GetCurNano();
1330 processList_[i]->WriteDumpFile(static_cast<void *>(streamData.bufferDesc.buffer),
1331 streamData.bufferDesc.bufLength);
1332 WriteMuteDataSysEvent(streamData.bufferDesc.buffer, streamData.bufferDesc.bufLength, i);
1333 HandleMuteWriteData(streamData.bufferDesc, i);
1334 }
1335 }
1336 }
1337
1338 void AudioEndpointInner::HandleMuteWriteData(BufferDesc &bufferDesc, int32_t index)
1339 {
1340 auto tempProcess = processList_[index];
1341 CHECK_AND_RETURN_LOG(tempProcess, "tempProcess is nullptr");
1342
1343 int64_t muteFrameCnt = 0;
1344 VolumeTools::CalcMuteFrame(bufferDesc, dstStreamInfo_, logUtilsTag_, volumeDataCount_, muteFrameCnt);
1345 tempProcess->AddMuteWriteFrameCnt(muteFrameCnt);
1346 }
1347
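// One playback cycle: gather all ready client spans, fetch the HDI write span at curWritePos and
// either zero-fill it (no ready data) or mix into it -- the single-stream VoIP path goes through
// HandleRendererDataParams(), everything else through ProcessData(). The mixed span is then
// duplicated to any enabled inner-capture streams and, on beta builds, dumped for analysis.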
1348 bool AudioEndpointInner::ProcessToEndpointDataHandle(uint64_t curWritePos)
1349 {
1350 std::lock_guard<std::mutex> lock(listLock_);
1351
1352 std::vector<AudioStreamData> audioDataList;
1353 GetAllReadyProcessData(audioDataList);
1354
1355 AudioStreamData dstStreamData;
1356 dstStreamData.streamInfo = dstStreamInfo_;
1357 int32_t ret = dstAudioBuffer_->GetWriteBuffer(curWritePos, dstStreamData.bufferDesc);
1358 CHECK_AND_RETURN_RET_LOG(((ret == SUCCESS && dstStreamData.bufferDesc.buffer != nullptr)), false,
1359 "GetWriteBuffer failed, ret:%{public}d", ret);
1360
1361 SpanInfo *curWriteSpan = dstAudioBuffer_->GetSpanInfo(curWritePos);
1362 CHECK_AND_RETURN_RET_LOG(curWriteSpan != nullptr, false, "GetSpanInfo failed, can not get curWriteSpan");
1363
1364 dstStreamData.volumeStart = curWriteSpan->volumeStart;
1365 dstStreamData.volumeEnd = curWriteSpan->volumeEnd;
1366
1367 Trace trace("AudioEndpoint::WriteDstBuffer=>" + std::to_string(curWritePos));
1368 // do write work
1369 if (audioDataList.size() == 0) {
1370 memset_s(dstStreamData.bufferDesc.buffer, dstStreamData.bufferDesc.bufLength, 0,
1371 dstStreamData.bufferDesc.bufLength);
1372 } else {
1373 if (endpointType_ == TYPE_VOIP_MMAP && audioDataList.size() == 1) {
1374 HandleRendererDataParams(audioDataList[0], dstStreamData);
1375 AudioPerformanceMonitor::GetInstance().RecordTimeStamp(ADAPTER_TYPE_VOIP_FAST, ClockTime::GetCurNano());
1376 } else {
1377 ProcessData(audioDataList, dstStreamData);
1378 AudioPerformanceMonitor::GetInstance().RecordTimeStamp(ADAPTER_TYPE_FAST, ClockTime::GetCurNano());
1379 }
1380 }
1381
1382 for (auto &capture: fastCaptureInfos_) {
1383 if (capture.second.isInnerCapEnabled) {
1384 ProcessToDupStream(audioDataList, dstStreamData, capture.first);
1385 }
1386 }
1387
1388 if (AudioDump::GetInstance().GetVersionType() == DumpFileUtil::BETA_VERSION) {
1389 DumpFileUtil::WriteDumpFile(dumpHdi_, static_cast<void *>(dstStreamData.bufferDesc.buffer),
1390 dstStreamData.bufferDesc.bufLength);
1391 AudioCacheMgr::GetInstance().CacheData(dumpHdiName_,
1392 static_cast<void *>(dstStreamData.bufferDesc.buffer), dstStreamData.bufferDesc.bufLength);
1393 }
1394
1395 CheckUpdateState(reinterpret_cast<char *>(dstStreamData.bufferDesc.buffer),
1396 dstStreamData.bufferDesc.bufLength);
1397
1398 return true;
1399 }
1400
1401 void AudioEndpointInner::ProcessToDupStream(const std::vector<AudioStreamData> &audioDataList,
1402 AudioStreamData &dstStreamData, int32_t innerCapId)
1403 {
1404 if (!fastCaptureInfos_.count(innerCapId) || fastCaptureInfos_[innerCapId].dupStream == nullptr) {
1405 AUDIO_ERR_LOG("innerCapId error or dupStream error");
1406 return;
1407 }
1408 Trace trace("AudioEndpointInner::ProcessToDupStream");
1409 if (endpointType_ == TYPE_VOIP_MMAP) {
1410 if (audioDataList.size() == 1 && audioDataList[0].isInnerCapeds.count(innerCapId)
1411 && audioDataList[0].isInnerCapeds.at(innerCapId)) {
1412 BufferDesc temp;
1413 temp.buffer = dupBuffer_.get();
1414 temp.bufLength = dupBufferSize_;
1415 temp.dataLength = dupBufferSize_;
1416
1417 dstStreamData.bufferDesc = temp;
1418 HandleRendererDataParams(audioDataList[0], dstStreamData, false);
1419 fastCaptureInfos_[innerCapId].dupStream->EnqueueBuffer(temp);
1420 }
1421 } else {
1422 MixToDupStream(audioDataList, innerCapId);
1423 }
1424 }
1425
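// Track the peak amplitude of the rendered data, but only while someone is polling it.
// GetMaxAmplitude() arms startUpdate_; every GET_MAX_AMPLITUDE_FRAMES_THRESHOLD (40) frames the
// window restarts, and if nobody has polled since the window began the tracking is disarmed so
// the per-frame UpdateMaxAmplitude() cost is not paid indefinitely.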
1426 void AudioEndpointInner::CheckUpdateState(char *frame, uint64_t replyBytes)
1427 {
1428 if (startUpdate_) {
1429 if (renderFrameNum_ == 0) {
1430 last10FrameStartTime_ = ClockTime::GetCurNano();
1431 }
1432 renderFrameNum_++;
1433 maxAmplitude_ = UpdateMaxAmplitude(static_cast<ConvertHdiFormat>(dstStreamInfo_.format),
1434 frame, replyBytes);
1435 if (renderFrameNum_ == GET_MAX_AMPLITUDE_FRAMES_THRESHOLD) {
1436 renderFrameNum_ = 0;
1437 if (last10FrameStartTime_ > lastGetMaxAmplitudeTime_) {
1438 startUpdate_ = false;
1439 maxAmplitude_ = 0;
1440 }
1441 }
1442 }
1443 }
1444
1445 float AudioEndpointInner::GetMaxAmplitude()
1446 {
1447 lastGetMaxAmplitudeTime_ = ClockTime::GetCurNano();
1448 startUpdate_ = true;
1449 return maxAmplitude_;
1450 }
1451
1452 AudioMode AudioEndpointInner::GetAudioMode() const
1453 {
1454 return clientConfig_.audioMode;
1455 }
1456
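// Predict when the HDI sink will read the given frame position, using the linear position-time
// model kept in readTimeModel_. Conceptually the model is a line fitted to (frame, time) stamps,
// roughly time(pos) ~= stampTime + (pos - stampFrame) * AUDIO_NS_PER_SECOND / sampleRate (the
// exact handling lives in LinearPosTimeModel). The async update thread is poked periodically and
// whenever the model rejects a stamp, so the fit can be refreshed or the position re-synced.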
1457 int64_t AudioEndpointInner::GetPredictNextReadTime(uint64_t posInFrame)
1458 {
1459 Trace trace("AudioEndpoint::GetPredictNextRead");
1460 uint64_t handleSpanCnt = posInFrame / dstSpanSizeInframe_;
1461 uint32_t startPeriodCnt = 20; // sync each time when start
1462 uint32_t oneBigPeriodCnt = 40; // 200ms
1463 if (handleSpanCnt < startPeriodCnt || handleSpanCnt % oneBigPeriodCnt == 0) {
1464 updateThreadCV_.notify_all();
1465 }
1466 uint64_t readFrame = 0;
1467 int64_t readtime = 0;
1468 if (readTimeModel_.GetFrameStamp(readFrame, readtime)) {
1469 if (readFrame != posInFrame_) {
1470 CheckPosTimeRes res = readTimeModel_.UpdataFrameStamp(posInFrame_, timeInNano_);
1471 if (res == CHECK_FAILED) {
1472 updateThreadCV_.notify_all();
1473 } else if (res == NEED_MODIFY) {
1474 needReSyncPosition_ = true;
1475 }
1476 }
1477 }
1478
1479 int64_t nextHdiReadTime = readTimeModel_.GetTimeOfPos(posInFrame);
1480 return nextHdiReadTime;
1481 }
1482
1483 int64_t AudioEndpointInner::GetPredictNextWriteTime(uint64_t posInFrame)
1484 {
1485 uint64_t handleSpanCnt = posInFrame / dstSpanSizeInframe_;
1486 uint32_t startPeriodCnt = 20;
1487 uint32_t oneBigPeriodCnt = 40;
1488 if (handleSpanCnt < startPeriodCnt || handleSpanCnt % oneBigPeriodCnt == 0) {
1489 updateThreadCV_.notify_all();
1490 }
1491 uint64_t writeFrame = 0;
1492 int64_t writetime = 0;
1493 if (writeTimeModel_.GetFrameStamp(writeFrame, writetime)) {
1494 if (writeFrame != posInFrame_) {
1495 CheckPosTimeRes res = writeTimeModel_.UpdataFrameStamp(posInFrame_, timeInNano_);
1496 if (res == CHECK_FAILED) {
1497 updateThreadCV_.notify_all();
1498 } else if (res == NEED_MODIFY) {
1499 needReSyncPosition_ = true;
1500 }
1501 }
1502 }
1503 int64_t nextHdiWriteTime = writeTimeModel_.GetTimeOfPos(posInFrame);
1504 return nextHdiWriteTime;
1505 }
1506
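// Plan the next capture cycle: wake up a fixed margin after the HDI source is predicted to have
// written the next span (RECORD_DELAY_TIME_NS for normal fast capture, RECORD_VOIP_DELAY_TIME_NS
// for VoIP), falling back to "now + 1 ms" if the prediction is already in the past, then advance
// both cursors of the device buffer to the next span.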
1507 bool AudioEndpointInner::RecordPrepareNextLoop(uint64_t curReadPos, int64_t &wakeUpTime)
1508 {
1509 uint64_t nextHandlePos = curReadPos + dstSpanSizeInframe_;
1510 int64_t nextHdiWriteTime = GetPredictNextWriteTime(nextHandlePos);
1511 int64_t tempDelay = endpointType_ == TYPE_VOIP_MMAP ? RECORD_VOIP_DELAY_TIME_NS : RECORD_DELAY_TIME_NS;
1512 int64_t predictWakeupTime = nextHdiWriteTime + tempDelay;
1513 if (predictWakeupTime <= ClockTime::GetCurNano()) {
1514 wakeUpTime = ClockTime::GetCurNano() + ONE_MILLISECOND_DURATION;
1515 AUDIO_ERR_LOG("hdi send wrong position time");
1516 } else {
1517 wakeUpTime = predictWakeupTime;
1518 }
1519
1520 int32_t ret = dstAudioBuffer_->SetCurWriteFrame(nextHandlePos);
1521 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "set dst buffer write frame fail, ret %{public}d.", ret);
1522 ret = dstAudioBuffer_->SetCurReadFrame(nextHandlePos);
1523 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "set dst buffer read frame fail, ret %{public}d.", ret);
1524
1525 return true;
1526 }
1527
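// Plan the next playback cycle: wake up serverAheadReadTime_ before the HDI sink is predicted to
// read the next span, advance the device buffer cursors, and finish the spans consumed this
// round -- each client span in SPAN_READING is marked SPAN_READ_DONE, its data is cleared, and
// its read cursor is moved forward by one span.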
1528 bool AudioEndpointInner::PrepareNextLoop(uint64_t curWritePos, int64_t &wakeUpTime)
1529 {
1530 uint64_t nextHandlePos = curWritePos + dstSpanSizeInframe_;
1531 Trace prepareTrace("AudioEndpoint::PrepareNextLoop " + std::to_string(nextHandlePos));
1532 int64_t nextHdiReadTime = GetPredictNextReadTime(nextHandlePos);
1533 int64_t predictWakeupTime = nextHdiReadTime - serverAheadReadTime_;
1534 if (predictWakeupTime <= ClockTime::GetCurNano()) {
1535 wakeUpTime = ClockTime::GetCurNano() + ONE_MILLISECOND_DURATION;
1536 AUDIO_ERR_LOG("hdi send wrong position time");
1537 } else {
1538 wakeUpTime = predictWakeupTime;
1539 }
1540
1541 SpanInfo *nextWriteSpan = dstAudioBuffer_->GetSpanInfo(nextHandlePos);
1542 CHECK_AND_RETURN_RET_LOG(nextWriteSpan != nullptr, false, "GetSpanInfo failed, can not get next write span");
1543
1544 int32_t ret1 = dstAudioBuffer_->SetCurWriteFrame(nextHandlePos);
1545 int32_t ret2 = dstAudioBuffer_->SetCurReadFrame(nextHandlePos);
1546 CHECK_AND_RETURN_RET_LOG(ret1 == SUCCESS && ret2 == SUCCESS, false,
1547 "SetCurWriteFrame or SetCurReadFrame failed, ret1:%{public}d ret2:%{public}d", ret1, ret2);
1548 // handle each process buffer info
1549 int64_t curReadDoneTime = ClockTime::GetCurNano();
1550 {
1551 std::lock_guard<std::mutex> lock(listLock_);
1552 for (size_t i = 0; i < processBufferList_.size(); i++) {
1553 uint64_t eachCurReadPos = processBufferList_[i]->GetCurReadFrame();
1554 SpanInfo *tempSpan = processBufferList_[i]->GetSpanInfo(eachCurReadPos);
1555 CHECK_AND_RETURN_RET_LOG(tempSpan != nullptr, false,
1556 "GetSpanInfo failed, can not get process read span");
1557 SpanStatus targetStatus = SpanStatus::SPAN_READING;
1558 CHECK_AND_RETURN_RET_LOG(processBufferList_[i]->GetStreamStatus() != nullptr, false,
1559 "stream status is null");
1560 if (tempSpan->spanStatus.compare_exchange_strong(targetStatus, SpanStatus::SPAN_READ_DONE)) {
1561 tempSpan->readDoneTime = curReadDoneTime;
1562 BufferDesc bufferReadDone = { nullptr, 0, 0};
1563 processBufferList_[i]->GetReadbuffer(eachCurReadPos, bufferReadDone);
1564 if (bufferReadDone.buffer != nullptr && bufferReadDone.bufLength != 0) {
1565 memset_s(bufferReadDone.buffer, bufferReadDone.bufLength, 0, bufferReadDone.bufLength);
1566 }
1567 processBufferList_[i]->SetCurReadFrame(eachCurReadPos + dstSpanSizeInframe_); // use client span size
1568 } else if (processBufferList_[i]->GetStreamStatus() &&
1569 processBufferList_[i]->GetStreamStatus()->load() == StreamStatus::STREAM_RUNNING) {
1570 AUDIO_DEBUG_LOG("Current %{public}" PRIu64" span not ready:%{public}d", eachCurReadPos, targetStatus);
1571 }
1572 }
1573 }
1574 return true;
1575 }
1576
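// Query the current mmap hardware position from the capture source or render sink (an IPC call
// into the HDI adapter) and fold the second/nanosecond pair into a single nanosecond stamp.
// DELTA_TO_REAL_READ_START_TIME is added as a server-side correction; it is currently 0.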
1577 bool AudioEndpointInner::GetDeviceHandleInfo(uint64_t &frames, int64_t &nanoTime)
1578 {
1579 Trace trace("AudioEndpoint::GetMmapHandlePosition");
1580 int64_t timeSec = 0;
1581 int64_t timeNanoSec = 0;
1582 int32_t ret = 0;
1583 if (deviceInfo_.deviceRole_ == INPUT_DEVICE) {
1584 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
1585 CHECK_AND_RETURN_RET_LOG(source != nullptr && source->IsInited(),
1586 false, "Source start failed.");
1587 // GetMmapHandlePosition will call using ipc.
1588 ret = source->GetMmapHandlePosition(frames, timeSec, timeNanoSec);
1589 } else {
1590 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
1591 CHECK_AND_RETURN_RET_LOG(sink != nullptr && sink->IsInited(),
1592 false, "GetDeviceHandleInfo failed: sink is not inited.");
1593 // GetMmapHandlePosition will call using ipc.
1594 ret = sink->GetMmapHandlePosition(frames, timeSec, timeNanoSec);
1595 }
1596 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "Call adapter GetMmapHandlePosition failed: %{public}d", ret);
1597 trace.End();
1598 nanoTime = timeNanoSec + timeSec * AUDIO_NS_PER_SECOND;
1599 Trace infoTrace("AudioEndpoint::GetDeviceHandleInfo frames=>" + std::to_string(frames) + " " +
1600 std::to_string(nanoTime) + " at " + std::to_string(ClockTime::GetCurNano()));
1601 nanoTime += DELTA_TO_REAL_READ_START_TIME; // global delay in server
1602 return true;
1603 }
1604
1605 void AudioEndpointInner::AsyncGetPosTime()
1606 {
1607 AUDIO_INFO_LOG("AsyncGetPosTime thread start.");
1608 while (!stopUpdateThread_) {
1609 std::unique_lock<std::mutex> lock(updateThreadLock_);
1610 updateThreadCV_.wait_for(lock, std::chrono::milliseconds(UPDATE_THREAD_TIMEOUT));
1611 if (stopUpdateThread_) {
1612 break;
1613 }
1614 if (endpointStatus_ == IDEL && isStarted_ && ClockTime::GetCurNano() > delayStopTime_) {
1615 AUDIO_INFO_LOG("IDEL for too long, let's call hdi stop");
1616 DelayStopDevice();
1617 continue;
1618 }
1619 if (!isStarted_) {
1620 continue;
1621 }
1622 // get signaled, call get pos-time
1623 uint64_t curHdiHandlePos = posInFrame_;
1624 int64_t handleTime = timeInNano_;
1625 if (!GetDeviceHandleInfo(curHdiHandlePos, handleTime)) {
1626 AUDIO_WARNING_LOG("AsyncGetPosTime call GetDeviceHandleInfo failed.");
1627 continue;
1628 }
1629 // keep it
1630 if (posInFrame_ != curHdiHandlePos) {
1631 posInFrame_ = curHdiHandlePos;
1632 timeInNano_ = handleTime;
1633 }
1634 }
1635 }
1636
1637 std::string AudioEndpointInner::GetStatusStr(EndpointStatus status)
1638 {
1639 switch (status) {
1640 case INVALID:
1641 return "INVALID";
1642 case UNLINKED:
1643 return "UNLINKED";
1644 case IDEL:
1645 return "IDEL";
1646 case STARTING:
1647 return "STARTING";
1648 case RUNNING:
1649 return "RUNNING";
1650 case STOPPING:
1651 return "STOPPING";
1652 case STOPPED:
1653 return "STOPPED";
1654 default:
1655 break;
1656 }
1657 return "NO_SUCH_STATUS";
1658 }
1659
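// Gate for the endpoint work loops. RUNNING (and IDEL while the device is kept running) lets the
// loop proceed; any other state waits up to SLEEP_TIME_IN_DEFAULT ms for a transition towards the
// target state noted in the log, so the loop neither spins nor misses a restart signal.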
1660 bool AudioEndpointInner::KeepWorkloopRunning()
1661 {
1662 EndpointStatus targetStatus = INVALID;
1663 switch (endpointStatus_.load()) {
1664 case RUNNING:
1665 return true;
1666 case IDEL:
1667 if (ClockTime::GetCurNano() > delayStopTime_) {
1668 targetStatus = RUNNING;
1669 updateThreadCV_.notify_all();
1670 break;
1671 }
1672 if (isDeviceRunningInIdel_) {
1673 return true;
1674 }
1675 break;
1676 case UNLINKED:
1677 targetStatus = IDEL;
1678 break;
1679 case STARTING:
1680 targetStatus = RUNNING;
1681 break;
1682 case STOPPING:
1683 targetStatus = STOPPED;
1684 break;
1685 default:
1686 break;
1687 }
1688
1689 // when returning false, EndpointWorkLoopFuc will continue the loop immediately. Wait to avoid an infinite loop.
1690 std::unique_lock<std::mutex> lock(loopThreadLock_);
1691 AUDIO_PRERELEASE_LOGI("Status is %{public}s now, wait for %{public}s...", GetStatusStr(endpointStatus_).c_str(),
1692 GetStatusStr(targetStatus).c_str());
1693 threadStatus_ = WAITTING;
1694 workThreadCV_.wait_for(lock, std::chrono::milliseconds(SLEEP_TIME_IN_DEFAULT));
1695 AUDIO_DEBUG_LOG("Wait end. Cur is %{public}s now, target is %{public}s...", GetStatusStr(endpointStatus_).c_str(),
1696 GetStatusStr(targetStatus).c_str());
1697
1698 return false;
1699 }
1700
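// Copy one captured span into a single client's process buffer. The span walks the write-side
// state machine: SPAN_WRITTING while data is copied (or zero-filled for muted clients),
// SPAN_WRITE_DONE on success, and back to SPAN_READ_DONE if advancing the write cursor fails.
// If fewer than one span of frames is writable the client is reading too slowly and this span is
// dropped with a warning.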
1701 int32_t AudioEndpointInner::WriteToSpecialProcBuf(const std::shared_ptr<OHAudioBuffer> &procBuf,
1702 const BufferDesc &readBuf, const BufferDesc &convertedBuffer, bool muteFlag)
1703 {
1704 CHECK_AND_RETURN_RET_LOG(procBuf != nullptr, ERR_INVALID_HANDLE, "process buffer is null.");
1705 uint64_t curWritePos = procBuf->GetCurWriteFrame();
1706 Trace trace("AudioEndpoint::WriteProcessData-<" + std::to_string(curWritePos));
1707
1708 int32_t writeAbleSize = procBuf->GetAvailableDataFrames();
1709 if (writeAbleSize <= 0 || static_cast<uint32_t>(writeAbleSize) <= dstSpanSizeInframe_) {
1710 AUDIO_WARNING_LOG("client read too slow: curWritePos:%{public}" PRIu64" writeAbleSize:%{public}d",
1711 curWritePos, writeAbleSize);
1712 return ERR_OPERATION_FAILED;
1713 }
1714
1715 SpanInfo *curWriteSpan = procBuf->GetSpanInfo(curWritePos);
1716 CHECK_AND_RETURN_RET_LOG(curWriteSpan != nullptr, ERR_INVALID_HANDLE,
1717 "get write span info of procBuf fail.");
1718
1719 AUDIO_DEBUG_LOG("process buffer write start, curWritePos %{public}" PRIu64".", curWritePos);
1720 curWriteSpan->spanStatus.store(SpanStatus::SPAN_WRITTING);
1721 curWriteSpan->writeStartTime = ClockTime::GetCurNano();
1722
1723 BufferDesc writeBuf;
1724 int32_t ret = procBuf->GetWriteBuffer(curWritePos, writeBuf);
1725 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret, "get write buffer fail, ret %{public}d.", ret);
1726 if (muteFlag) {
1727 memset_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength, 0, writeBuf.bufLength);
1728 } else {
1729 ret = HandleCapturerDataParams(writeBuf, readBuf, convertedBuffer);
1730 }
1731
1732 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memcpy data to process buffer fail, "
1733 "curWritePos %{public}" PRIu64", ret %{public}d.", curWritePos, ret);
1734
1735 curWriteSpan->writeDoneTime = ClockTime::GetCurNano();
1736 procBuf->SetHandleInfo(curWritePos, curWriteSpan->writeDoneTime);
1737 ret = procBuf->SetCurWriteFrame(curWritePos + dstSpanSizeInframe_);
1738 if (ret != SUCCESS) {
1739 AUDIO_WARNING_LOG("set procBuf next write frame fail, ret %{public}d.", ret);
1740 curWriteSpan->spanStatus.store(SpanStatus::SPAN_READ_DONE);
1741 return ERR_OPERATION_FAILED;
1742 }
1743 curWriteSpan->spanStatus.store(SpanStatus::SPAN_WRITE_DONE);
1744 return SUCCESS;
1745 }
1746
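// The device side delivers S16LE stereo; adapt it to what the client requested. S16LE stereo is
// a plain memcpy, S16LE mono and F32LE mono/stereo go through FormatConverter via the client's
// converted buffer (which is zeroed again after use), and anything else returns ERR_NOT_SUPPORTED.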
1747 int32_t AudioEndpointInner::HandleCapturerDataParams(const BufferDesc &writeBuf, const BufferDesc &readBuf,
1748 const BufferDesc &convertedBuffer)
1749 {
1750 if (clientConfig_.streamInfo.format == SAMPLE_S16LE && clientConfig_.streamInfo.channels == STEREO) {
1751 return memcpy_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength,
1752 static_cast<void *>(readBuf.buffer), readBuf.bufLength);
1753 }
1754 if (clientConfig_.streamInfo.format == SAMPLE_S16LE && clientConfig_.streamInfo.channels == MONO) {
1755 int32_t ret = FormatConverter::S16StereoToS16Mono(readBuf, convertedBuffer);
1756 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED, "Convert channel from stereo to mono failed");
1757 ret = memcpy_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength,
1758 static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength);
1759 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memcpy_s failed");
1760 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1761 convertedBuffer.bufLength);
1762 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memset converted buffer to 0 failed");
1763 return EOK;
1764 }
1765 if (clientConfig_.streamInfo.format == SAMPLE_F32LE) {
1766 int32_t ret = 0;
1767 if (clientConfig_.streamInfo.channels == STEREO) {
1768 ret = FormatConverter::S16StereoToF32Stereo(readBuf, convertedBuffer);
1769 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED,
1770 "Convert channel from s16 stereo to f32 stereo failed");
1771 } else if (clientConfig_.streamInfo.channels == MONO) {
1772 ret = FormatConverter::S16StereoToF32Mono(readBuf, convertedBuffer);
1773 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED,
1774 "Convert channel from s16 stereo to f32 mono failed");
1775 } else {
1776 return ERR_NOT_SUPPORTED;
1777 }
1778
1779 ret = memcpy_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength,
1780 static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength);
1781 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memcpy_s failed");
1782 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1783 convertedBuffer.bufLength);
1784 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memset converted buffer to 0 failed");
1785 return EOK;
1786 }
1787
1788 return ERR_NOT_SUPPORTED;
1789 }
1790
1791 void AudioEndpointInner::WriteToProcessBuffers(const BufferDesc &readBuf)
1792 {
1793 CheckRecordSignal(readBuf.buffer, readBuf.bufLength);
1794 std::lock_guard<std::mutex> lock(listLock_);
1795 for (size_t i = 0; i < processBufferList_.size(); i++) {
1796 CHECK_AND_CONTINUE_LOG(processBufferList_[i] != nullptr, "process buffer %{public}zu is null.", i);
1797 if (processBufferList_[i]->GetStreamStatus() &&
1798 processBufferList_[i]->GetStreamStatus()->load() != STREAM_RUNNING) {
1799 AUDIO_WARNING_LOG("process buffer %{public}zu not running, stream status %{public}d.",
1800 i, processBufferList_[i]->GetStreamStatus()->load());
1801 continue;
1802 }
1803
1804 int32_t ret = WriteToSpecialProcBuf(processBufferList_[i], readBuf, processList_[i]->GetConvertedBuffer(),
1805 processList_[i]->GetMuteState());
1806 CHECK_AND_CONTINUE_LOG(ret == SUCCESS,
1807 "endpoint write to process buffer %{public}zu fail, ret %{public}d.", i, ret);
1808 AUDIO_DEBUG_LOG("endpoint process buffer %{public}zu write success.", i);
1809 }
1810 }
1811
1812 int32_t AudioEndpointInner::ReadFromEndpoint(uint64_t curReadPos)
1813 {
1814 Trace trace("AudioEndpoint::ReadDstBuffer=<" + std::to_string(curReadPos));
1815 AUDIO_DEBUG_LOG("ReadFromEndpoint enter, dstAudioBuffer curReadPos %{public}" PRIu64".", curReadPos);
1816 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr, ERR_INVALID_HANDLE,
1817 "dst audio buffer is null.");
1818 SpanInfo *curReadSpan = dstAudioBuffer_->GetSpanInfo(curReadPos);
1819 CHECK_AND_RETURN_RET_LOG(curReadSpan != nullptr, ERR_INVALID_HANDLE,
1820 "get source read span info of source adapter fail.");
1821 curReadSpan->readStartTime = ClockTime::GetCurNano();
1822 curReadSpan->spanStatus.store(SpanStatus::SPAN_READING);
1823 BufferDesc readBuf;
1824 int32_t ret = dstAudioBuffer_->GetReadbuffer(curReadPos, readBuf);
1825 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret, "get read buffer fail, ret %{public}d.", ret);
1826 VolumeTools::DfxOperation(readBuf, dstStreamInfo_, logUtilsTag_, volumeDataCount_);
1827 if (AudioDump::GetInstance().GetVersionType() == DumpFileUtil::BETA_VERSION) {
1828 DumpFileUtil::WriteDumpFile(dumpHdi_, static_cast<void *>(readBuf.buffer), readBuf.bufLength);
1829 AudioCacheMgr::GetInstance().CacheData(dumpHdiName_,
1830 static_cast<void *>(readBuf.buffer), readBuf.bufLength);
1831 }
1832 WriteToProcessBuffers(readBuf);
1833 ret = memset_s(readBuf.buffer, readBuf.bufLength, 0, readBuf.bufLength);
1834 if (ret != EOK) {
1835 AUDIO_WARNING_LOG("reset buffer fail, ret %{public}d.", ret);
1836 }
1837 curReadSpan->readDoneTime = ClockTime::GetCurNano();
1838 curReadSpan->spanStatus.store(SpanStatus::SPAN_READ_DONE);
1839 return SUCCESS;
1840 }
1841
1842 void EndPointRemoveWatchdog(const std::string &message, const std::string &endPointName)
1843 {
1844 std::string watchDogMessage = message;
1845 watchDogMessage += endPointName;
1846 HiviewDFX::Watchdog::GetInstance().RemovePeriodicalTask(watchDogMessage);
1847 AUDIO_INFO_LOG("%{public}s end %{public}s", watchDogMessage.c_str(), endPointName.c_str());
1848 }
1849
1850 void AudioEndpointInner::WatchingRecordEndpointWorkLoopFuc()
1851 {
1852 recordEndpointWorkLoopFucThreadStatus_ = true;
1853 auto taskFunc = [this]() {
1854 if (recordEndpointWorkLoopFucThreadStatus_) {
1855 AUDIO_DEBUG_LOG("Set recordEndpointWorkLoopFucThreadStatus_ to false");
1856 recordEndpointWorkLoopFucThreadStatus_ = false;
1857 } else {
1858 AUDIO_INFO_LOG("watchdog happened");
1859 }
1860 };
1861 std::string endPointName = GetEndpointName();
1862 std::string watchDogMessage = "WatchingRecordEndpointWorkLoopFuc" + endPointName;
1863 AUDIO_INFO_LOG("watchdog start");
1864 HiviewDFX::Watchdog::GetInstance().RunPeriodicalTask(watchDogMessage, taskFunc,
1865 WATCHDOG_INTERVAL_TIME_MS, WATCHDOG_DELAY_TIME_MS);
1866 }
1867
1868 void AudioEndpointInner::RecordEndpointWorkLoopFuc()
1869 {
1870 SetThreadQosLevel();
1871 int64_t curTime = 0;
1872 uint64_t curReadPos = 0;
1873 int64_t wakeUpTime = ClockTime::GetCurNano();
1874 AUDIO_INFO_LOG("Record endpoint work loop fuc start.");
1875 // add watchdog
1876 WatchingRecordEndpointWorkLoopFuc();
1877 while (isInited_.load()) {
1878 if (!KeepWorkloopRunning()) {
1879 recordEndpointWorkLoopFucThreadStatus_ = true;
1880 continue;
1881 }
1882 threadStatus_ = INRUNNING;
1883 if (needReSyncPosition_) {
1884 RecordReSyncPosition();
1885 wakeUpTime = ClockTime::GetCurNano();
1886 needReSyncPosition_ = false;
1887 recordEndpointWorkLoopFucThreadStatus_ = true;
1888 continue;
1889 }
1890 curTime = ClockTime::GetCurNano();
1891 Trace loopTrace("Record_loop_trace");
1892 if (curTime - wakeUpTime > THREE_MILLISECOND_DURATION) {
1893 AUDIO_WARNING_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
1894 } else if (curTime - wakeUpTime > ONE_MILLISECOND_DURATION) {
1895 AUDIO_DEBUG_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
1896 }
1897
1898 curReadPos = dstAudioBuffer_->GetCurReadFrame();
1899 CHECK_AND_BREAK_LOG(ReadFromEndpoint(curReadPos) == SUCCESS, "read from endpoint to process service fail.");
1900
1901 bool ret = RecordPrepareNextLoop(curReadPos, wakeUpTime);
1902 CHECK_AND_BREAK_LOG(ret, "PrepareNextLoop failed!");
1903
1904 ProcessUpdateAppsUidForRecord();
1905
1906 loopTrace.End();
1907 threadStatus_ = SLEEPING;
1908 CheckWakeUpTime(wakeUpTime);
1909 ClockTime::AbsoluteSleep(wakeUpTime);
1910 recordEndpointWorkLoopFucThreadStatus_ = true;
1911 }
1912 ReSetThreadQosLevel();
1913 // stop watchdog
1914 EndPointRemoveWatchdog("WatchingRecordEndpointWorkLoopFuc", GetEndpointName());
1915 }
1916
1917 void AudioEndpointInner::WatchingEndpointWorkLoopFuc()
1918 {
1919 endpointWorkLoopFucThreadStatus_ = true;
1920 auto taskFunc = [this]() {
1921 if (endpointWorkLoopFucThreadStatus_) {
1922 AUDIO_DEBUG_LOG("Set endpointWorkLoopFucThreadStatus_ to false");
1923 endpointWorkLoopFucThreadStatus_ = false;
1924 } else {
1925 AUDIO_INFO_LOG("watchdog happened");
1926 }
1927 };
1928 std::string endPointName = GetEndpointName();
1929 std::string watchDogMessage = "WatchingEndpointWorkLoopFuc" + endPointName;
1930 AUDIO_INFO_LOG("watchDog start");
1931 HiviewDFX::Watchdog::GetInstance().RunPeriodicalTask(watchDogMessage, taskFunc,
1932 WATCHDOG_INTERVAL_TIME_MS, WATCHDOG_DELAY_TIME_MS);
1933 }
1934
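// Pin the fast-mixer thread to the upper CPU cores (CPU_INDEX up to the last configured core;
// CPU_INDEX is defined elsewhere in this module and, per the comment below, corresponds to
// core 2). Failure to set the affinity is only logged; the loop keeps running on whatever core
// the scheduler picks.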
1935 void AudioEndpointInner::BindCore()
1936 {
1937 if (coreBinded_) {
1938 return;
1939 }
1940 // bind cpu cores 2-7 for fast mixer
1941 cpu_set_t targetCpus;
1942 CPU_ZERO(&targetCpus);
1943 int32_t cpuNum = sysconf(_SC_NPROCESSORS_CONF);
1944 for (int32_t i = CPU_INDEX; i < cpuNum; i++) {
1945 CPU_SET(i, &targetCpus);
1946 }
1947
1948 int32_t ret = sched_setaffinity(gettid(), sizeof(cpu_set_t), &targetCpus);
1949 if (ret != 0) {
1950 AUDIO_ERR_LOG("set target cpu failed, set ret: %{public}d", ret);
1951 }
1952 AUDIO_INFO_LOG("set pid: %{public}d, tid: %{public}d cpus", getpid(), gettid());
1953 coreBinded_ = true;
1954 }
1955
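// Guard against a bad prediction: if the planned wake-up lies more than MAX_WAKEUP_TIME_NS (2 s)
// in the future, sleep only RELATIVE_SLEEP_TIME_NS (5 ms) instead so the loop re-evaluates soon.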
1956 void AudioEndpointInner::CheckWakeUpTime(int64_t &wakeUpTime)
1957 {
1958 int64_t curTime = ClockTime::GetCurNano();
1959 if (wakeUpTime - curTime > MAX_WAKEUP_TIME_NS) {
1960 wakeUpTime = curTime + RELATIVE_SLEEP_TIME_NS;
1961 }
1962 }
1963
1964 void AudioEndpointInner::EndpointWorkLoopFuc()
1965 {
1966 BindCore();
1967 SetThreadQosLevel();
1968 int64_t curTime = 0;
1969 uint64_t curWritePos = 0;
1970 int64_t wakeUpTime = ClockTime::GetCurNano();
1971 AUDIO_INFO_LOG("Endpoint work loop fuc start");
1972 // add watchdog
1973 WatchingEndpointWorkLoopFuc();
1974 while (isInited_.load()) {
1975 if (!KeepWorkloopRunning()) {
1976 endpointWorkLoopFucThreadStatus_ = true;
1977 continue;
1978 }
1979 threadStatus_ = INRUNNING;
1980 curTime = ClockTime::GetCurNano();
1981 Trace loopTrace("AudioEndpoint::loop_trace");
1982 if (needReSyncPosition_) {
1983 ReSyncPosition();
1984 wakeUpTime = curTime;
1985 needReSyncPosition_ = false;
1986 endpointWorkLoopFucThreadStatus_ = true;
1987 continue;
1988 }
1989 if (curTime - wakeUpTime > THREE_MILLISECOND_DURATION) {
1990 AUDIO_WARNING_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
1991 } else if (curTime - wakeUpTime > ONE_MILLISECOND_DURATION) {
1992 AUDIO_DEBUG_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
1993 }
1994
1995 // First, wake up at client may-write-done time, and check if all process write done.
1996 // If not, do another sleep to the possible latest write time.
1997 curWritePos = dstAudioBuffer_->GetCurWriteFrame();
1998 if (!CheckAllBufferReady(wakeUpTime, curWritePos)) { curTime = ClockTime::GetCurNano(); }
1999
2000 // then do mix & write to hdi buffer and prepare next loop
2001 if (!ProcessToEndpointDataHandle(curWritePos)) {
2002 AUDIO_ERR_LOG("ProcessToEndpointDataHandle failed!");
2003 break;
2004 }
2005
2006 // prepare info of next loop
2007 if (!PrepareNextLoop(curWritePos, wakeUpTime)) {
2008 AUDIO_ERR_LOG("PrepareNextLoop failed!");
2009 break;
2010 }
2011
2012 ProcessUpdateAppsUidForPlayback();
2013
2014 loopTrace.End();
2015 // start sleep
2016 threadStatus_ = SLEEPING;
2017 CheckWakeUpTime(wakeUpTime);
2018 ClockTime::AbsoluteSleep(wakeUpTime);
2019 endpointWorkLoopFucThreadStatus_ = true;
2020 }
2021 AUDIO_DEBUG_LOG("Endpoint work loop fuc end");
2022 ReSetThreadQosLevel();
2023 // stop watchdog
2024 EndPointRemoveWatchdog("WatchingEndpointWorkLoopFuc", GetEndpointName());
2025 }
2026
2027 void AudioEndpointInner::ProcessUpdateAppsUidForPlayback()
2028 {
2029 std::vector<int32_t> appsUid;
2030 {
2031 std::lock_guard<std::mutex> lock(listLock_);
2032
2033 appsUid.reserve(processList_.size());
2034 for (auto iProccessStream : processList_) {
2035 appsUid.push_back(iProccessStream->GetAppInfo().appUid);
2036 }
2037 }
2038 std::shared_ptr<IAudioRenderSink> sink = HdiAdapterManager::GetInstance().GetRenderSink(fastRenderId_);
2039 CHECK_AND_RETURN_LOG(sink, "fastSink_ is nullptr");
2040 sink->UpdateAppsUid(appsUid);
2041 }
2042
2043 void AudioEndpointInner::ProcessUpdateAppsUidForRecord()
2044 {
2045 std::vector<int32_t> appsUid;
2046 {
2047 std::lock_guard<std::mutex> lock(listLock_);
2048
2049 appsUid.reserve(processList_.size());
2050 for (auto iProccessStream : processList_) {
2051 appsUid.push_back(iProccessStream->GetAppInfo().appUid);
2052 }
2053 }
2054 std::shared_ptr<IAudioCaptureSource> source = HdiAdapterManager::GetInstance().GetCaptureSource(fastCaptureId_);
2055 CHECK_AND_RETURN_LOG(source, "fastSource_ is nullptr");
2056 source->UpdateAppsUid(appsUid);
2057 }
2058
2059 uint32_t AudioEndpointInner::GetLinkedProcessCount()
2060 {
2061 std::lock_guard<std::mutex> lock(listLock_);
2062 return processList_.size();
2063 }
2064
2065 bool AudioEndpointInner::IsInvalidBuffer(uint8_t *buffer, size_t bufferSize, AudioSampleFormat format)
2066 {
2067 bool isInvalid = false;
2068 uint8_t ui8Data = 0;
2069 int16_t i16Data = 0;
2070 switch (format) {
2071 case SAMPLE_U8:
2072 CHECK_AND_RETURN_RET_LOG(bufferSize > 0, false, "buffer size is too small");
2073 ui8Data = *buffer;
2074 isInvalid = ui8Data == 0;
2075 break;
2076 case SAMPLE_S16LE:
2077 CHECK_AND_RETURN_RET_LOG(bufferSize > 1, false, "buffer size is too small");
2078 i16Data = *(reinterpret_cast<const int16_t*>(buffer));
2079 isInvalid = i16Data == 0;
2080 break;
2081 default:
2082 break;
2083 }
2084 return isInvalid;
2085 }
2086
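// Silent-playback detection: if a stream keeps producing spans whose first sample is zero (see
// IsInvalidBuffer) for ONE_MINUTE seconds, it is flagged as silent and reported to ResSched with
// RES_TYPE_AUDIO_RENDERER_SILENT_PLAYBACK, presumably so the system can act on the idle renderer;
// the flag is cleared and reported again as soon as valid data shows up.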
2087 void AudioEndpointInner::WriteMuteDataSysEvent(uint8_t *buffer, size_t bufferSize, int32_t index)
2088 {
2089 auto tempProcess = processList_[index];
2090 CHECK_AND_RETURN_LOG(tempProcess, "tempProcess is nullptr");
2091 if (IsInvalidBuffer(buffer, bufferSize, processList_[index]->GetStreamInfo().format)) {
2092 if (tempProcess->GetStartMuteTime() == 0) {
2093 tempProcess->SetStartMuteTime(std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()));
2094 }
2095 std::time_t currentTime = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
2096 if ((currentTime - tempProcess->GetStartMuteTime() >= ONE_MINUTE) && !tempProcess->GetSilentState()) {
2097 tempProcess->SetSilentState(true);
2098 AUDIO_WARNING_LOG("write invalid data for some time in server");
2099
2100 std::unordered_map<std::string, std::string> payload;
2101 payload["uid"] = std::to_string(tempProcess->GetAppInfo().appUid);
2102 payload["sessionId"] = std::to_string(tempProcess->GetAudioSessionId());
2103 payload["isSilent"] = std::to_string(true);
2104 #ifdef RESSCHE_ENABLE
2105 ReportDataToResSched(payload, ResourceSchedule::ResType::RES_TYPE_AUDIO_RENDERER_SILENT_PLAYBACK);
2106 #endif
2107 }
2108 } else {
2109 if (tempProcess->GetStartMuteTime() != 0) {
2110 tempProcess->SetStartMuteTime(0);
2111 }
2112 if (tempProcess->GetSilentState()) {
2113 AUDIO_WARNING_LOG("begin write valid data in server");
2114 tempProcess->SetSilentState(false);
2115
2116 std::unordered_map<std::string, std::string> payload;
2117 payload["uid"] = std::to_string(tempProcess->GetAppInfo().appUid);
2118 payload["sessionId"] = std::to_string(tempProcess->GetAudioSessionId());
2119 payload["isSilent"] = std::to_string(false);
2120 #ifdef RESSCHE_ENABLE
2121 ReportDataToResSched(payload, ResourceSchedule::ResType::RES_TYPE_AUDIO_RENDERER_SILENT_PLAYBACK);
2122 #endif
2123 }
2124 }
2125 }
2126
2127 void AudioEndpointInner::ReportDataToResSched(std::unordered_map<std::string, std::string> payload, uint32_t type)
2128 {
2129 #ifdef RESSCHE_ENABLE
2130 AUDIO_INFO_LOG("report event to ResSched ,event type : %{public}d", type);
2131 ResourceSchedule::ResSchedClient::GetInstance().ReportData(type, 0, payload);
2132 #endif
2133 }
2134 } // namespace AudioStandard
2135 } // namespace OHOS
2136