1 /*
2 * Copyright (c) 2025 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #ifndef LOG_TAG
16 #define LOG_TAG "HpaeNodeCommon"
17 #endif
18
19 #include <cinttypes>
20 #include "hpae_node_common.h"
21 #include "audio_errors.h"
22 #include "audio_engine_log.h"
23
24 namespace OHOS {
25 namespace AudioStandard {
26 namespace HPAE {
static constexpr uint64_t TIME_US_PER_S = 1000000;  // microseconds per second
static constexpr uint32_t DEFAULT_MULTICHANNEL_NUM = 6;  // channel count forced for MCH_Speaker (5.1)
// Default channel-layout value for the multichannel sink.
// NOTE(review): presumably the project's encoding of a 5.1 layout — confirm against AudioChannelLayout.
static constexpr uint32_t DEFAULT_MULTICHANNEL_CHANNELLAYOUT = 1551;
static constexpr float MAX_SINK_VOLUME_LEVEL = 1.0;  // full, unattenuated sink volume
static constexpr uint32_t DEFAULT_MULTICHANNEL_FRAME_LEN_MS = 20;  // frame duration (ms) used for MCH/EC/mic-ref streams
static constexpr uint32_t MS_PER_SECOND = 1000;  // milliseconds per second
33
34 static std::map<AudioStreamType, HpaeProcessorType> g_streamTypeToSceneTypeMap = {
35 {STREAM_MUSIC, HPAE_SCENE_MUSIC},
36 {STREAM_GAME, HPAE_SCENE_GAME},
37 {STREAM_MOVIE, HPAE_SCENE_MOVIE},
38 {STREAM_GAME, HPAE_SCENE_GAME},
39 {STREAM_SPEECH, HPAE_SCENE_SPEECH},
40 {STREAM_VOICE_RING, HPAE_SCENE_RING},
41 {STREAM_VOICE_COMMUNICATION, HPAE_SCENE_VOIP_DOWN},
42 {STREAM_MEDIA, HPAE_SCENE_OTHERS}
43 };
44
// Maps an audio effect scene to the HPAE processor (scene) type.
// Scenes absent from this table fall back to HPAE_SCENE_EFFECT_NONE
// (see TransEffectSceneToSceneType).
static std::map<AudioEffectScene, HpaeProcessorType> g_effectSceneToProcessorTypeMap = {
    {SCENE_OTHERS, HPAE_SCENE_OTHERS},
    {SCENE_MUSIC, HPAE_SCENE_MUSIC},
    {SCENE_MOVIE, HPAE_SCENE_MOVIE},
    {SCENE_GAME, HPAE_SCENE_GAME},
    {SCENE_SPEECH, HPAE_SCENE_SPEECH},
    {SCENE_RING, HPAE_SCENE_RING},
    {SCENE_VOIP_DOWN, HPAE_SCENE_VOIP_DOWN},
    {SCENE_COLLABORATIVE, HPAE_SCENE_COLLABORATIVE}
};
55
// Maps a capture source type to the HPAE processor (scene) type used for it.
// Source types absent from this table fall back to HPAE_SCENE_EFFECT_NONE
// (see TransSourceTypeToSceneType). Note that MIC and CAMCORDER share the
// plain RECORD scene, and VOICE_CALL / VOICE_COMMUNICATION share VOIP_UP.
static std::unordered_map<SourceType, HpaeProcessorType> g_sourceTypeToSceneTypeMap = {
    {SOURCE_TYPE_MIC, HPAE_SCENE_RECORD},
    {SOURCE_TYPE_CAMCORDER, HPAE_SCENE_RECORD},
    {SOURCE_TYPE_VOICE_CALL, HPAE_SCENE_VOIP_UP},
    {SOURCE_TYPE_VOICE_COMMUNICATION, HPAE_SCENE_VOIP_UP},
    {SOURCE_TYPE_VOICE_TRANSCRIPTION, HPAE_SCENE_PRE_ENHANCE},
    {SOURCE_TYPE_VOICE_MESSAGE, HPAE_SCENE_VOICE_MESSAGE},
    {SOURCE_TYPE_VOICE_RECOGNITION, HPAE_SCENE_RECOGNITION}
};
65
66
// Capture scenes that require an echo-cancellation (EC) reference stream
// (queried via CheckSceneTypeNeedEc).
static std::unordered_set<HpaeProcessorType> g_processorTypeNeedEcSet = {
    HPAE_SCENE_VOIP_UP,
    HPAE_SCENE_PRE_ENHANCE,
    HPAE_SCENE_RECOGNITION,
};

// Capture scenes that require a microphone-reference stream
// (queried via CheckSceneTypeNeedMicRef).
static std::unordered_set<HpaeProcessorType> g_processorTypeNeedMicRefSet = {
    HPAE_SCENE_VOIP_UP,
    HPAE_SCENE_RECORD,
};
77
// Maps a capture processor type back to its audio-enhance scene; processor
// types absent from this table map to SCENE_NONE (see TransProcessType2EnhanceScene).
static std::unordered_map<HpaeProcessorType, AudioEnhanceScene> g_processorTypeToSceneTypeMap = {
    {HPAE_SCENE_RECORD, SCENE_RECORD},
    {HPAE_SCENE_VOIP_UP, SCENE_VOIP_UP},
    {HPAE_SCENE_PRE_ENHANCE, SCENE_PRE_ENHANCE},
    {HPAE_SCENE_VOICE_MESSAGE, SCENE_VOICE_MESSAGE},
    {HPAE_SCENE_RECOGNITION, SCENE_RECOGNITION},
};
85
// Human-readable names for session states; unmapped values render as
// "UNKNOWN" (see ConvertSessionState2Str).
static std::unordered_map<HpaeSessionState, std::string> g_sessionStateToStrMap = {
    {HPAE_SESSION_NEW, "NEW"},
    {HPAE_SESSION_PREPARED, "PREPARED"},
    {HPAE_SESSION_RUNNING, "RUNNING"},
    {HPAE_SESSION_PAUSING, "PAUSING"},
    {HPAE_SESSION_PAUSED, "PAUSED"},
    {HPAE_SESSION_STOPPING, "STOPPING"},
    {HPAE_SESSION_STOPPED, "STOPPED"},
    {HPAE_SESSION_RELEASED, "RELEASED"}
};

// Human-readable names for stream-manager states; unmapped values render as
// "UNKNOWN" (see ConvertStreamManagerState2Str).
static std::unordered_map<StreamManagerState, std::string> g_streamMgrStateToStrMap = {
    {STREAM_MANAGER_NEW, "NEW"},
    {STREAM_MANAGER_IDLE, "IDLE"},
    {STREAM_MANAGER_RUNNING, "RUNNING"},
    {STREAM_MANAGER_SUSPENDED, "SUSPENDED"},
    {STREAM_MANAGER_RELEASED, "RELEASED"}
};
104
// Parser format string -> sample-format enum. Both the short ("s16") and
// explicit little-endian ("s16le") spellings are accepted for each format.
static std::map<std::string, uint32_t> g_formatFromParserStrToEnum = {
    {"s16", SAMPLE_S16LE},
    {"s16le", SAMPLE_S16LE},
    {"s24", SAMPLE_S24LE},
    {"s24le", SAMPLE_S24LE},
    {"s32", SAMPLE_S32LE},
    {"s32le", SAMPLE_S32LE},
    {"f32", SAMPLE_F32LE},
    {"f32le", SAMPLE_F32LE},
};

// Sample-format enum -> canonical parser string (always the "..le" spelling).
static std::map<uint32_t, std::string> g_formatFromParserEnumToStr = {
    {SAMPLE_S16LE, "s16le"},
    {SAMPLE_S24LE, "s24le"},
    {SAMPLE_S32LE, "s32le"},
    {SAMPLE_F32LE, "f32le"},
};

// Sink deviceClass string -> pipe type; only "multichannel" maps to a
// dedicated pipe, everything else listed here is a normal output pipe.
// Classes absent from this table yield PIPE_TYPE_UNKNOWN (see ConvertDeviceClassToPipe).
static std::unordered_map<std::string, AudioPipeType> g_deviceClassToPipeMap = {
    {"primary", PIPE_TYPE_NORMAL_OUT},
    {"a2dp", PIPE_TYPE_NORMAL_OUT},
    {"remote", PIPE_TYPE_NORMAL_OUT},
    {"dp", PIPE_TYPE_NORMAL_OUT},
    {"multichannel", PIPE_TYPE_MULTICHANNEL},
};
130
ConvertDeviceClassToPipe(const std::string & deviceClass)131 AudioPipeType ConvertDeviceClassToPipe(const std::string &deviceClass)
132 {
133 auto item = g_deviceClassToPipeMap.find(deviceClass);
134 if (item == g_deviceClassToPipeMap.end()) {
135 return PIPE_TYPE_UNKNOWN;
136 }
137 return item->second;
138 }
139
ConvertSessionState2Str(HpaeSessionState state)140 std::string ConvertSessionState2Str(HpaeSessionState state)
141 {
142 if (g_sessionStateToStrMap.find(state) == g_sessionStateToStrMap.end()) {
143 return "UNKNOWN";
144 }
145 return g_sessionStateToStrMap[state];
146 }
147
ConvertStreamManagerState2Str(StreamManagerState state)148 std::string ConvertStreamManagerState2Str(StreamManagerState state)
149 {
150 if (g_streamMgrStateToStrMap.find(state) == g_streamMgrStateToStrMap.end()) {
151 return "UNKNOWN";
152 }
153 return g_streamMgrStateToStrMap[state];
154 }
155
TransStreamTypeToSceneType(AudioStreamType streamType)156 HpaeProcessorType TransStreamTypeToSceneType(AudioStreamType streamType)
157 {
158 if (g_streamTypeToSceneTypeMap.find(streamType) == g_streamTypeToSceneTypeMap.end()) {
159 return HPAE_SCENE_EFFECT_NONE;
160 } else {
161 return g_streamTypeToSceneTypeMap[streamType];
162 }
163 }
164
TransEffectSceneToSceneType(AudioEffectScene effectScene)165 HpaeProcessorType TransEffectSceneToSceneType(AudioEffectScene effectScene)
166 {
167 if (g_effectSceneToProcessorTypeMap.find(effectScene) == g_effectSceneToProcessorTypeMap.end()) {
168 return HPAE_SCENE_EFFECT_NONE;
169 } else {
170 return g_effectSceneToProcessorTypeMap[effectScene];
171 }
172 }
173
TransNodeInfoForCollaboration(HpaeNodeInfo & nodeInfo,bool isCollaborationEnabled)174 void TransNodeInfoForCollaboration(HpaeNodeInfo &nodeInfo, bool isCollaborationEnabled)
175 {
176 if (isCollaborationEnabled) {
177 if (nodeInfo.effectInfo.effectScene == SCENE_MUSIC || nodeInfo.effectInfo.effectScene == SCENE_MOVIE) {
178 nodeInfo.effectInfo.lastEffectScene = nodeInfo.effectInfo.effectScene;
179 nodeInfo.effectInfo.effectScene = SCENE_COLLABORATIVE;
180 nodeInfo.sceneType = HPAE_SCENE_COLLABORATIVE;
181 AUDIO_INFO_LOG("collaboration enabled, effectScene from %{public}d, sceneType changed to %{public}d",
182 nodeInfo.effectInfo.lastEffectScene, nodeInfo.sceneType);
183 }
184 } else {
185 RecoverNodeInfoForCollaboration(nodeInfo);
186 }
187 }
188
TransSourceTypeToSceneType(SourceType sourceType)189 HpaeProcessorType TransSourceTypeToSceneType(SourceType sourceType)
190 {
191 if (g_sourceTypeToSceneTypeMap.find(sourceType) == g_sourceTypeToSceneTypeMap.end()) {
192 return HPAE_SCENE_EFFECT_NONE;
193 } else {
194 return g_sourceTypeToSceneTypeMap[sourceType];
195 }
196 }
197
CheckSceneTypeNeedEc(HpaeProcessorType processorType)198 bool CheckSceneTypeNeedEc(HpaeProcessorType processorType)
199 {
200 return g_processorTypeNeedEcSet.find(processorType) != g_processorTypeNeedEcSet.end();
201 }
202
CheckSceneTypeNeedMicRef(HpaeProcessorType processorType)203 bool CheckSceneTypeNeedMicRef(HpaeProcessorType processorType)
204 {
205 return g_processorTypeNeedMicRefSet.find(processorType) != g_processorTypeNeedMicRefSet.end();
206 }
207
// Render processor type -> effect-scene name string; processor types absent
// from this table render as "SCENE_EXTRA" (see TransProcessorTypeToSceneType).
static std::unordered_map<HpaeProcessorType, std::string> g_processorTypeToEffectSceneTypeMap = {
    {HPAE_SCENE_DEFAULT, "HPAE_SCENE_DEFAULT"},
    {HPAE_SCENE_OTHERS, "SCENE_OTHERS"},
    {HPAE_SCENE_MUSIC, "SCENE_MUSIC"},
    {HPAE_SCENE_GAME, "SCENE_GAME"},
    {HPAE_SCENE_MOVIE, "SCENE_MOVIE"},
    {HPAE_SCENE_SPEECH, "SCENE_SPEECH"},
    {HPAE_SCENE_RING, "SCENE_RING"},
    {HPAE_SCENE_VOIP_DOWN, "SCENE_VOIP_DOWN"},
    {HPAE_SCENE_COLLABORATIVE, "SCENE_COLLABORATIVE"}};
218
TransProcessorTypeToSceneType(HpaeProcessorType processorType)219 std::string TransProcessorTypeToSceneType(HpaeProcessorType processorType)
220 {
221 if (g_processorTypeToEffectSceneTypeMap.find(processorType) == g_processorTypeToEffectSceneTypeMap.end()) {
222 return "SCENE_EXTRA";
223 } else {
224 return g_processorTypeToEffectSceneTypeMap[processorType];
225 }
226 }
227
// Returns true when two node configurations match on channel count,
// sampling rate and channel layout. Sample format is deliberately excluded
// from the comparison for now.
bool CheckHpaeNodeInfoIsSame(HpaeNodeInfo &preNodeInfo, HpaeNodeInfo &curNodeInfo)
{
    return preNodeInfo.channels == curNodeInfo.channels && // TODO: also compare preNodeInfo.format == curNodeInfo.format
        preNodeInfo.samplingRate == curNodeInfo.samplingRate &&
        preNodeInfo.channelLayout == curNodeInfo.channelLayout;
}
234
TransNodeInfoToStringKey(HpaeNodeInfo & nodeInfo)235 std::string TransNodeInfoToStringKey(HpaeNodeInfo& nodeInfo)
236 {
237 std::string nodeKey = std::to_string(nodeInfo.sourceBufferType) + "_" +
238 std::to_string(nodeInfo.samplingRate) + "_" +
239 std::to_string(nodeInfo.channels) + "_" +
240 std::to_string(nodeInfo.format);
241 return nodeKey;
242 }
243
TransProcessType2EnhanceScene(const HpaeProcessorType & processorType)244 AudioEnhanceScene TransProcessType2EnhanceScene(const HpaeProcessorType &processorType)
245 {
246 if (g_processorTypeToSceneTypeMap.find(processorType) == g_processorTypeToSceneTypeMap.end()) {
247 return SCENE_NONE;
248 } else {
249 return g_processorTypeToSceneTypeMap[processorType];
250 }
251 }
252
ConvertUsToFrameCount(uint64_t usTime,const HpaeNodeInfo & nodeInfo)253 size_t ConvertUsToFrameCount(uint64_t usTime, const HpaeNodeInfo &nodeInfo)
254 {
255 return usTime * nodeInfo.samplingRate / TIME_US_PER_S / nodeInfo.frameLen;
256 }
257
ConvertDatalenToUs(size_t bufferSize,const HpaeNodeInfo & nodeInfo)258 uint64_t ConvertDatalenToUs(size_t bufferSize, const HpaeNodeInfo &nodeInfo)
259 {
260 if (nodeInfo.channels == 0 || GetSizeFromFormat(nodeInfo.format) == 0 || nodeInfo.samplingRate == 0) {
261 AUDIO_ERR_LOG("invalid nodeInfo");
262 return 0;
263 }
264
265 double samples = static_cast<double>(bufferSize) /
266 (nodeInfo.channels * GetSizeFromFormat(nodeInfo.format));
267 double seconds = samples / static_cast<int32_t>(nodeInfo.samplingRate);
268 double microseconds = seconds * TIME_US_PER_S;
269
270 return static_cast<uint64_t>(microseconds);
271 }
272
TransFormatFromStringToEnum(std::string format)273 AudioSampleFormat TransFormatFromStringToEnum(std::string format)
274 {
275 return static_cast<AudioSampleFormat>(g_formatFromParserStrToEnum[format]);
276 }
277
// Applies multichannel-specific overrides to a sink configuration based on
// its device name:
// - "DP_MCH_speaker": take only the channel layout from the module info.
// - "MCH_Speaker": force 6 channels, the default multichannel layout, a
//   20 ms frame length and full volume.
// All other sinks are left untouched.
void AdjustMchSinkInfo(const AudioModuleInfo &audioModuleInfo, HpaeSinkInfo &sinkInfo)
{
    if (sinkInfo.deviceName == "DP_MCH_speaker") {
        sinkInfo.channelLayout = static_cast<uint64_t>(std::atol(audioModuleInfo.channelLayout.c_str()));
        return;
    }
    if (sinkInfo.deviceName != "MCH_Speaker") {
        return;
    }
    sinkInfo.channels = static_cast<AudioChannel>(DEFAULT_MULTICHANNEL_NUM);
    sinkInfo.channelLayout = DEFAULT_MULTICHANNEL_CHANNELLAYOUT;
    // frameLen is in sample frames: 20 ms worth of samples at the sink rate.
    sinkInfo.frameLen = DEFAULT_MULTICHANNEL_FRAME_LEN_MS * sinkInfo.samplingRate / MS_PER_SECOND;
    sinkInfo.volume = MAX_SINK_VOLUME_LEVEL;
    AUDIO_INFO_LOG("adjust MCH SINK info ch: %{public}u, channelLayout: %{public}" PRIu64
        " frameLen: %{public}zu volume %{public}f",
        sinkInfo.channels,
        sinkInfo.channelLayout,
        sinkInfo.frameLen,
        sinkInfo.volume);
}
298
// Parses the string fields of an AudioModuleInfo into a typed HpaeSinkInfo.
// Returns ERROR if the sample-format string is not in the supported table,
// SUCCESS otherwise. Numeric fields are parsed with std::atol, so malformed
// strings silently become 0.
int32_t TransModuleInfoToHpaeSinkInfo(const AudioModuleInfo &audioModuleInfo, HpaeSinkInfo &sinkInfo)
{
    // Validate the format up front so we never fall into the unknown-format path.
    if (g_formatFromParserStrToEnum.find(audioModuleInfo.format) == g_formatFromParserStrToEnum.end()) {
        AUDIO_ERR_LOG("openaudioport failed,format:%{public}s not supported", audioModuleInfo.format.c_str());
        return ERROR;
    }
    sinkInfo.deviceNetId = audioModuleInfo.networkId;
    sinkInfo.deviceClass = audioModuleInfo.className;
    AUDIO_INFO_LOG("HpaeManager::deviceNetId: %{public}s, deviceClass: %{public}s",
        sinkInfo.deviceNetId.c_str(),
        sinkInfo.deviceClass.c_str());
    sinkInfo.adapterName = audioModuleInfo.adapterName;
    sinkInfo.lib = audioModuleInfo.lib;
    sinkInfo.splitMode = audioModuleInfo.extra;
    sinkInfo.filePath = audioModuleInfo.fileName;

    sinkInfo.samplingRate = static_cast<AudioSamplingRate>(std::atol(audioModuleInfo.rate.c_str()));
    sinkInfo.format = static_cast<AudioSampleFormat>(TransFormatFromStringToEnum(audioModuleInfo.format));
    sinkInfo.channels = static_cast<AudioChannel>(std::atol(audioModuleInfo.channels.c_str()));
    int32_t bufferSize = static_cast<int32_t>(std::atol(audioModuleInfo.bufferSize.c_str()));
    // frameLen = frames per buffer: bytes / (channels * bytes-per-sample).
    sinkInfo.frameLen = static_cast<size_t>(bufferSize) / (sinkInfo.channels *
        static_cast<size_t>(GetSizeFromFormat(sinkInfo.format)));
    sinkInfo.channelLayout = 0ULL;
    sinkInfo.deviceType = static_cast<int32_t>(std::atol(audioModuleInfo.deviceType.c_str()));
    sinkInfo.volume = MAX_SINK_VOLUME_LEVEL;
    sinkInfo.openMicSpeaker = static_cast<uint32_t>(std::atol(audioModuleInfo.OpenMicSpeaker.c_str()));
    sinkInfo.renderInIdleState = static_cast<uint32_t>(std::atol(audioModuleInfo.renderInIdleState.c_str()));
    sinkInfo.offloadEnable = static_cast<uint32_t>(std::atol(audioModuleInfo.offloadEnable.c_str()));
    sinkInfo.sinkLatency = static_cast<uint32_t>(std::atol(audioModuleInfo.sinkLatency.c_str()));
    sinkInfo.fixedLatency = static_cast<uint32_t>(std::atol(audioModuleInfo.fixedLatency.c_str()));
    sinkInfo.deviceName = audioModuleInfo.name;
    // Multichannel sinks get extra overrides based on the device name.
    AdjustMchSinkInfo(audioModuleInfo, sinkInfo);
    if (audioModuleInfo.needEmptyChunk) {
        sinkInfo.needEmptyChunk = audioModuleInfo.needEmptyChunk.value();
    }
    return SUCCESS;
}
336
// Parses the string fields of an AudioModuleInfo into a typed HpaeSourceInfo,
// including the echo-cancellation (EC) and mic-reference sub-stream configs.
// Returns ERROR if the sample-format string is unsupported, SUCCESS otherwise.
// Numeric fields are parsed with std::atol, so malformed strings become 0.
int32_t TransModuleInfoToHpaeSourceInfo(const AudioModuleInfo &audioModuleInfo, HpaeSourceInfo &sourceInfo)
{
    if (g_formatFromParserStrToEnum.find(audioModuleInfo.format) == g_formatFromParserStrToEnum.end()) {
        AUDIO_ERR_LOG("openaudioport failed,format:%{public}s not supported", audioModuleInfo.format.c_str());
        return ERROR;
    }
    sourceInfo.deviceNetId = audioModuleInfo.networkId;
    sourceInfo.deviceClass = audioModuleInfo.className;
    sourceInfo.adapterName = audioModuleInfo.adapterName;
    sourceInfo.sourceName = audioModuleInfo.name; // built_in_mic
    sourceInfo.deviceName = audioModuleInfo.name;
    sourceInfo.sourceType = static_cast<SourceType>(std::atol(audioModuleInfo.sourceType.c_str()));
    sourceInfo.filePath = audioModuleInfo.fileName;
    int32_t bufferSize = static_cast<int32_t>(std::atol(audioModuleInfo.bufferSize.c_str()));
    sourceInfo.channels = static_cast<AudioChannel>(std::atol(audioModuleInfo.channels.c_str()));
    sourceInfo.format = TransFormatFromStringToEnum(audioModuleInfo.format);
    // frameLen = frames per buffer: bytes / (channels * bytes-per-sample).
    sourceInfo.frameLen = static_cast<size_t>(bufferSize) / (sourceInfo.channels *
        static_cast<size_t>(GetSizeFromFormat(sourceInfo.format)));
    sourceInfo.samplingRate = static_cast<AudioSamplingRate>(std::atol(audioModuleInfo.rate.c_str()));
    sourceInfo.channelLayout = 0ULL;
    sourceInfo.deviceType = static_cast<int32_t>(std::atol(audioModuleInfo.deviceType.c_str()));
    sourceInfo.volume = MAX_SINK_VOLUME_LEVEL; // 1.0f;

    // Echo-cancellation reference stream configuration.
    sourceInfo.ecType = static_cast<HpaeEcType>(std::atol(audioModuleInfo.ecType.c_str()));
    sourceInfo.ecAdapterName = audioModuleInfo.ecAdapter;
    sourceInfo.ecSamplingRate = static_cast<AudioSamplingRate>(std::atol(audioModuleInfo.ecSamplingRate.c_str()));
    sourceInfo.ecFormat = TransFormatFromStringToEnum(audioModuleInfo.ecFormat);
    // NOTE(review): (rate / 1000) truncates before multiplying, unlike the
    // sink frameLen math (rate * ms / 1000) — e.g. 44100 Hz gives 880, not
    // 882 frames. Confirm whether this rounding is intentional.
    sourceInfo.ecFrameLen = DEFAULT_MULTICHANNEL_FRAME_LEN_MS * (sourceInfo.ecSamplingRate / MS_PER_SECOND);

    // Mic-reference stream configuration.
    sourceInfo.micRef = static_cast<HpaeMicRefSwitch>(std::atol(audioModuleInfo.openMicRef.c_str()));
    sourceInfo.micRefSamplingRate = static_cast<AudioSamplingRate>(std::atol(audioModuleInfo.micRefRate.c_str()));
    sourceInfo.micRefFormat = TransFormatFromStringToEnum(audioModuleInfo.micRefFormat);
    sourceInfo.micRefChannels = static_cast<AudioChannel>(std::atol(audioModuleInfo.micRefChannels.c_str()));
    sourceInfo.openMicSpeaker = static_cast<uint32_t>(std::atol(audioModuleInfo.OpenMicSpeaker.c_str()));
    sourceInfo.micRefFrameLen = DEFAULT_MULTICHANNEL_FRAME_LEN_MS * (sourceInfo.micRefSamplingRate / MS_PER_SECOND);
    return SUCCESS;
}
375
// Returns true when any of the compared source-configuration fields differ
// between the new and old info. Comparison is done by building a std::tie
// tuple of references over the relevant fields of each struct and comparing
// the tuples lexicographically; add new fields to the lambda to include
// them in the difference check.
bool CheckSourceInfoIsDifferent(const HpaeSourceInfo &info, const HpaeSourceInfo &oldInfo)
{
    auto getKey = [](const HpaeSourceInfo &sourceInfo) {
        return std::tie(
            sourceInfo.deviceNetId,
            sourceInfo.deviceClass,
            sourceInfo.adapterName,
            sourceInfo.sourceName,
            sourceInfo.sourceType,
            sourceInfo.filePath,
            sourceInfo.deviceName,
            sourceInfo.frameLen,
            sourceInfo.samplingRate,
            sourceInfo.format,
            sourceInfo.channels,
            sourceInfo.channelLayout,
            sourceInfo.deviceType,
            sourceInfo.volume,
            sourceInfo.openMicSpeaker,
            sourceInfo.ecType,
            sourceInfo.ecFrameLen,
            sourceInfo.ecSamplingRate,
            sourceInfo.ecFormat,
            sourceInfo.ecChannels,
            sourceInfo.micRef,
            sourceInfo.micRefFrameLen,
            sourceInfo.micRefSamplingRate,
            sourceInfo.micRefFormat,
            sourceInfo.micRefChannels);
    };
    return getKey(info) != getKey(oldInfo);
}
408
// Dumps the raw (string) fields of an AudioModuleInfo to the info log,
// three fields per log line, for debugging port-open requests.
void PrintAudioModuleInfo(const AudioModuleInfo &audioModuleInfo)
{
    AUDIO_INFO_LOG("rate: %{public}s ch: %{public}s buffersize: %{public}s ",
        audioModuleInfo.rate.c_str(),
        audioModuleInfo.channels.c_str(),
        audioModuleInfo.bufferSize.c_str());
    AUDIO_INFO_LOG("format: %{public}s name: %{public}s lib: %{public}s ",
        audioModuleInfo.format.c_str(),
        audioModuleInfo.name.c_str(),
        audioModuleInfo.lib.c_str());
    AUDIO_INFO_LOG("deviceType: %{public}s className: %{public}s adapterName: %{public}s ",
        audioModuleInfo.deviceType.c_str(),
        audioModuleInfo.className.c_str(),
        audioModuleInfo.adapterName.c_str());
    AUDIO_INFO_LOG("OpenMicSpeaker: %{public}s networkId: %{public}s fileName: %{public}s ",
        audioModuleInfo.OpenMicSpeaker.c_str(),
        audioModuleInfo.networkId.c_str(),
        audioModuleInfo.fileName.c_str());
    AUDIO_INFO_LOG("fixedLatency: %{public}s sinkLatency: %{public}s renderInIdleState: %{public}s ",
        audioModuleInfo.fixedLatency.c_str(),
        audioModuleInfo.sinkLatency.c_str(),
        audioModuleInfo.renderInIdleState.c_str());
    AUDIO_INFO_LOG("sceneName: %{public}s sourceType: %{public}s offloadEnable: %{public}s ",
        audioModuleInfo.sceneName.c_str(),
        audioModuleInfo.sourceType.c_str(),
        audioModuleInfo.offloadEnable.c_str());
}
436
TransFormatFromEnumToString(AudioSampleFormat format)437 std::string TransFormatFromEnumToString(AudioSampleFormat format)
438 {
439 CHECK_AND_RETURN_RET_LOG(g_formatFromParserEnumToStr.find(format) != g_formatFromParserEnumToStr.end(),
440 "", "error param format");
441 return g_formatFromParserEnumToStr[format];
442 }
443
// Undoes TransNodeInfoForCollaboration: if the node is currently in the
// collaborative scene, restores the remembered previous effect scene and
// recomputes the processor scene type from it. No-op otherwise.
void RecoverNodeInfoForCollaboration(HpaeNodeInfo &nodeInfo)
{
    if (nodeInfo.effectInfo.effectScene == SCENE_COLLABORATIVE) {
        nodeInfo.effectInfo.effectScene = nodeInfo.effectInfo.lastEffectScene;
        nodeInfo.sceneType = TransEffectSceneToSceneType(nodeInfo.effectInfo.effectScene);
        AUDIO_INFO_LOG("collaboration disabled, effectScene changed to %{public}d, sceneType changed to %{public}d",
            nodeInfo.effectInfo.effectScene, nodeInfo.sceneType);
    }
}
453
// Flattens a sessionId -> HpaeSessionInfo map into a vector of
// HpaeInputOutputInfo records for dump/debug output. Entries are appended
// to dumpInfo (existing contents are preserved); iteration order follows
// the unordered_map and is therefore unspecified.
void TransStreamInfoToStreamDumpInfo(const std::unordered_map<uint32_t, HpaeSessionInfo> &streamInfoMap,
    std::vector<HpaeInputOutputInfo> &dumpInfo)
{
    std::transform(streamInfoMap.begin(), streamInfoMap.end(), std::back_inserter(dumpInfo),
        [](const auto &pair) {
            const HpaeSessionInfo &sessionInfo = pair.second;
            std::string config;
            // Render the stream's device/format configuration as a string.
            TransDeviceInfoToString(sessionInfo.streamInfo, config);
            return HpaeInputOutputInfo {
                .sessionId = sessionInfo.streamInfo.sessionId,
                .deviceName = sessionInfo.streamInfo.deviceName,
                .uid = sessionInfo.streamInfo.uid,
                .pid = sessionInfo.streamInfo.pid,
                .tokenId = sessionInfo.streamInfo.tokenId,
                .offloadEnable = sessionInfo.offloadEnable,
                .privacyType = sessionInfo.streamInfo.privacyType,
                .config = config,
                .state = sessionInfo.state,
                .startTime = sessionInfo.startTime
            };
        });
}
476 } // namespace HPAE
477 } // namespace AudioStandard
478 } // namespace OHOS