1 /*
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #ifndef LOG_TAG
16 #define LOG_TAG "HdiSink"
17 #endif
18
19 #include <config.h>
20 #include <pulse/rtclock.h>
21 #include <pulse/timeval.h>
22 #include <pulse/xmalloc.h>
23 #include <pulsecore/log.h>
24 #include <pulsecore/modargs.h>
25 #include <pulsecore/module.h>
26 #include <pulsecore/rtpoll.h>
27 #include <pulsecore/sink.h>
28 #include <pulsecore/thread-mq.h>
29 #include <pulsecore/thread.h>
30 #include <pulsecore/memblock.c>
31 #include <pulsecore/mix.h>
32 #include <pulse/volume.h>
33 #include <pulsecore/protocol-native.c>
34 #include <pulsecore/memblockq.c>
35
36 #include <stddef.h>
37 #include <stdint.h>
38 #include <stdbool.h>
39 #include <string.h>
40 #include <inttypes.h>
41 #include <sys/types.h>
42 #include <pthread.h>
43 #include <semaphore.h>
44
45 #include "securec.h"
46
47 #include "audio_hdi_log.h"
48 #include "audio_schedule.h"
49 #include "audio_utils_c.h"
50 #include "audio_hdiadapter_info.h"
51 #include "volume_tools_c.h"
52 #include "audio_volume_c.h"
53 #include "renderer_sink_adapter.h"
54 #include "audio_effect_chain_adapter.h"
55 #include "playback_capturer_adapter.h"
56 #include "sink_userdata.h"
57 #include "time.h"
58
59 #define DEFAULT_SINK_NAME "hdi_output"
60 #define DEFAULT_AUDIO_DEVICE_NAME "Speaker"
61 #define DEFAULT_DEVICE_CLASS "primary"
62 #define DEFAULT_DEVICE_NETWORKID "LocalDevice"
63 #define DEFAULT_BUFFER_SIZE 8192
64 #define MAX_SINK_VOLUME_LEVEL 1.0
65 #define DEFAULT_WRITE_TIME 1000
66 #define MIX_BUFFER_LENGTH (pa_page_size())
67 #define MAX_REWIND (7000 * PA_USEC_PER_MSEC)
68 #define USEC_PER_SEC 1000000
69 #define DEFAULT_IN_CHANNEL_NUM 2
70 #define PRIMARY_CHANNEL_NUM 2
71 #define IN_CHANNEL_NUM_MAX 16
72 #define OUT_CHANNEL_NUM_MAX 2
73 #define DEFAULT_FRAMELEN 2048
74 #define SCENE_TYPE_NUM 9
75 #define HDI_MIN_MS_MAINTAIN 40
76 #define OFFLOAD_HDI_CACHE1 200 // ms, should match the value used in the client
77 #define OFFLOAD_HDI_CACHE2 7000 // ms, should match the value used in the client
78 #define OFFLOAD_FRAME_SIZE 40
79 #define OFFLOAD_HDI_CACHE1_PLUS (OFFLOAD_HDI_CACHE1 + OFFLOAD_FRAME_SIZE + 5) // ms, add 1 frame and 5ms
80 #define OFFLOAD_HDI_CACHE2_PLUS (OFFLOAD_HDI_CACHE2 + OFFLOAD_FRAME_SIZE + 5) // to make sure the cache can be filled completely
81 #define SPRINTF_STR_LEN 100
82 #define DEFAULT_MULTICHANNEL_NUM 6
83 #define DEFAULT_NUM_CHANNEL 2
84 #define DEFAULT_MULTICHANNEL_CHANNELLAYOUT 1551
85 #define DEFAULT_CHANNELLAYOUT 3
86 #define OFFLOAD_SET_BUFFER_SIZE_NUM 5
87 #define SPATIALIZATION_FADING_FRAMECOUNT 5
88 #define POSSIBLY_UNUSED __attribute__((unused))
89 #define MIN_SLEEP_FOR_USEC 2000
90 #define DEFAULT_BLOCK_USEC 20000
91 #define FADE_IN_BEGIN 0.0
92 #define FADE_IN_END 1.0
93 #define FADE_OUT_BEGIN 1.0
94 #define FADE_OUT_END 0.0
95 #define PRINT_INTERVAL_FRAME_COUNT 100
96
97 const int64_t LOG_LOOP_THRESHOLD = 50 * 60 * 9; // about 3 min
98 const uint64_t DEFAULT_GETLATENCY_LOG_THRESHOLD_MS = 100;
99 const uint32_t SAMPLE_RATE_96K_HZ = 96000; // 96khz
100 const uint8_t CHANNEL_COUNT_2 = 2; // 2ch
101
102 const char *DEVICE_CLASS_PRIMARY = "primary";
103 const char *DEVICE_CLASS_A2DP = "a2dp";
104 const char *DEVICE_CLASS_REMOTE = "remote";
105 const char *DEVICE_CLASS_OFFLOAD = "offload";
106 const char *DEVICE_CLASS_MULTICHANNEL = "multichannel";
107 const char *SINK_NAME_INNER_CAPTURER = "InnerCapturerSink";
108 const char *SINK_NAME_REMOTE_CAST_INNER_CAPTURER = "RemoteCastInnerCapturer";
109 const char *DUP_STEAM_NAME = "DupStream"; // should be the same as DUP_STEAM in audio_info.h
110 const char *BT_SINK_NAME = "Bt_Speaker";
111 const char *MCH_SINK_NAME = "MCH_Speaker";
112 const char *OFFLOAD_SINK_NAME = "Offload_Speaker";
113 const char *DP_SINK_NAME = "DP_speaker";
114
115 const int32_t WAIT_CLOSE_PA_OR_EFFECT_TIME = 4; // secs
116 const int32_t MONITOR_CLOSE_PA_TIME_SEC = 5 * 60; // 5min
117 bool g_speakerPaAllStreamVolumeZero = false;
118 bool g_onlyPrimarySpeakerPaLoading = false;
119 bool g_paHaveDisabled = false;
120 time_t g_speakerPaAllStreamStartVolZeroTime = 0;
121 bool g_speakerPaHaveClosed = false;
122 time_t g_speakerPaClosedTime = 0;
123 bool g_effectAllStreamVolumeZeroMap[SCENE_TYPE_NUM] = {false};
124 bool g_effectHaveDisabledMap[SCENE_TYPE_NUM] = {false};
125 time_t g_effectStartVolZeroTimeMap[SCENE_TYPE_NUM] = {0};
126 char *const SCENE_TYPE_SET[SCENE_TYPE_NUM] = {"SCENE_DEFAULT", "SCENE_MUSIC", "SCENE_GAME", "SCENE_MOVIE",
127 "SCENE_SPEECH", "SCENE_RING", "SCENE_VOIP", "SCENE_OTHERS", "EFFECT_NONE"};
128 const int32_t COMMON_SCENE_TYPE_INDEX = 0;
129 const uint64_t FADE_OUT_TIME = 5000; // 5ms
130
131 enum HdiInputType { HDI_INPUT_TYPE_PRIMARY, HDI_INPUT_TYPE_OFFLOAD, HDI_INPUT_TYPE_MULTICHANNEL };
132
133 enum {
134 HDI_INIT,
135 HDI_DEINIT,
136 HDI_START,
137 HDI_STOP,
138 HDI_RENDER,
139 QUIT
140 };
141
142 enum AudioOffloadType {
143 /**
144 * Indicates the default audio offload state.
145 */
146 OFFLOAD_DEFAULT = -1,
147 /**
148 * Indicates audio offload state : screen is active & app is foreground.
149 */
150 OFFLOAD_ACTIVE_FOREGROUND = 0,
151 /**
152 * Indicates audio offload state : screen is active & app is background.
153 */
154 OFFLOAD_ACTIVE_BACKGROUND = 1,
155 /**
156 * Indicates audio offload state : screen is inactive & app is background.
157 */
158 OFFLOAD_INACTIVE_BACKGROUND = 3,
159 };
160
161 static int32_t g_effectProcessFrameCount = 0;
162 static void UserdataFree(struct Userdata *u);
163 static int32_t PrepareDevice(struct Userdata *u, const char *filePath);
164
165 static int32_t PrepareDeviceOffload(struct Userdata *u);
166 static char *GetStateInfo(pa_sink_state_t state);
167 static char *GetInputStateInfo(pa_sink_input_state_t state);
168 static void PaInputStateChangeCb(pa_sink_input *i, pa_sink_input_state_t state);
169 static void OffloadLock(struct Userdata *u);
170 static void OffloadUnlock(struct Userdata *u);
171 static int32_t UpdatePresentationPosition(struct Userdata *u);
172 static bool InputIsPrimary(pa_sink_input *i);
173 static bool InputIsOffload(pa_sink_input *i);
174 static void GetSinkInputName(pa_sink_input *i, char *str, int len);
175 static const char *safeProplistGets(const pa_proplist *p, const char *key, const char *defstr);
176 static void StartOffloadHdi(struct Userdata *u, pa_sink_input *i);
177 static void StartPrimaryHdiIfRunning(struct Userdata *u);
178 static void StartMultiChannelHdiIfRunning(struct Userdata *u);
179 static void CheckInputChangeToOffload(struct Userdata *u, pa_sink_input *i);
180 static void ResetVolumeBySinkInputState(pa_sink_input *i, pa_sink_input_state_t state);
181 static void *AllocateBuffer(size_t size);
182 static bool AllocateEffectBuffer(struct Userdata *u);
183 static void FreeEffectBuffer(struct Userdata *u);
184 static void ResetBufferAttr(struct Userdata *u);
185 static enum HdiAdapterFormat ConvertPaToHdiAdapterFormat(pa_sample_format_t format);
186
187 // BEGIN Utility functions
188 #define FLOAT_EPS 1e-9f
189 #define MEMBLOCKQ_MAXLENGTH (16*1024*16)
190 #define OFFSET_BIT_24 3
191 #define BIT_DEPTH_TWO 2
192 #define BIT_8 8
193 #define BIT_16 16
194 #define BIT_24 24
195 #define BIT_32 32
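// Helpers for 24-bit little-endian samples stored as three bytes (low byte first).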
196 static uint32_t Read24Bit(const uint8_t *p)
197 {
198 return ((uint32_t) p[BIT_DEPTH_TWO] << BIT_16) | ((uint32_t) p[1] << BIT_8) | ((uint32_t) p[0]);
199 }
200
201 static void Write24Bit(uint8_t *p, uint32_t u)
202 {
203 p[BIT_DEPTH_TWO] = (uint8_t) (u >> BIT_16);
204 p[1] = (uint8_t) (u >> BIT_8);
205 p[0] = (uint8_t) u;
206 }
207
208 static void ConvertFrom16BitToFloat(unsigned n, const int16_t *a, float *b)
209 {
210 for (; n > 0; n--) {
211 *(b++) = *(a++) * (1.0f / (1 << (BIT_16 - 1)));
212 }
213 }
214
215 static void ConvertFrom24BitToFloat(unsigned n, const uint8_t *a, float *b)
216 {
217 for (; n > 0; n--) {
218 int32_t s = Read24Bit(a) << BIT_8;
219 *b = s * (1.0f / (1U << (BIT_32 - 1)));
220 a += OFFSET_BIT_24;
221 b++;
222 }
223 }
224
225 static void ConvertFrom32BitToFloat(unsigned n, const int32_t *a, float *b)
226 {
227 for (; n > 0; n--) {
228 *(b++) = *(a++) * (1.0f / (1U << (BIT_32 - 1)));
229 }
230 }
231
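// Clamp a float sample to just inside [-1.0, 1.0] so the integer conversions below cannot overflow.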
232 static float CapMax(float v)
233 {
234 float value = v;
235 if (v > 1.0f) {
236 value = 1.0f - FLOAT_EPS;
237 } else if (v < -1.0f) {
238 value = -1.0f + FLOAT_EPS;
239 }
240 return value;
241 }
242
243 static void ConvertFromFloatTo16Bit(unsigned n, const float *a, int16_t *b)
244 {
245 for (; n > 0; n--) {
246 float tmp = *a++;
247 float v = CapMax(tmp) * (1 << (BIT_16 - 1));
248 *(b++) = (int16_t) v;
249 }
250 }
251
252 static void ConvertFromFloatTo24Bit(unsigned n, const float *a, uint8_t *b)
253 {
254 for (; n > 0; n--) {
255 float tmp = *a++;
256 float v = CapMax(tmp) * (1U << (BIT_32 - 1));
257 Write24Bit(b, ((int32_t) v) >> BIT_8);
258 b += OFFSET_BIT_24;
259 }
260 }
261
262 static void ConvertFromFloatTo32Bit(unsigned n, const float *a, int32_t *b)
263 {
264 for (; n > 0; n--) {
265 float tmp = *a++;
266 float v = CapMax(tmp) * (1U << (BIT_32 - 1));
267 *(b++) = (int32_t) v;
268 }
269 }
270
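// Convert n samples of the given PA sample format to normalized float; formats other than S16/S24/S32 LE are copied through unchanged.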
271 static void ConvertToFloat(pa_sample_format_t format, unsigned n, void *src, float *dst)
272 {
273 pa_assert(src);
274 pa_assert(dst);
275 int32_t ret;
276 switch (format) {
277 case PA_SAMPLE_S16LE:
278 ConvertFrom16BitToFloat(n, src, dst);
279 break;
280 case PA_SAMPLE_S24LE:
281 ConvertFrom24BitToFloat(n, src, dst);
282 break;
283 case PA_SAMPLE_S32LE:
284 ConvertFrom32BitToFloat(n, src, dst);
285 break;
286 default:
287 ret = memcpy_s(dst, n, src, n);
288 if (ret != 0) {
289 float *srcFloat = (float *)src;
290 for (uint32_t i = 0; i < n; i++) {
291 dst[i] = srcFloat[i];
292 }
293 }
294 break;
295 }
296 }
297
298 static void ConvertFromFloat(pa_sample_format_t format, unsigned n, float *src, void *dst)
299 {
300 pa_assert(src);
301 pa_assert(dst);
302 int32_t ret;
303 switch (format) {
304 case PA_SAMPLE_S16LE:
305 ConvertFromFloatTo16Bit(n, src, dst);
306 break;
307 case PA_SAMPLE_S24LE:
308 ConvertFromFloatTo24Bit(n, src, dst);
309 break;
310 case PA_SAMPLE_S32LE:
311 ConvertFromFloatTo32Bit(n, src, dst);
312 break;
313 default:
314 ret = memcpy_s(dst, n, src, n);
315 if (ret != 0) {
316 float *dstFloat = (float *)dst;
317 for (uint32_t i = 0; i < n; i++) {
318 dstFloat[i] = src[i];
319 }
320 }
321 break;
322 }
323 }
324
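// Rebuild the sink input's resampler so its output channel count and layout match what the effect chain expects (or the multichannel sink configuration when mchFlag is set).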
325 static void updateResampler(pa_sink_input *sinkIn, const char *sceneType, bool mchFlag)
326 {
327 uint32_t processChannels = DEFAULT_NUM_CHANNEL;
328 uint64_t processChannelLayout = DEFAULT_CHANNELLAYOUT;
329 if (mchFlag) {
330 struct Userdata *u = sinkIn->sink->userdata;
331 processChannels = u->multiChannel.sinkChannel;
332 processChannelLayout = u->multiChannel.sinkChannelLayout;
333 } else {
334 EffectChainManagerReturnEffectChannelInfo(sceneType, &processChannels, &processChannelLayout);
335 }
336
337 pa_resampler *r;
338 pa_sample_spec ss = sinkIn->thread_info.resampler->o_ss;
339 pa_channel_map processCm;
340 ConvertChLayoutToPaChMap(processChannelLayout, &processCm);
341 processCm.channels = processChannels;
342 if (processChannels == sinkIn->thread_info.resampler->i_ss.channels) {
343 ss.channels = sinkIn->thread_info.resampler->i_ss.channels;
344 pa_channel_map cm = sinkIn->thread_info.resampler->i_cm;
345 if (pa_channel_map_equal(&sinkIn->thread_info.resampler->i_cm, &processCm)) {
346 return;
347 }
348 r = pa_resampler_new(sinkIn->thread_info.resampler->mempool,
349 &sinkIn->thread_info.resampler->i_ss,
350 &sinkIn->thread_info.resampler->i_cm,
351 &ss, &cm,
352 sinkIn->core->lfe_crossover_freq,
353 sinkIn->thread_info.resampler->method,
354 sinkIn->thread_info.resampler->flags);
355 } else {
356 ss.channels = processChannels;
357 if (ss.channels == sinkIn->thread_info.resampler->o_ss.channels) {
358 return;
359 }
360 r = pa_resampler_new(sinkIn->thread_info.resampler->mempool,
361 &sinkIn->thread_info.resampler->i_ss,
362 &sinkIn->thread_info.resampler->i_cm,
363 &ss, &processCm,
364 sinkIn->core->lfe_crossover_freq,
365 sinkIn->thread_info.resampler->method,
366 sinkIn->thread_info.resampler->flags);
367 }
368 pa_resampler_free(sinkIn->thread_info.resampler);
369 sinkIn->thread_info.resampler = r;
370 return;
371 }
372
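// Write a whole memchunk to the given HDI renderer, looping until every byte is consumed; returns the bytes written, or a negative value if the adapter reports an error.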
373 static ssize_t RenderWrite(struct RendererSinkAdapter *sinkAdapter, pa_memchunk *pchunk)
374 {
375 size_t index;
376 size_t length;
377 ssize_t count = 0;
378 void *p = NULL;
379
380 pa_assert(pchunk);
381
382 index = pchunk->index;
383 length = pchunk->length;
384 p = pa_memblock_acquire(pchunk->memblock);
385 pa_assert(p);
386
387 while (true) {
388 uint64_t writeLen = 0;
389
390 int32_t ret = sinkAdapter->RendererRenderFrame(sinkAdapter, ((char*)p + index),
391 (uint64_t)length, &writeLen);
392 if (writeLen > length) {
393 AUDIO_ERR_LOG("Error writeLen > actual bytes. Length: %zu, Written: %" PRIu64 " bytes, %d ret",
394 length, writeLen, ret);
395 count = -1 - count;
396 break;
397 }
398 if (writeLen == 0) {
399 AUDIO_ERR_LOG("Failed to render Length: %{public}zu, Written: %{public}" PRIu64 " bytes, %{public}d ret",
400 length, writeLen, ret);
401 count = -1 - count;
402 break;
403 } else {
404 count += (ssize_t)writeLen;
405 index += writeLen;
406 length -= writeLen;
407 if (length == 0) {
408 break;
409 }
410 }
411 }
412 pa_memblock_release(pchunk->memblock);
413 pa_memblock_unref(pchunk->memblock);
414
415 return count;
416 }
417
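// Read the offload power policy (AudioOffloadType) that the client stored in the stream proplist; defaults to OFFLOAD_ACTIVE_FOREGROUND.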
418 static enum AudioOffloadType GetInputPolicyState(pa_sink_input *i)
419 {
420 return atoi(safeProplistGets(i->proplist, "stream.offload.statePolicy", "0"));
421 }
422
423 static void OffloadSetHdiVolume(pa_sink_input *i)
424 {
425 if (!InputIsOffload(i)) {
426 return;
427 }
428
429 struct Userdata *u = i->sink->userdata;
430 const char *streamType = safeProplistGets(i->proplist, "stream.type", "NULL");
431 const char *sessionIDStr = safeProplistGets(i->proplist, "stream.sessionID", "NULL");
432 const char *deviceClass = GetDeviceClass(u->offload.sinkAdapter->deviceClass);
433 uint32_t sessionID = sessionIDStr != NULL ? (uint32_t)atoi(sessionIDStr) : 0;
434 float volumeEnd = GetCurVolume(sessionID, streamType, deviceClass);
435 float volumeBeg = GetPreVolume(sessionID);
436 float fadeBeg = 1.0f;
437 float fadeEnd = 1.0f;
438 if (!pa_safe_streq(streamType, "ultrasonic")) {
439 GetStreamVolumeFade(sessionID, &fadeBeg, &fadeEnd);
440 }
441 if (volumeBeg != volumeEnd || fadeBeg != fadeEnd) {
442 AUDIO_INFO_LOG("sessionID:%{public}s, volumeBeg:%{public}f, volumeEnd:%{public}f"
443 ", fadeBeg:%{public}f, fadeEnd:%{public}f",
444 sessionIDStr, volumeBeg, volumeEnd, fadeBeg, fadeEnd);
445 if (volumeBeg != volumeEnd) {
446 SetPreVolume(sessionID, volumeEnd);
447 MonitorVolume(sessionID, true);
448 }
449 if (fadeBeg != fadeEnd) {
450 SetStreamVolumeFade(sessionID, fadeEnd, fadeEnd);
451 }
452 }
453 u->offload.sinkAdapter->RendererSinkSetVolume(u->offload.sinkAdapter, volumeEnd, volumeEnd);
454 }
455
456 static void OffloadSetHdiBufferSize(pa_sink_input *i)
457 {
458 if (!InputIsOffload(i)) {
459 return;
460 }
461
462 struct Userdata *u = i->sink->userdata;
463 const uint32_t bufSize = (GetInputPolicyState(i) == OFFLOAD_INACTIVE_BACKGROUND ?
464 OFFLOAD_HDI_CACHE2 : OFFLOAD_HDI_CACHE1);
465 u->offload.sinkAdapter->RendererSinkSetBufferSize(u->offload.sinkAdapter, bufSize);
466 }
467
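// Write one chunk to the offload HDI sink: start the HDI if needed, fade in the very first frame, push the cached volume and buffer size after the first successful write, and return 1 when the HDI cache is full, -1 on error, 0 on success.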
468 static int32_t RenderWriteOffload(struct Userdata *u, pa_sink_input *i, pa_memchunk *pchunk)
469 {
470 size_t index;
471 size_t length;
472 void *p = NULL;
473
474 pa_assert(pchunk);
475
476 index = pchunk->index;
477 length = pchunk->length;
478 p = pa_memblock_acquire(pchunk->memblock);
479 pa_assert(p);
480
481 uint64_t writeLen = 0;
482 uint64_t now = pa_rtclock_now();
483 if (!u->offload.isHDISinkStarted) {
484 AUDIO_DEBUG_LOG("StartOffloadHdi before write, because maybe sink switch");
485 StartOffloadHdi(u, i);
486 }
487 if (u->offload.firstWriteHdi) {
488 AudioRawFormat rawFormat;
489 rawFormat.format = (uint32_t)ConvertPaToHdiAdapterFormat(u->ss.format);
490 rawFormat.channels = (uint32_t)u->ss.channels;
491 ProcessVol((uint8_t*)p + index, length, rawFormat, 0, 1); // do fade in
492 }
493 int32_t ret = u->offload.sinkAdapter->RendererRenderFrame(u->offload.sinkAdapter, ((char*)p + index),
494 (uint64_t)length, &writeLen);
495 pa_memblock_release(pchunk->memblock);
496 if (writeLen != length && writeLen != 0) {
497 AUDIO_ERR_LOG("Error writeLen != actual bytes. Length: %zu, Written: %" PRIu64 " bytes, %d ret",
498 length, writeLen, ret);
499 return -1;
500 }
501 if (ret == 0 && u->offload.firstWriteHdi && writeLen == length) {
502 u->offload.firstWriteHdi = false;
503 u->offload.hdiPosTs = now;
504 u->offload.hdiPos = 0;
505 // If the HDI is flushing, setting the volume would block,
506 // so the volume is applied here after the first successful render frame.
507 OffloadSetHdiVolume(i);
508 }
509 if (ret == 0 && u->offload.setHdiBufferSizeNum > 0 && writeLen == length) {
510 u->offload.setHdiBufferSizeNum--;
511 OffloadSetHdiBufferSize(i);
512 }
513 if (ret == 0 && writeLen == 0 && !u->offload.firstWriteHdi) { // is full
514 AUDIO_DEBUG_LOG("RenderWriteOffload, hdi is full, break");
515 return 1; // 1 indicates full
516 } else if (writeLen == 0) {
517 AUDIO_ERR_LOG("Failed to render Length: %{public}zu, Written: %{public}" PRIu64 " bytes, %{public}d ret",
518 length, writeLen, ret);
519 return -1;
520 }
521 return 0;
522 }
523
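// Callback from the offload HDI renderer: on write-completed, reset the hdistate flag, update the presentation position and wake the render thread via the message queue.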
524 static void OffloadCallback(const enum RenderCallbackType type, int8_t *userdata)
525 {
526 struct Userdata *u = (struct Userdata *)userdata;
527 switch (type) {
528 case CB_NONBLOCK_WRITE_COMPLETED: { //need more data
529 const int hdistate = pa_atomic_load(&u->offload.hdistate);
530 if (hdistate == 1) {
531 pa_atomic_store(&u->offload.hdistate, 0);
532 OffloadLock(u);
533 UpdatePresentationPosition(u);
534 }
535 if (u->thread_mq.inq) {
536 pa_asyncmsgq_post(u->thread_mq.inq, NULL, 0, NULL, 0, NULL, NULL);
537 }
538 break;
539 }
540 case CB_DRAIN_COMPLETED:
541 case CB_FLUSH_COMPLETED:
542 case CB_RENDER_FULL:
543 case CB_ERROR_OCCUR:
544 break;
545 default:
546 break;
547 }
548 }
549
550 static void RegOffloadCallback(struct Userdata *u)
551 {
552 u->offload.sinkAdapter->RendererRegCallback(u->offload.sinkAdapter, (int8_t *)OffloadCallback, (int8_t *)u);
553 }
554
555 static ssize_t TestModeRenderWrite(struct Userdata *u, pa_memchunk *pchunk)
556 {
557 size_t index;
558 size_t length;
559 ssize_t count = 0;
560 void *p = NULL;
561
562 pa_assert(pchunk);
563
564 index = pchunk->index;
565 length = pchunk->length;
566 p = pa_memblock_acquire(pchunk->memblock);
567 pa_assert(p);
568
569 if (*((int32_t*)p) > 0) {
570 AUDIO_DEBUG_LOG("RenderWrite Write: %{public}d", ++u->writeCount);
571 }
572 AUDIO_DEBUG_LOG("RenderWrite Write renderCount: %{public}d", ++u->renderCount);
573
574 while (true) {
575 uint64_t writeLen = 0;
576
577 int32_t ret = u->primary.sinkAdapter->RendererRenderFrame(u->primary.sinkAdapter, ((char *)p + index),
578 (uint64_t)length, &writeLen);
579 if (writeLen > length) {
580 AUDIO_ERR_LOG("Error writeLen > actual bytes. Length: %zu, Written: %" PRIu64 " bytes, %d ret",
581 length, writeLen, ret);
582 count = -1 - count;
583 break;
584 }
585 if (writeLen == 0) {
586 AUDIO_ERR_LOG("Failed to render Length: %zu, Written: %" PRIu64 " bytes, %d ret",
587 length, writeLen, ret);
588 count = -1 - count;
589 break;
590 } else {
591 count += (ssize_t)writeLen;
592 index += writeLen;
593 length -= writeLen;
594 if (length == 0) {
595 break;
596 }
597 }
598 }
599 pa_memblock_release(pchunk->memblock);
600 pa_memblock_unref(pchunk->memblock);
601
602 return count;
603 }
604
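// A sink input feeds the inner capturer only when inner capture is enabled and both its stream usage and privacy type allow it.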
605 static bool IsInnerCapturer(pa_sink_input *sinkIn)
606 {
607 pa_sink_input_assert_ref(sinkIn);
608
609 if (!GetInnerCapturerState()) {
610 return false;
611 }
612
613 const char *usageStr = pa_proplist_gets(sinkIn->proplist, "stream.usage");
614 const char *privacyTypeStr = pa_proplist_gets(sinkIn->proplist, "stream.privacyType");
615 int32_t usage = -1;
616 int32_t privacyType = -1;
617 bool usageSupport = false;
618 bool privacySupport = true;
619
620 if (privacyTypeStr != NULL) {
621 pa_atoi(privacyTypeStr, &privacyType);
622 privacySupport = IsPrivacySupportInnerCapturer(privacyType);
623 }
624
625 if (usageStr != NULL) {
626 pa_atoi(usageStr, &usage);
627 usageSupport = IsStreamSupportInnerCapturer(usage);
628 }
629 return privacySupport && usageSupport;
630 }
631
632 static const char *safeProplistGets(const pa_proplist *p, const char *key, const char *defstr)
633 {
634 const char *res = pa_proplist_gets(p, key);
635 if (res == NULL) {
636 return defstr;
637 }
638 return res;
639 }
640
641 // modified from PulseAudio's inputs_drop
642 static void InputsDropFromInputs(pa_mix_info *infoInputs, unsigned nInputs, pa_mix_info *info, unsigned n,
643 pa_memchunk *result);
644
645 static unsigned GetInputsInfo(enum HdiInputType type, bool isRun, pa_sink *s, pa_mix_info *info, unsigned maxinfo);
646
647 static unsigned SinkRenderPrimaryClusterCap(pa_sink *si, size_t *length, pa_mix_info *infoIn, unsigned maxInfo)
648 {
649 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryClusterCap:len:%zu", *length);
650 pa_sink_input *sinkIn;
651
652 pa_sink_assert_ref(si);
653 pa_sink_assert_io_context(si);
654 pa_assert(infoIn);
655
656 unsigned n = 0;
657 void *state = NULL;
658 size_t mixlength = *length;
659 while ((sinkIn = pa_hashmap_iterate(si->thread_info.inputs, &state, NULL)) && maxInfo > 0) {
660 if (IsInnerCapturer(sinkIn) && InputIsPrimary(sinkIn)) {
661 pa_sink_input_assert_ref(sinkIn);
662
663 // max_rewind is 0 by default; it must be at least u->buffer_size for InnerCapSinkInputsRewind to work.
664 if (pa_memblockq_get_maxrewind(sinkIn->thread_info.render_memblockq) == 0) {
665 AUTO_CTRACE("hdi_sink::pa_sink_input_update_max_rewind:%u len:%zu", sinkIn->index, *length);
666 pa_sink_input_update_max_rewind(sinkIn, *length);
667 }
668 AUTO_CTRACE("hdi_sink::ClusterCap::pa_sink_input_peek:%u len:%zu", sinkIn->index, *length);
669 pa_sink_input_peek(sinkIn, *length, &infoIn->chunk, &infoIn->volume);
670
671 if (mixlength == 0 || infoIn->chunk.length < mixlength)
672 mixlength = infoIn->chunk.length;
673
674 if (pa_memblock_is_silence(infoIn->chunk.memblock)) {
675 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryClusterCap::is_silence");
676 pa_memblock_unref(infoIn->chunk.memblock);
677 continue;
678 }
679
680 infoIn->userdata = pa_sink_input_ref(sinkIn);
681 pa_assert(infoIn->chunk.memblock);
682 pa_assert(infoIn->chunk.length > 0);
683
684 infoIn++;
685 n++;
686 maxInfo--;
687 }
688 }
689
690 if (mixlength > 0) {
691 *length = mixlength;
692 }
693
694 return n;
695 }
696
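// Mix the peeked inputs into chunkIn: zero inputs produce silence, a single input is copied with soft volume applied, and multiple inputs go through pa_mix().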
697 static void SinkRenderPrimaryMix(pa_sink *si, size_t length, pa_mix_info *infoIn, unsigned n, pa_memchunk *chunkIn)
698 {
699 if (n == 0) {
700 if (chunkIn->length > length)
701 chunkIn->length = length;
702
703 pa_silence_memchunk(chunkIn, &si->sample_spec);
704 } else if (n == 1) {
705 pa_cvolume volume;
706
707 if (chunkIn->length > length)
708 chunkIn->length = length;
709
710 pa_sw_cvolume_multiply(&volume, &si->thread_info.soft_volume, &infoIn[0].volume);
711
712 if (si->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
713 pa_silence_memchunk(chunkIn, &si->sample_spec);
714 } else {
715 pa_memchunk tmpChunk;
716
717 tmpChunk = infoIn[0].chunk;
718 pa_memblock_ref(tmpChunk.memblock);
719
720 if (tmpChunk.length > length)
721 tmpChunk.length = length;
722
723 if (!pa_cvolume_is_norm(&volume)) {
724 pa_memchunk_make_writable(&tmpChunk, 0);
725 pa_volume_memchunk(&tmpChunk, &si->sample_spec, &volume);
726 }
727
728 pa_memchunk_memcpy(chunkIn, &tmpChunk);
729 pa_memblock_unref(tmpChunk.memblock);
730 }
731 } else {
732 void *ptr;
733
734 ptr = pa_memblock_acquire(chunkIn->memblock);
735
736 chunkIn->length = pa_mix(infoIn, n,
737 (uint8_t*) ptr + chunkIn->index, length,
738 &si->sample_spec,
739 &si->thread_info.soft_volume,
740 si->thread_info.soft_muted);
741
742 pa_memblock_release(chunkIn->memblock);
743 }
744 }
745
746 static void SinkRenderPrimaryMixCap(pa_sink *si, size_t length, pa_mix_info *infoIn, unsigned n, pa_memchunk *chunkIn)
747 {
748 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryMixCap:%u:len:%zu", n, chunkIn->length);
749 if (n == 0) {
750 if (chunkIn->length > length) {
751 chunkIn->length = length;
752 }
753
754 pa_silence_memchunk(chunkIn, &si->sample_spec);
755 } else if (n == 1) {
756 pa_memchunk tmpChunk;
757 // If chunkIn is not full filled, we need re-call SinkRenderPrimaryPeekCap.
758 if (chunkIn->length > length) {
759 chunkIn->length = length;
760 }
761
762 tmpChunk = infoIn[0].chunk;
763 pa_memblock_ref(tmpChunk.memblock);
764
765 if (tmpChunk.length > length) {
766 tmpChunk.length = length;
767 }
768
769 pa_memchunk_memcpy(chunkIn, &tmpChunk);
770 pa_memblock_unref(tmpChunk.memblock);
771 } else {
772 void *ptr;
773
774 ptr = pa_memblock_acquire(chunkIn->memblock);
775
776 for (unsigned index = 0; index < n; index++) {
777 for (unsigned channel = 0; channel < si->sample_spec.channels; channel++) {
778 infoIn[index].volume.values[channel] = PA_VOLUME_NORM;
779 }
780 }
781
782 chunkIn->length = pa_mix(infoIn, n, (uint8_t*) ptr + chunkIn->index, length, &si->sample_spec, NULL, false);
783
784 pa_memblock_release(chunkIn->memblock);
785 }
786 }
787
788 static void SinkRenderPrimaryInputsDropCap(pa_sink *si, pa_mix_info *infoIn, unsigned n, pa_memchunk *chunkIn)
789 {
790 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryInputsDropCap:%u:len:%zu", n, chunkIn->length);
791 pa_sink_assert_ref(si);
792 pa_sink_assert_io_context(si);
793 pa_assert(chunkIn);
794 pa_assert(chunkIn->memblock);
795 pa_assert(chunkIn->length > 0);
796
797 /* We optimize for the case where the order of the inputs has not changed */
798
799 pa_mix_info *infoCur = NULL;
800 pa_sink_input *sceneSinkInput = NULL;
801 bool isCaptureSilently = IsCaptureSilently();
802 for (uint32_t k = 0; k < n; k++) {
803 sceneSinkInput = infoIn[k].userdata;
804 pa_sink_input_assert_ref(sceneSinkInput);
805 AUTO_CTRACE("hdi_sink::InnerCap:pa_sink_input_drop:%u:len:%zu", sceneSinkInput->index, chunkIn->length);
806 pa_sink_input_drop(sceneSinkInput, chunkIn->length);
807
808 infoCur = infoIn + k;
809 if (infoCur) {
810 if (infoCur->chunk.memblock) {
811 pa_memblock_unref(infoCur->chunk.memblock);
812 pa_memchunk_reset(&infoCur->chunk);
813 }
814
815 pa_sink_input_unref(infoCur->userdata);
816
817 if (isCaptureSilently) {
818 infoCur->userdata = NULL;
819 }
820 }
821 }
822 }
823
824 static int32_t SinkRenderPrimaryPeekCap(pa_sink *si, pa_memchunk *chunkIn)
825 {
826 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryPeekCap:len:%zu", chunkIn->length);
827 pa_mix_info infoIn[MAX_MIX_CHANNELS];
828 unsigned n;
829 size_t length;
830 size_t blockSizeMax;
831
832 pa_sink_assert_ref(si);
833 pa_sink_assert_io_context(si);
834 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
835 pa_assert(chunkIn);
836 pa_assert(chunkIn->memblock);
837 pa_assert(chunkIn->length > 0);
838 pa_assert(pa_frame_aligned(chunkIn->length, &si->sample_spec));
839
840 pa_assert(!si->thread_info.rewind_requested);
841 pa_assert(si->thread_info.rewind_nbytes == 0);
842
843 if (si->thread_info.state == PA_SINK_SUSPENDED) {
844 pa_silence_memchunk(chunkIn, &si->sample_spec);
845 return 0;
846 }
847
848 pa_sink_ref(si);
849
850 length = chunkIn->length;
851 blockSizeMax = pa_mempool_block_size_max(si->core->mempool);
852 if (length > blockSizeMax)
853 length = pa_frame_align(blockSizeMax, &si->sample_spec);
854
855 pa_assert(length > 0);
856
857 n = SinkRenderPrimaryClusterCap(si, &length, infoIn, MAX_MIX_CHANNELS);
858 SinkRenderPrimaryMixCap(si, length, infoIn, n, chunkIn);
859
860 SinkRenderPrimaryInputsDropCap(si, infoIn, n, chunkIn);
861 pa_sink_unref(si);
862
863 return n;
864 }
865
866 static int32_t SinkRenderPrimaryGetDataCap(pa_sink *si, pa_memchunk *chunkIn)
867 {
868 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryGetDataCap:len:%zu", chunkIn->length);
869 pa_memchunk chunk;
870 size_t l;
871 size_t d;
872 pa_sink_assert_ref(si);
873 pa_sink_assert_io_context(si);
874 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
875 pa_assert(chunkIn);
876 pa_assert(chunkIn->memblock);
877 pa_assert(chunkIn->length > 0);
878 pa_assert(pa_frame_aligned(chunkIn->length, &si->sample_spec));
879
880 pa_assert(!si->thread_info.rewind_requested);
881 pa_assert(si->thread_info.rewind_nbytes == 0);
882
883 if (si->thread_info.state == PA_SINK_SUSPENDED) {
884 pa_silence_memchunk(chunkIn, &si->sample_spec);
885 return 0;
886 }
887
888 pa_sink_ref(si);
889
890 l = chunkIn->length;
891 d = 0;
892
893 int32_t nSinkInput = 0;
894 while (l > 0) {
895 chunk = *chunkIn;
896 chunk.index += d;
897 chunk.length -= d;
898
899 nSinkInput = SinkRenderPrimaryPeekCap(si, &chunk);
900
901 d += (size_t)chunk.length;
902 l -= (size_t)chunk.length;
903 }
904
905 pa_sink_unref(si);
906
907 return nSinkInput;
908 }
909
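// Check whether the sink's monitor source is currently running (isRunning) or merely linked.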
910 static bool monitorLinked(pa_sink *si, bool isRunning)
911 {
912 if (isRunning) {
913 return si->monitor_source && PA_SOURCE_IS_RUNNING(si->monitor_source->thread_info.state);
914 } else {
915 return si->monitor_source && PA_SOURCE_IS_LINKED(si->monitor_source->thread_info.state);
916 }
917 }
918
919 static void InnerCapSinkInputsRewind(pa_sink *si, size_t length)
920 {
921 AUTO_CTRACE("hdi_sink::InnerCapSinkInputsRewind:len:%zu", length);
922
923 pa_sink_assert_ref(si);
924 pa_sink_assert_io_context(si);
925
926 pa_sink_input *sinkIn = NULL;
927 void *state = NULL;
928 while ((sinkIn = pa_hashmap_iterate(si->thread_info.inputs, &state, NULL))) {
929 if (IsInnerCapturer(sinkIn) && InputIsPrimary(sinkIn)) {
930 pa_sink_input_assert_ref(sinkIn);
931 pa_sink_input_process_rewind(sinkIn, length); // will not work well if maxrewind = 0
932 }
933 }
934 }
935
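// Render one block of inner-capture data, post it to the monitor source if one is linked, then rewind the capture inputs unless capturing silently.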
936 static void SinkRenderCapProcess(pa_sink *si, size_t length, pa_memchunk *capResult)
937 {
938 AUTO_CTRACE("hdi_sink::SinkRenderCapProcess:len:%zu", length);
939 capResult->memblock = pa_memblock_new(si->core->mempool, length);
940 capResult->index = 0;
941 capResult->length = length;
942 SinkRenderPrimaryGetDataCap(si, capResult);
943 if (monitorLinked(si, false)) {
944 AUTO_CTRACE("hdi_sink::pa_source_post:len:%zu", capResult->length);
945 pa_source_post(si->monitor_source, capResult);
946 }
947
948 // If not capturing silently, we need to rewind the inputs for the speaker path.
949 if (!IsCaptureSilently()) {
950 InnerCapSinkInputsRewind(si, capResult->length);
951 }
952 return;
953 }
954
955 static void SinkRenderPrimaryInputsDrop(pa_sink *si, pa_mix_info *infoIn, unsigned n, pa_memchunk *chunkIn)
956 {
957 unsigned nUnreffed = 0;
958
959 pa_sink_assert_ref(si);
960 pa_sink_assert_io_context(si);
961 pa_assert(chunkIn);
962 pa_assert(chunkIn->memblock);
963 pa_assert(chunkIn->length > 0);
964
965 /* We optimize for the case where the order of the inputs has not changed */
966 pa_mix_info *infoCur = NULL;
967 pa_sink_input *sceneSinkInput = NULL;
968 for (uint32_t k = 0; k < n; k++) {
969 sceneSinkInput = infoIn[k].userdata;
970 pa_sink_input_assert_ref(sceneSinkInput);
971
972 /* Drop read data */
973 pa_sink_input_drop(sceneSinkInput, chunkIn->length);
974 infoCur = infoIn + k;
975 if (infoCur) {
976 if (infoCur->chunk.memblock) {
977 pa_memblock_unref(infoCur->chunk.memblock);
978 pa_memchunk_reset(&infoCur->chunk);
979 }
980
981 pa_sink_input_unref(infoCur->userdata);
982 infoCur->userdata = NULL;
983
984 nUnreffed += 1;
985 }
986 }
987 /* Now drop references to entries that are included in the
988 * pa_mix_info array but don't exist anymore */
989
990 if (nUnreffed < n) {
991 for (; n > 0; infoIn++, n--) {
992 if (infoIn->userdata)
993 pa_sink_input_unref(infoIn->userdata);
994 if (infoIn->chunk.memblock)
995 pa_memblock_unref(infoIn->chunk.memblock);
996 }
997 }
998 }
999
1000 static void SinkRenderMultiChannelInputsDrop(pa_sink *si, pa_mix_info *infoIn, unsigned n, pa_memchunk *chunkIn)
1001 {
1002 AUDIO_DEBUG_LOG("mch inputs drop start");
1003 unsigned nUnreffed = 0;
1004
1005 pa_sink_assert_ref(si);
1006 pa_sink_assert_io_context(si);
1007 pa_assert(chunkIn);
1008 pa_assert(chunkIn->memblock);
1009 pa_assert(chunkIn->length > 0);
1010
1011 /* We optimize for the case where the order of the inputs has not changed */
1012 pa_mix_info *infoCur = NULL;
1013 pa_sink_input *sceneSinkInput = NULL;
1014 for (uint32_t k = 0; k < n; k++) {
1015 sceneSinkInput = infoIn[k].userdata;
1016 pa_sink_input_assert_ref(sceneSinkInput);
1017
1018 /* Drop read data */
1019 pa_sink_input_drop(sceneSinkInput, chunkIn->length);
1020 infoCur = infoIn + k;
1021 if (infoCur) {
1022 if (infoCur->chunk.memblock) {
1023 pa_memblock_unref(infoCur->chunk.memblock);
1024 pa_memchunk_reset(&infoCur->chunk);
1025 }
1026
1027 pa_sink_input_unref(infoCur->userdata);
1028 infoCur->userdata = NULL;
1029
1030 nUnreffed += 1;
1031 }
1032 }
1033 /* Now drop references to entries that are included in the
1034 * pa_mix_info array but don't exist anymore */
1035
1036 if (nUnreffed < n) {
1037 for (; n > 0; infoIn++, n--) {
1038 if (infoIn->userdata)
1039 pa_sink_input_unref(infoIn->userdata);
1040 if (infoIn->chunk.memblock)
1041 pa_memblock_unref(infoIn->chunk.memblock);
1042 }
1043 }
1044 }
1045
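// Collect the client UID of every running sink input so it can be reported to the HDI sink.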
1046 static void CheckAndPushUidToArr(pa_sink_input *sinkIn, int32_t appsUid[MAX_MIX_CHANNELS], size_t *count)
1047 {
1048 const char *cstringClientUid = pa_proplist_gets(sinkIn->proplist, "stream.client.uid");
1049 if (cstringClientUid && (sinkIn->thread_info.state == PA_SINK_INPUT_RUNNING)) {
1050 appsUid[(*count)] = atoi(cstringClientUid);
1051 (*count)++;
1052 }
1053 }
1054
1055 static void SafeRendererSinkUpdateAppsUid(struct RendererSinkAdapter *sinkAdapter,
1056 const int32_t appsUid[MAX_MIX_CHANNELS], const size_t count)
1057 {
1058 if (sinkAdapter) {
1059 sinkAdapter->RendererSinkUpdateAppsUid(sinkAdapter, appsUid, count);
1060 }
1061 }
1062
1063 static void silenceData(pa_mix_info *infoIn, pa_sink *si)
1064 {
1065 pa_memchunk_make_writable(&infoIn->chunk, 0);
1066 void *tmpdata = pa_memblock_acquire_chunk(&infoIn->chunk);
1067 memset_s(tmpdata, infoIn->chunk.length, 0, infoIn->chunk.length);
1068 pa_memblock_release(infoIn->chunk.memblock);
1069 }
1070
1071 static enum HdiAdapterFormat ConvertPaToHdiAdapterFormat(pa_sample_format_t format)
1072 {
1073 enum HdiAdapterFormat adapterFormat;
1074 switch (format) {
1075 case PA_SAMPLE_U8:
1076 adapterFormat = SAMPLE_U8;
1077 break;
1078 case PA_SAMPLE_S16LE:
1079 adapterFormat = SAMPLE_S16;
1080 break;
1081 case PA_SAMPLE_S24LE:
1082 adapterFormat = SAMPLE_S24;
1083 break;
1084 case PA_SAMPLE_S32LE:
1085 adapterFormat = SAMPLE_S32;
1086 break;
1087 default:
1088 adapterFormat = INVALID_WIDTH;
1089 break;
1090 }
1091
1092 return adapterFormat;
1093 }
1094
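// Apply a linear volume ramp over the buffer: fadeType 0 fades in (0 -> 1), any other value fades out (1 -> 0).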
1095 static void DoFading(void *data, int32_t length, uint32_t format, uint32_t channel, int32_t fadeType)
1096 {
1097 AudioRawFormat rawFormat;
1098 rawFormat.format = format;
1099 rawFormat.channels = channel;
1100 AUDIO_INFO_LOG("length:%{public}d channels:%{public}d format:%{public}d fadeType:%{public}d",
1101 length, rawFormat.channels, rawFormat.format, fadeType);
1102 int32_t ret = 0;
1103 if (fadeType == 0) {
1104 ret = ProcessVol(data, length, rawFormat, FADE_IN_BEGIN, FADE_IN_END);
1105 } else {
1106 ret = ProcessVol(data, length, rawFormat, FADE_OUT_BEGIN, FADE_OUT_END);
1107 }
1108 if (ret != 0) {
1109 AUDIO_WARNING_LOG("ProcessVol failed:%{public}d", ret);
1110 }
1111 }
1112
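// Return how many bytes of the chunk should be faded: none for FADE_STRATEGY_NONE, about 5 ms for FADE_STRATEGY_SHORTER, the whole chunk otherwise.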
1113 static int32_t GetFadeLenth(enum FadeStrategy fadeStrategy, size_t chunkLength, pa_sample_spec ss)
1114 {
1115 if (fadeStrategy == FADE_STRATEGY_NONE) {
1116 // no fade
1117 return 0;
1118 }
1119
1120 if (fadeStrategy == FADE_STRATEGY_SHORTER) {
1121 // do 5ms fade-in fade-out
1122 size_t fadeLenth = pa_usec_to_bytes(FADE_OUT_TIME, &ss);
1123 return ((fadeLenth < chunkLength) ? fadeLenth : chunkLength);
1124 }
1125
1126 if (fadeStrategy == FADE_STRATEGY_DEFAULT) {
1127 return chunkLength;
1128 }
1129
1130 return chunkLength;
1131 }
1132
1133 static void PreparePrimaryFading(pa_sink_input *sinkIn, pa_mix_info *infoIn, pa_sink *si)
1134 {
1135 struct Userdata *u;
1136 pa_assert_se(u = si->userdata);
1137
1138 const char *streamType = safeProplistGets(sinkIn->proplist, "stream.type", "NULL");
1139 if (pa_safe_streq(streamType, "ultrasonic")) { return; }
1140
1141 const char *strExpectedPlaybackDurationBytes = safeProplistGets(sinkIn->proplist,
1142 "expectedPlaybackDurationBytes", "0");
1143 uint64_t expectedPlaybackDurationBytes = 0;
1144 pa_atou64(strExpectedPlaybackDurationBytes, &expectedPlaybackDurationBytes);
1145 enum FadeStrategy fadeStrategy
1146 = GetFadeStrategy(pa_bytes_to_usec(expectedPlaybackDurationBytes, &(sinkIn->sample_spec)) / PA_USEC_PER_MSEC);
1147
1148 uint32_t streamIndex = sinkIn->index;
1149 uint32_t sinkFadeoutPause = GetFadeoutState(streamIndex);
1150 if (sinkFadeoutPause == DONE_FADE && (sinkIn->thread_info.state == PA_SINK_INPUT_RUNNING)) {
1151 silenceData(infoIn, si);
1152 AUDIO_PRERELEASE_LOGI("after pause fadeout done, silenceData");
1153 return;
1154 }
1155 uint32_t format = (uint32_t)ConvertPaToHdiAdapterFormat(u->format);
1156 int32_t fadeLenth = GetFadeLenth(fadeStrategy, infoIn->chunk.length, u->ss);
1157
1158 if (pa_atomic_load(&u->primary.fadingFlagForPrimary) == 1 &&
1159 u->primary.primarySinkInIndex == (int32_t)sinkIn->index) {
1160 if (pa_memblock_is_silence(infoIn->chunk.memblock)) {
1161 AUDIO_PRERELEASE_LOGI("pa_memblock_is_silence");
1162 return;
1163 }
1164 //do fading in
1165 pa_memchunk_make_writable(&infoIn->chunk, 0);
1166 void *data = pa_memblock_acquire_chunk(&infoIn->chunk);
1167 DoFading(data, fadeLenth, format, (uint32_t)u->ss.channels, 0);
1168 u->primary.primaryFadingInDone = 1;
1169 pa_memblock_release(infoIn->chunk.memblock);
1170 }
1171 if (sinkFadeoutPause == DO_FADE) {
1172 //do fading out
1173 pa_memchunk_make_writable(&infoIn->chunk, 0);
1174 void *data = pa_memblock_acquire_chunk(&infoIn->chunk);
1175 DoFading(data + infoIn->chunk.length - fadeLenth, fadeLenth, format, (uint32_t)u->ss.channels, 1);
1176 if (fadeStrategy == FADE_STRATEGY_DEFAULT) { SetFadeoutState(streamIndex, DONE_FADE); }
1177 pa_memblock_release(infoIn->chunk.memblock);
1178 }
1179 }
1180
1181 static void CheckPrimaryFadeinIsDone(pa_sink *si, pa_sink_input *sinkIn)
1182 {
1183 struct Userdata *u;
1184 pa_assert_se(u = si->userdata);
1185
1186 if (u->primary.primaryFadingInDone && u->primary.primarySinkInIndex == (int32_t)sinkIn->index) {
1187 pa_atomic_store(&u->primary.fadingFlagForPrimary, 0);
1188 }
1189 }
1190
1191 static void RecordEffectChainStatus(bool existFlag, const char *sinkSceneType, const char *sinkSceneMode,
1192 bool actualSpatializationEnabled)
1193 {
1194 if (g_effectProcessFrameCount == PRINT_INTERVAL_FRAME_COUNT) {
1195 AUDIO_DEBUG_LOG("Effect Chain Status is %{public}d, "
1196 "scene type is %{public}s, scene mode is %{public}s, spatializationEnabled is %{public}d.",
1197 existFlag, sinkSceneType, sinkSceneMode, actualSpatializationEnabled);
1198 }
1199 }
1200
1201 static bool GetExistFlag(pa_sink_input *sinkIn, const char *sinkSceneType, const char *sinkSceneMode,
1202 const char *spatializationEnabled)
1203 {
1204 bool existFlag =
1205 EffectChainManagerExist(sinkSceneType, sinkSceneMode, spatializationEnabled ? "1" : "0");
1206 const char *deviceString = pa_proplist_gets(sinkIn->sink->proplist, PA_PROP_DEVICE_STRING);
1207 if (pa_safe_streq(deviceString, "remote")) {
1208 existFlag = false;
1209 }
1210
1211 return existFlag;
1212 }
1213
1214 static void ProcessAudioVolume(pa_sink_input *sinkIn, size_t length, pa_memchunk *pchunk, pa_sink *si)
1215 {
1216 AUTO_CTRACE("hdi_sink::ProcessAudioVolume: len:%zu", length);
1217 struct Userdata *u;
1218 pa_assert_se(sinkIn);
1219 pa_assert_se(pchunk);
1220 pa_assert_se(si);
1221 pa_assert_se(u = si->userdata);
1222 const char *streamType = safeProplistGets(sinkIn->proplist, "stream.type", "NULL");
1223 const char *sessionIDStr = safeProplistGets(sinkIn->proplist, "stream.sessionID", "NULL");
1224 const char *deviceClass = GetDeviceClass(u->primary.sinkAdapter->deviceClass);
1225 uint32_t sessionID = sessionIDStr != NULL ? atoi(sessionIDStr) : 0;
1226 float volumeEnd = GetCurVolume(sessionID, streamType, deviceClass);
1227 float volumeBeg = GetPreVolume(sessionID);
1228 float fadeBeg = 1.0f;
1229 float fadeEnd = 1.0f;
1230 if (!pa_safe_streq(streamType, "ultrasonic")) {
1231 GetStreamVolumeFade(sessionID, &fadeBeg, &fadeEnd);
1232 }
1233 if (pa_memblock_is_silence(pchunk->memblock)) {
1234 AUTO_CTRACE("hdi_sink::ProcessAudioVolume: is_silence");
1235 AUDIO_PRERELEASE_LOGI("pa_memblock_is_silence");
1236 } else {
1237 AudioRawFormat rawFormat;
1238 rawFormat.format = (uint32_t)ConvertPaToHdiAdapterFormat(si->sample_spec.format);
1239 rawFormat.channels = (uint32_t)si->sample_spec.channels;
1240
1241 pa_memchunk_make_writable(pchunk, 0);
1242 void *data = pa_memblock_acquire_chunk(pchunk);
1243
1244 AUDIO_DEBUG_LOG("length:%{public}zu channels:%{public}d format:%{public}d"
1245 " volumeBeg:%{public}f, volumeEnd:%{public}f, fadeBeg:%{public}f, fadeEnd:%{public}f",
1246 length, rawFormat.channels, rawFormat.format, volumeBeg, volumeEnd, fadeBeg, fadeEnd);
1247 int32_t ret = ProcessVol(data, length, rawFormat, volumeBeg * fadeBeg, volumeEnd * fadeEnd);
1248 if (ret != 0) {
1249 AUDIO_WARNING_LOG("ProcessVol failed:%{public}d", ret);
1250 }
1251 pa_memblock_release(pchunk->memblock);
1252 }
1253 if (volumeBeg != volumeEnd || fadeBeg != fadeEnd) {
1254 AUDIO_INFO_LOG("sessionID:%{public}s, length:%{public}zu, volumeBeg:%{public}f, volumeEnd:%{public}f"
1255 ", fadeBeg:%{public}f, fadeEnd:%{public}f",
1256 sessionIDStr, length, volumeBeg, volumeEnd, fadeBeg, fadeEnd);
1257 if (volumeBeg != volumeEnd) {
1258 SetPreVolume(sessionID, volumeEnd);
1259 MonitorVolume(sessionID, true);
1260 }
1261 if (fadeBeg != fadeEnd) {
1262 SetStreamVolumeFade(sessionID, fadeEnd, fadeEnd);
1263 }
1264 }
1265 }
1266
1267 static void HandleFading(pa_sink *si, size_t length, pa_sink_input *sinkIn, pa_mix_info *infoIn)
1268 {
1269 struct Userdata *u;
1270 pa_assert_se(u = si->userdata);
1271
1272 infoIn->userdata = pa_sink_input_ref(sinkIn);
1273 pa_assert(infoIn->chunk.memblock);
1274 pa_assert(infoIn->chunk.length > 0);
1275 PreparePrimaryFading(sinkIn, infoIn, si);
1276 CheckPrimaryFadeinIsDone(si, sinkIn);
1277
1278 uint32_t sinkFadeoutPause = GetFadeoutState(sinkIn->index);
1279 if (!sinkFadeoutPause && (length <= infoIn->chunk.length)) {
1280 u->streamAvailable++;
1281 }
1282 }
1283
1284 static unsigned SinkRenderPrimaryCluster(pa_sink *si, size_t *length, pa_mix_info *infoIn,
1285 unsigned maxInfo, const char *sceneType)
1286 {
1287 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryCluster:%s len:%zu", sceneType, *length);
1288
1289 struct Userdata *u;
1290 pa_assert_se(u = si->userdata);
1291
1292 pa_sink_input *sinkIn;
1293 unsigned n = 0;
1294 void *state = NULL;
1295 size_t mixlength = *length;
1296
1297 pa_sink_assert_ref(si);
1298 pa_sink_assert_io_context(si);
1299 pa_assert(infoIn);
1300
1301 int32_t appsUid[MAX_MIX_CHANNELS];
1302 size_t count = 0;
1303 while ((sinkIn = pa_hashmap_iterate(si->thread_info.inputs, &state, NULL)) && maxInfo > 0) {
1304 CheckAndPushUidToArr(sinkIn, appsUid, &count);
1305 const char *sSceneType = pa_proplist_gets(sinkIn->proplist, "scene.type");
1306 const char *sSceneMode = pa_proplist_gets(sinkIn->proplist, "scene.mode");
1307 bool existFlag = GetExistFlag(sinkIn, sSceneType, sSceneMode, u->actualSpatializationEnabled ? "1" : "0");
1308 bool sceneTypeFlag = EffectChainManagerSceneCheck(sSceneType, sceneType);
1309 if ((IsInnerCapturer(sinkIn) && IsCaptureSilently()) || !InputIsPrimary(sinkIn)) {
1310 AUTO_CTRACE("hdi_sink::PrimaryCluster:InnerCapturer and CaptureSilently or not primary");
1311 continue;
1312 } else if ((sceneTypeFlag && existFlag) || (pa_safe_streq(sceneType, "EFFECT_NONE") && (!existFlag))) {
1313 RecordEffectChainStatus(existFlag, sSceneType, sSceneMode, u->actualSpatializationEnabled);
1314 pa_sink_input_assert_ref(sinkIn);
1315 updateResampler(sinkIn, sceneType, false);
1316
1317 AUTO_CTRACE("hdi_sink::PrimaryCluster:%u len:%zu", sinkIn->index, *length);
1318 pa_sink_input_peek(sinkIn, *length, &infoIn->chunk, &infoIn->volume);
1319
1320 if (mixlength == 0 || infoIn->chunk.length < mixlength) {mixlength = infoIn->chunk.length;}
1321
1322 ProcessAudioVolume(sinkIn, mixlength, &infoIn->chunk, si);
1323
1324 if (pa_memblock_is_silence(infoIn->chunk.memblock) && sinkIn->thread_info.state == PA_SINK_INPUT_RUNNING) {
1325 AUTO_CTRACE("hdi_sink::PrimaryCluster::is_silence");
1326 pa_sink_input_handle_ohos_underrun(sinkIn);
1327 } else {
1328 AUTO_CTRACE("hdi_sink::PrimaryCluster::is_not_silence");
1329 }
1330
1331 HandleFading(si, *length, sinkIn, infoIn);
1332
1333 infoIn++;
1334 n++;
1335 maxInfo--;
1336 }
1337 }
1338
1339 SafeRendererSinkUpdateAppsUid(u->primary.sinkAdapter, appsUid, count);
1340
1341 if (mixlength > 0) { *length = mixlength; }
1342
1343 return n;
1344 }
1345
1346 static void PrepareMultiChannelFading(pa_sink_input *sinkIn, pa_mix_info *infoIn, pa_sink *si)
1347 {
1348 struct Userdata *u;
1349 pa_assert_se(u = si->userdata);
1350
1351 uint32_t streamIndex = sinkIn->index;
1352 uint32_t sinkFadeoutPause = GetFadeoutState(streamIndex);
1353 if (sinkFadeoutPause == DONE_FADE) {
1354 silenceData(infoIn, si);
1355 AUDIO_PRERELEASE_LOGI("silenceData.");
1356 return;
1357 }
1358
1359 uint32_t format = (uint32_t)ConvertPaToHdiAdapterFormat(u->format);
1360 if (pa_atomic_load(&u->multiChannel.fadingFlagForMultiChannel) == 1 &&
1361 u->multiChannel.multiChannelSinkInIndex == (int32_t)sinkIn->index) {
1362 if (pa_memblock_is_silence(infoIn->chunk.memblock)) {
1363 AUDIO_DEBUG_LOG("pa_memblock_is_silence");
1364 return;
1365 }
1366 //do fading in
1367 pa_memchunk_make_writable(&infoIn->chunk, 0);
1368 void *data = pa_memblock_acquire_chunk(&infoIn->chunk);
1369 DoFading(data, infoIn->chunk.length, format, (uint32_t)u->ss.channels, 0);
1370 u->multiChannel.multiChannelFadingInDone = 1;
1371 pa_memblock_release(infoIn->chunk.memblock);
1372 }
1373 if (sinkFadeoutPause == DO_FADE) {
1374 //do fading out
1375 pa_memchunk_make_writable(&infoIn->chunk, 0);
1376 void *data = pa_memblock_acquire_chunk(&infoIn->chunk);
1377 DoFading(data, infoIn->chunk.length, format, (uint32_t)u->ss.channels, 1);
1378 SetFadeoutState(streamIndex, DONE_FADE);
1379 }
1380 }
1381
1382 static void CheckMultiChannelFadeinIsDone(pa_sink *si, pa_sink_input *sinkIn)
1383 {
1384 struct Userdata *u;
1385 pa_assert_se(u = si->userdata);
1386
1387 if (u->multiChannel.multiChannelFadingInDone &&
1388 u->multiChannel.multiChannelSinkInIndex == (int32_t)sinkIn->index) {
1389 pa_atomic_store(&u->multiChannel.fadingFlagForMultiChannel, 0);
1390 }
1391 }
1392
1393 static unsigned SinkRenderMultiChannelCluster(pa_sink *si, size_t *length, pa_mix_info *infoIn,
1394 unsigned maxInfo)
1395 {
1396 pa_sink_input *sinkIn;
1397 unsigned n = 0;
1398 void *state = NULL;
1399 size_t mixlength = *length;
1400
1401 pa_sink_assert_ref(si);
1402 pa_sink_assert_io_context(si);
1403 pa_assert(infoIn);
1404
1405 struct Userdata *u;
1406 pa_assert_se(u = si->userdata);
1407
1408 int32_t appsUid[MAX_MIX_CHANNELS];
1409 size_t count = 0;
1410
1411 while ((sinkIn = pa_hashmap_iterate(si->thread_info.inputs, &state, NULL)) && maxInfo > 0) {
1412 CheckAndPushUidToArr(sinkIn, appsUid, &count);
1413 int32_t sinkChannels = sinkIn->sample_spec.channels;
1414 const char *sinkSceneType = pa_proplist_gets(sinkIn->proplist, "scene.type");
1415 const char *sinkSceneMode = pa_proplist_gets(sinkIn->proplist, "scene.mode");
1416 const char *sinkSpatializationEnabled = pa_proplist_gets(sinkIn->proplist, "spatialization.enabled");
1417 bool existFlag = EffectChainManagerExist(sinkSceneType, sinkSceneMode, sinkSpatializationEnabled);
1418 if (!existFlag && sinkChannels > PRIMARY_CHANNEL_NUM) {
1419 pa_sink_input_assert_ref(sinkIn);
1420 updateResampler(sinkIn, NULL, true);
1421 pa_sink_input_peek(sinkIn, *length, &infoIn->chunk, &infoIn->volume);
1422
1423 if (mixlength == 0 || infoIn->chunk.length < mixlength) {mixlength = infoIn->chunk.length;}
1424
1425 ProcessAudioVolume(sinkIn, mixlength, &infoIn->chunk, si);
1426
1427 if (pa_memblock_is_silence(infoIn->chunk.memblock) && sinkIn->thread_info.state == PA_SINK_INPUT_RUNNING) {
1428 AUTO_CTRACE("hdi_sink::SinkRenderMultiChannelCluster::is_silence");
1429 pa_sink_input_handle_ohos_underrun(sinkIn);
1430 }
1431
1432 infoIn->userdata = pa_sink_input_ref(sinkIn);
1433 pa_assert(infoIn->chunk.memblock);
1434 pa_assert(infoIn->chunk.length > 0);
1435
1436 if (pa_safe_streq(sinkSpatializationEnabled, "true")) {
1437 PrepareMultiChannelFading(sinkIn, infoIn, si);
1438 CheckMultiChannelFadeinIsDone(si, sinkIn);
1439 }
1440 infoIn++;
1441 n++;
1442 maxInfo--;
1443 }
1444 }
1445
1446 SafeRendererSinkUpdateAppsUid(u->multiChannel.sinkAdapter, appsUid, count);
1447
1448 if (mixlength > 0) { *length = mixlength; }
1449
1450 return n;
1451 }
1452
1453 static int32_t SinkRenderPrimaryPeek(pa_sink *si, pa_memchunk *chunkIn, const char *sceneType)
1454 {
1455 pa_mix_info info[MAX_MIX_CHANNELS];
1456 unsigned n;
1457 size_t length;
1458 size_t blockSizeMax;
1459
1460 pa_sink_assert_ref(si);
1461 pa_sink_assert_io_context(si);
1462 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
1463 pa_assert(chunkIn);
1464 pa_assert(chunkIn->memblock);
1465 pa_assert(chunkIn->length > 0);
1466 pa_assert(pa_frame_aligned(chunkIn->length, &si->sample_spec));
1467
1468 pa_assert(!si->thread_info.rewind_requested);
1469 pa_assert(si->thread_info.rewind_nbytes == 0);
1470
1471 if (si->thread_info.state == PA_SINK_SUSPENDED) {
1472 AUTO_CTRACE("hdi_sink::Primary:PA_SINK_SUSPENDED");
1473 pa_silence_memchunk(chunkIn, &si->sample_spec);
1474 return 0;
1475 }
1476
1477 pa_sink_ref(si);
1478
1479 length = chunkIn->length;
1480 blockSizeMax = pa_mempool_block_size_max(si->core->mempool);
1481 if (length > blockSizeMax)
1482 length = pa_frame_align(blockSizeMax, &si->sample_spec);
1483
1484 pa_assert(length > 0);
1485 n = SinkRenderPrimaryCluster(si, &length, info, MAX_MIX_CHANNELS, sceneType);
1486
1487 AUTO_CTRACE("hdi_sink:Primary:SinkRenderPrimaryMix:%u len:%zu", n, length);
1488 SinkRenderPrimaryMix(si, length, info, n, chunkIn);
1489
1490 SinkRenderPrimaryInputsDrop(si, info, n, chunkIn);
1491 pa_sink_unref(si);
1492 return n;
1493 }
1494
1495 static int32_t SinkRenderMultiChannelPeek(pa_sink *si, pa_memchunk *chunkIn)
1496 {
1497 pa_mix_info info[MAX_MIX_CHANNELS];
1498 unsigned n;
1499 size_t length;
1500 size_t blockSizeMax;
1501
1502 pa_sink_assert_ref(si);
1503 pa_sink_assert_io_context(si);
1504 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
1505 pa_assert(chunkIn);
1506 pa_assert(chunkIn->memblock);
1507 pa_assert(chunkIn->length > 0);
1508 pa_assert(pa_frame_aligned(chunkIn->length, &si->sample_spec));
1509
1510 pa_assert(!si->thread_info.rewind_requested);
1511 pa_assert(si->thread_info.rewind_nbytes == 0);
1512
1513 if (si->thread_info.state == PA_SINK_SUSPENDED) {
1514 AUTO_CTRACE("hdi_sink::MultiCh:PA_SINK_SUSPENDED");
1515 pa_silence_memchunk(chunkIn, &si->sample_spec);
1516 return 0;
1517 }
1518
1519 pa_sink_ref(si);
1520
1521 length = chunkIn->length;
1522 blockSizeMax = pa_mempool_block_size_max(si->core->mempool);
1523 if (length > blockSizeMax)
1524 length = pa_frame_align(blockSizeMax, &si->sample_spec);
1525
1526 pa_assert(length > 0);
1527
1528 n = SinkRenderMultiChannelCluster(si, &length, info, MAX_MIX_CHANNELS);
1529
1530 AUTO_CTRACE("hdi_sink:MultiCh:SinkRenderPrimaryMix:%u len:%zu", n, length);
1531 SinkRenderPrimaryMix(si, length, info, n, chunkIn);
1532
1533 SinkRenderMultiChannelInputsDrop(si, info, n, chunkIn);
1534 pa_sink_unref(si);
1535
1536 return n;
1537 }
1538
1539 static int32_t SinkRenderPrimaryGetData(pa_sink *si, pa_memchunk *chunkIn, char *sceneType)
1540 {
1541 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryGetData:%s", sceneType);
1542 pa_memchunk chunk;
1543 size_t l;
1544 size_t d;
1545 pa_sink_assert_ref(si);
1546 pa_sink_assert_io_context(si);
1547 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
1548 pa_assert(chunkIn);
1549 pa_assert(chunkIn->memblock);
1550 pa_assert(chunkIn->length > 0);
1551 pa_assert(pa_frame_aligned(chunkIn->length, &si->sample_spec));
1552
1553 pa_assert(!si->thread_info.rewind_requested);
1554 pa_assert(si->thread_info.rewind_nbytes == 0);
1555
1556 if (si->thread_info.state == PA_SINK_SUSPENDED) {
1557 pa_silence_memchunk(chunkIn, &si->sample_spec);
1558 return 0;
1559 }
1560
1561 pa_sink_ref(si);
1562
1563 l = chunkIn->length;
1564 d = 0;
1565 int32_t nSinkInput = 0;
1566 while (l > 0) {
1567 chunk = *chunkIn;
1568 chunk.index += d;
1569 chunk.length -= d;
1570
1571 nSinkInput = SinkRenderPrimaryPeek(si, &chunk, sceneType);
1572
1573 d += chunk.length;
1574 l -= chunk.length;
1575 }
1576 pa_sink_unref(si);
1577
1578 return nSinkInput;
1579 }
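/*
 * Editorial note (not in the original): the loop above fills chunkIn in slices because a
 * single peek is capped by the mempool's maximum block size. 'd' counts bytes already
 * rendered, 'l' the bytes still missing; each pass peeks into the remaining window
 * [d, d + l) until the whole requested length has been produced.
 */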
1580
1581 static int32_t SinkRenderMultiChannelGetData(pa_sink *si, pa_memchunk *chunkIn)
1582 {
1583 pa_memchunk chunk;
1584 size_t l;
1585 size_t d;
1586 pa_sink_assert_ref(si);
1587 pa_sink_assert_io_context(si);
1588 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
1589 pa_assert(chunkIn);
1590 pa_assert(chunkIn->memblock);
1591 pa_assert(chunkIn->length > 0);
1592 pa_assert(pa_frame_aligned(chunkIn->length, &si->sample_spec));
1593
1594 pa_assert(!si->thread_info.rewind_requested);
1595 pa_assert(si->thread_info.rewind_nbytes == 0);
1596
1597 if (si->thread_info.state == PA_SINK_SUSPENDED) {
1598 pa_silence_memchunk(chunkIn, &si->sample_spec);
1599 return 0;
1600 }
1601
1602 pa_sink_ref(si);
1603
1604 l = chunkIn->length;
1605 d = 0;
1606
1607 int32_t nSinkInput = 0;
1608 while (l > 0) {
1609 chunk = *chunkIn;
1610 chunk.index += d;
1611 chunk.length -= d;
1612
1613 nSinkInput = SinkRenderMultiChannelPeek(si, &chunk);
1614
1615 d += chunk.length;
1616 l -= chunk.length;
1617 }
1618
1619 pa_sink_unref(si);
1620
1621 return nSinkInput;
1622 }
1623
1624 static void PrepareSpatializationFading(int8_t *fadingState, int8_t *fadingCount, bool *actualSpatializationEnabled)
1625 {
1626 (*fadingCount) = (*fadingCount) < 0 ? 0 : (*fadingCount);
1627 (*fadingCount) =
1628 (*fadingCount) > SPATIALIZATION_FADING_FRAMECOUNT ? SPATIALIZATION_FADING_FRAMECOUNT : (*fadingCount);
1629 // fading out if spatialization changed
1630 if (*fadingState >= 0 && *actualSpatializationEnabled != EffectChainManagerGetSpatializationEnabled()) {
1631 *fadingState = -1;
1632 }
1633 // fading in when fading out is done
1634 if (*fadingState < 0 && *fadingCount == 0) {
1635 *fadingState = 1;
1636 *actualSpatializationEnabled = EffectChainManagerGetSpatializationEnabled();
1637 EffectChainManagerFlush();
1638 }
1639 // no more fading is needed once fading in is done
1640 if (*fadingState > 0 && *fadingCount == SPATIALIZATION_FADING_FRAMECOUNT) {
1641 *fadingState = 0;
1642 }
1643 }
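/*
 * Editorial note (not in the original): fadingState acts as a small state machine:
 * 0 = steady (no fade), -1 = fading out because the spatialization switch changed,
 * +1 = fading back in after the effect chain has been flushed. fadingCount walks between
 * 0 and SPATIALIZATION_FADING_FRAMECOUNT and is advanced one frame at a time by
 * DoSpatializationFading() below.
 */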
1644
1645 static void DoSpatializationFading(float *buf, int8_t fadingState, int8_t *fadingCount, int32_t frameLen,
1646 int32_t channels)
1647 {
1648 // no need fading
1649 if (fadingState == 0) {
1650 return;
1651 }
1652 // fading out when fadingState equals -1, fading in when fadingState equals 1;
1653 for (int32_t i = 0; i < frameLen; i++) {
1654 for (int32_t j = 0; j < channels; j++) {
1655 buf[i * channels + j] *=
1656 ((*fadingCount) * frameLen + i * fadingState) / (float)(SPATIALIZATION_FADING_FRAMECOUNT * frameLen);
1657 }
1658 }
1659 (*fadingCount) += fadingState;
1660 }
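/*
 * Worked example (illustrative only; frameLen = 960 is a hypothetical value): with
 * SPATIALIZATION_FADING_FRAMECOUNT == 5 and a fade-in (fadingState == +1, fadingCount == 0),
 * sample i of the first frame gets gain (0 * 960 + i) / (5 * 960), i.e. roughly 0 .. 0.2;
 * the second frame spans ~0.2 .. 0.4, and by the end of the fifth frame the gain has ramped
 * to ~1.0. For a fade-out (fadingState == -1, fadingCount == 5) the same formula walks the
 * gain back down from 1.0 toward 0 over five frames.
 */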
1661
1662 static void SinkRenderPrimaryAfterProcess(pa_sink *si, size_t length, pa_memchunk *chunkIn)
1663 {
1664 struct Userdata *u;
1665 pa_assert_se(u = si->userdata);
1666 int32_t bitSize = (int32_t)pa_sample_size_of_format(u->format);
1667 u->bufferAttr->numChanIn = DEFAULT_IN_CHANNEL_NUM;
1668 void *dst = pa_memblock_acquire_chunk(chunkIn);
1669 int32_t frameLen = bitSize > 0 ? ((int32_t)length / bitSize) : 0;
1670 DoSpatializationFading(u->bufferAttr->tempBufOut, u->spatializationFadingState, &u->spatializationFadingCount,
1671 frameLen / DEFAULT_IN_CHANNEL_NUM, DEFAULT_IN_CHANNEL_NUM);
1672 ConvertFromFloat(u->format, frameLen, u->bufferAttr->tempBufOut, dst);
1673
1674 chunkIn->index = 0;
1675 chunkIn->length = length;
1676 pa_memblock_release(chunkIn->memblock);
1677 }
1678
1679 static char *HandleSinkSceneType(struct Userdata *u, time_t currentTime, int32_t i)
1680 {
1681 char *sinkSceneType = SCENE_TYPE_SET[i];
1682 if (g_effectAllStreamVolumeZeroMap[i] && PA_SINK_IS_RUNNING(u->sink->thread_info.state) &&
1683 difftime(currentTime, g_effectStartVolZeroTimeMap[i]) > WAIT_CLOSE_PA_OR_EFFECT_TIME) {
1684 sinkSceneType = SCENE_TYPE_SET[SCENE_TYPE_NUM - 1]; // EFFECT_NONE
1685 if (!g_effectHaveDisabledMap[i]) {
1686 AUDIO_INFO_LOG("volume change to zero over %{public}ds, close effect:%{public}s success.",
1687 WAIT_CLOSE_PA_OR_EFFECT_TIME, SCENE_TYPE_SET[i]);
1688 g_effectHaveDisabledMap[i] = true;
1689 g_effectStartVolZeroTimeMap[i] = 0;
1690 }
1691 } else {
1692 sinkSceneType = SCENE_TYPE_SET[i];
1693 if (g_effectHaveDisabledMap[i]) {
1694 g_effectHaveDisabledMap[i] = false;
1695 AUDIO_INFO_LOG("volume change to non zero, open effect:%{public}s success. ", SCENE_TYPE_SET[i]);
1696 }
1697 }
1698 return sinkSceneType;
1699 }
1700
1701 static char *CheckAndDealEffectZeroVolume(struct Userdata *u, time_t currentTime, int32_t i)
1702 {
1703 void *state = NULL;
1704 pa_sink_input *input;
1705 g_effectAllStreamVolumeZeroMap[i] = true;
1706 while ((input = pa_hashmap_iterate(u->sink->thread_info.inputs, &state, NULL))) {
1707 pa_sink_input_assert_ref(input);
1708 if (input->thread_info.state != PA_SINK_INPUT_RUNNING) {
1709 continue;
1710 }
1711 const char *sinkSceneTypeTmp = pa_proplist_gets(input->proplist, "scene.type");
1712 const char *streamType = safeProplistGets(input->proplist, "stream.type", "NULL");
1713 const char *sessionIDStr = safeProplistGets(input->proplist, "stream.sessionID", "NULL");
1714 const char *deviceClass = GetDeviceClass(u->primary.sinkAdapter->deviceClass);
1715 uint32_t sessionID = sessionIDStr != NULL ? atoi(sessionIDStr) : 0;
1716 float volume = GetCurVolume(sessionID, streamType, deviceClass);
1717 bool isZeroVolume = IsSameVolume(volume, 0.0f);
1718 if (EffectChainManagerSceneCheck(sinkSceneTypeTmp, SCENE_TYPE_SET[i]) && !isZeroVolume) {
1719 g_effectAllStreamVolumeZeroMap[i] = false;
1720 g_effectStartVolZeroTimeMap[i] = 0;
1721 AUDIO_DEBUG_LOG("SCENE_TYPE_SET[%{public}d]:%{public}s for streamtype:[%{public}s]'s"
1722 " volume is non zero, this effect all streamtype is non zero volume.", i,
1723 SCENE_TYPE_SET[i], streamType);
1724 break;
1725 }
1726 }
1727
1728 if (g_effectAllStreamVolumeZeroMap[i] && !g_effectHaveDisabledMap[i] && (g_effectStartVolZeroTimeMap[i] == 0) &&
1729 PA_SINK_IS_RUNNING(u->sink->thread_info.state)) {
1730 AUDIO_INFO_LOG("Timing begins, will close [%{public}s] effect after [%{public}d]s", SCENE_TYPE_SET[i],
1731 WAIT_CLOSE_PA_OR_EFFECT_TIME);
1732 time(&g_effectStartVolZeroTimeMap[i]);
1733 }
1734
1735 char *handledSceneType = HandleSinkSceneType(u, currentTime, i);
1736 AUDIO_DEBUG_LOG("handle sink scene type:%{public}s", handledSceneType);
1737 return handledSceneType;
1738 }
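/*
 * Editorial note (not in the original): when every running stream mapped to scene i has
 * zero volume, a timestamp is armed; once WAIT_CLOSE_PA_OR_EFFECT_TIME seconds pass with
 * the sink still running, HandleSinkSceneType() swaps the scene to EFFECT_NONE so the
 * effect chain is bypassed until one of the streams becomes audible again.
 */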
1739
1740 static void CheckOnlyPrimarySpeakerPaLoading(struct Userdata *u)
1741 {
1742 pa_sink *s;
1743 pa_core *c = u->core;
1744 uint32_t idx;
1745 PA_IDXSET_FOREACH(s, c->sinks, idx) {
1746 bool isHdiSink = !strncmp(s->driver, "module_hdi_sink", 15); // 15 cmp length
1747 if (isHdiSink && strcmp(s->name, "Speaker")) {
1748 AUDIO_DEBUG_LOG("Have new routing:[%{public}s] on primary, dont close it.", s->name);
1749 g_onlyPrimarySpeakerPaLoading = false;
1750 g_speakerPaAllStreamVolumeZero = false;
1751 g_speakerPaAllStreamStartVolZeroTime = 0;
1752 break;
1753 }
1754 }
1755
1756 if (strcmp(GetDeviceClass(u->primary.sinkAdapter->deviceClass), "primary")) {
1757 AUDIO_DEBUG_LOG("Sink[%{public}s] -- no primary, dont close it.",
1758 GetDeviceClass(u->primary.sinkAdapter->deviceClass));
1759 g_onlyPrimarySpeakerPaLoading = false;
1760 g_speakerPaAllStreamVolumeZero = false;
1761 g_speakerPaAllStreamStartVolZeroTime = 0;
1762 }
1763
1764 if (PA_SINK_IS_RUNNING(u->sink->thread_info.state) && !g_onlyPrimarySpeakerPaLoading && g_paHaveDisabled) {
1765 int32_t ret = u->primary.sinkAdapter->RendererSinkSetPaPower(u->primary.sinkAdapter, 1);
1766 AUDIO_INFO_LOG("sink running, open closed pa:[%{public}s] -- [%{public}s], ret:%{public}d", u->sink->name,
1767 (ret == 0 ? "success" : "failed"), ret);
1768 g_paHaveDisabled = false;
1769 g_speakerPaHaveClosed = false;
1770 }
1771 }
1772
1773 static void HandleClosePa(struct Userdata *u)
1774 {
1775 if (!g_paHaveDisabled) {
1776 int32_t ret = u->primary.sinkAdapter->RendererSinkSetPaPower(u->primary.sinkAdapter, 0);
1777 AUDIO_INFO_LOG("Speaker pa volume change to zero over [%{public}d]s, close %{public}s pa [%{public}s], "
1778 "ret:%{public}d", WAIT_CLOSE_PA_OR_EFFECT_TIME, u->sink->name, (ret == 0 ? "success" : "failed"), ret);
1779 g_paHaveDisabled = true;
1780 g_speakerPaAllStreamStartVolZeroTime = 0;
1781 g_speakerPaHaveClosed = true;
1782 time(&g_speakerPaClosedTime);
1783 }
1784 }
1785
1786 static void HandleOpenPa(struct Userdata *u)
1787 {
1788 if (g_paHaveDisabled) {
1789 int32_t ret = u->primary.sinkAdapter->RendererSinkSetPaPower(u->primary.sinkAdapter, 1);
1790 AUDIO_INFO_LOG("volume change to non zero, open closed pa:[%{public}s] -- [%{public}s], ret:%{public}d",
1791 u->sink->name, (ret == 0 ? "success" : "failed"), ret);
1792 g_paHaveDisabled = false;
1793 g_speakerPaHaveClosed = false;
1794 }
1795 }
1796
1797 static void CheckAndDealSpeakerPaZeroVolume(struct Userdata *u, time_t currentTime)
1798 {
1799 if (!g_onlyPrimarySpeakerPaLoading) {
1800 AUDIO_DEBUG_LOG("Not only the speaker pa, dont deal speaker pa.");
1801 return;
1802 }
1803 void *state = NULL;
1804 pa_sink_input *input;
1805 while ((input = pa_hashmap_iterate(u->sink->thread_info.inputs, &state, NULL))) {
1806 pa_sink_input_assert_ref(input);
1807 if (input->thread_info.state != PA_SINK_INPUT_RUNNING) {
1808 continue;
1809 }
1810 const char *streamType = safeProplistGets(input->proplist, "stream.type", "NULL");
1811 const char *sessionIDStr = safeProplistGets(input->proplist, "stream.sessionID", "NULL");
1812 const char *deviceClass = GetDeviceClass(u->primary.sinkAdapter->deviceClass);
1813 uint32_t sessionID = sessionIDStr != NULL ? atoi(sessionIDStr) : 0;
1814 float volume = GetCurVolume(sessionID, streamType, deviceClass);
1815 bool isZeroVolume = IsSameVolume(volume, 0.0f);
1816 if (!strcmp(u->sink->name, "Speaker") && !isZeroVolume) {
1817 g_speakerPaAllStreamVolumeZero = false;
1818 g_speakerPaAllStreamStartVolZeroTime = 0;
1819 break;
1820 }
1821 }
1822
1823 if (g_speakerPaAllStreamVolumeZero && !g_paHaveDisabled && (g_speakerPaAllStreamStartVolZeroTime == 0) &&
1824 PA_SINK_IS_RUNNING(u->sink->thread_info.state)) {
1825 AUDIO_INFO_LOG("Timing begins, will close speaker after [%{public}d]s", WAIT_CLOSE_PA_OR_EFFECT_TIME);
1826 time(&g_speakerPaAllStreamStartVolZeroTime);
1827 }
1828 if (g_speakerPaAllStreamVolumeZero && PA_SINK_IS_RUNNING(u->sink->thread_info.state) &&
1829 difftime(currentTime, g_speakerPaAllStreamStartVolZeroTime) > WAIT_CLOSE_PA_OR_EFFECT_TIME) {
1830 HandleClosePa(u);
1831 } else {
1832 HandleOpenPa(u);
1833 }
1834
1835 if (g_speakerPaHaveClosed && difftime(currentTime, g_speakerPaClosedTime) >= MONITOR_CLOSE_PA_TIME_SEC) {
1836 time(&g_speakerPaClosedTime);
1837 AUDIO_INFO_LOG("Speaker pa have closed [%{public}d]s.", MONITOR_CLOSE_PA_TIME_SEC);
1838 }
1839 }
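/*
 * Editorial note (not in the original): the speaker PA follows the same idea as the
 * per-scene effect bypass above: if only the primary Speaker sink is loaded and all of its
 * running streams stay at zero volume for WAIT_CLOSE_PA_OR_EFFECT_TIME seconds, the PA
 * power is cut via RendererSinkSetPaPower(..., 0) and re-enabled as soon as a non-zero
 * volume or a new routing shows up.
 */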
1840
1841 static void UpdateStreamAvailableMap(struct Userdata *u, const char *sceneType)
1842 {
1843 if (u->streamAvailableMap == NULL) {
1844 AUDIO_ERR_LOG("streamAvailableMap is null");
1845 return;
1846 }
1847 uint32_t *num = (uint32_t *)pa_hashmap_get(u->streamAvailableMap, sceneType);
1848 if (num == NULL) {
1849 num = pa_xnew0(uint32_t, 1);
1850 (*num) = 0;
1851 }
1852 int32_t fadeDirection = (u->streamAvailable != 0) && ((*num) == 0) ? 0 :
1853 (u->streamAvailable == 0 && (*num) != 0) ? 1 : -1;
1854 int32_t outLength = u->bufferAttr->frameLen * u->bufferAttr->numChanOut * sizeof(float);
1855 if (fadeDirection != -1) {
1856 AUDIO_INFO_LOG("do %{public}s for MIXED DATA", fadeDirection ? "fade-out" : "fade-in");
1857 DoFading(u->bufferAttr->bufOut, outLength, (uint32_t)SAMPLE_F32, (uint32_t)u->ss.channels, fadeDirection);
1858 }
1859 if (u->streamAvailable == 0 && (*num) == 0) {
1860 memset_s(u->bufferAttr->bufOut, outLength, 0, outLength);
1861 }
1862
1863 if (pa_hashmap_get(u->streamAvailableMap, sceneType) != NULL) {
1864 (*num) = u->streamAvailable;
1865 } else {
1866 char *scene = strdup(sceneType);
1867 if (scene != NULL) {
1868 (*num) = u->streamAvailable;
1869 if (pa_hashmap_put(u->streamAvailableMap, scene, num) != 0) {
1870 AUDIO_ERR_LOG("pa_hashmap_put failed");
1871 free(scene);
1872 pa_xfree(num);
1873 }
1874 }
1875 }
1876 }
1877
1878 static void PrimaryEffectProcess(struct Userdata *u, pa_memchunk *chunkIn, char *sinkSceneType)
1879 {
1880 AUTO_CTRACE("hdi_sink::EffectChainManagerProcess:%s", sinkSceneType);
1881 EffectChainManagerProcess(sinkSceneType, u->bufferAttr);
1882 UpdateStreamAvailableMap(u, sinkSceneType);
1883 for (int32_t k = 0; k < u->bufferAttr->frameLen * u->bufferAttr->numChanOut; k++) {
1884 u->bufferAttr->tempBufOut[k] += u->bufferAttr->bufOut[k];
1885 }
1886 pa_memblock_release(chunkIn->memblock);
1887 u->bufferAttr->numChanIn = DEFAULT_IN_CHANNEL_NUM;
1888 }
1889
1890 static void *AllocateBuffer(size_t size)
1891 {
1892 if (size > 0 && size <= sizeof(float) * DEFAULT_FRAMELEN * IN_CHANNEL_NUM_MAX) {
1893 return malloc(size);
1894 } else {
1895 return NULL;
1896 }
1897 }
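/*
 * Editorial note (illustrative arithmetic, not from the original): the upper bound is
 * sizeof(float) * DEFAULT_FRAMELEN * IN_CHANNEL_NUM_MAX = 4 * 2048 * 16 = 131072 bytes,
 * i.e. one 2048-frame float buffer for up to 16 channels; requests outside (0, 131072]
 * return NULL so a bogus processSize cannot trigger a huge allocation.
 */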
1898
1899 static bool AllocateEffectBuffer(struct Userdata *u)
1900 {
1901 if (u->bufferAttr == NULL) {
1902 return false;
1903 }
1904 float **buffers[] = { &u->bufferAttr->bufIn, &u->bufferAttr->bufOut,
1905 &u->bufferAttr->tempBufIn, &u->bufferAttr->tempBufOut };
1906 size_t numBuffers = sizeof(buffers) / sizeof(buffers[0]);
1907 for (size_t i = 0; i < numBuffers; i++) {
1908 *buffers[i] = (float *)AllocateBuffer(u->processSize);
1909 if (*buffers[i] == NULL) {
1910 AUDIO_ERR_LOG("failed to allocate effect buffer");
1911 FreeEffectBuffer(u);
1912 return false;
1913 }
1914 }
1915 return true;
1916 }
1917
1918 static void FreeEffectBuffer(struct Userdata *u)
1919 {
1920 if (u->bufferAttr == NULL) {
1921 return;
1922 }
1923 float **buffers[] = { &u->bufferAttr->bufIn, &u->bufferAttr->bufOut,
1924 &u->bufferAttr->tempBufIn, &u->bufferAttr->tempBufOut };
1925 size_t numBuffers = sizeof(buffers) / sizeof(buffers[0]);
1926 for (size_t i = 0; i < numBuffers; i++) {
1927 free(*buffers[i]);
1928 *buffers[i] = NULL;
1929 }
1930 }
1931
1932 static void ResetBufferAttr(struct Userdata *u)
1933 {
1934 size_t memsetInLen = sizeof(float) * DEFAULT_FRAMELEN * IN_CHANNEL_NUM_MAX;
1935 size_t memsetOutLen = sizeof(float) * DEFAULT_FRAMELEN * OUT_CHANNEL_NUM_MAX;
1936 if (memset_s(u->bufferAttr->tempBufIn, u->processSize, 0, memsetInLen) != EOK) {
1937 AUDIO_WARNING_LOG("SinkRenderBufIn memset_s failed");
1938 }
1939 if (memset_s(u->bufferAttr->tempBufOut, u->processSize, 0, memsetOutLen) != EOK) {
1940 AUDIO_WARNING_LOG("SinkRenderBufOut memset_s failed");
1941 }
1942 }
1943
1944 static void SinkRenderPrimaryProcess(pa_sink *si, size_t length, pa_memchunk *chunkIn)
1945 {
1946 if (GetInnerCapturerState()) {
1947 pa_memchunk capResult;
1948 SinkRenderCapProcess(si, length, &capResult);
1949 pa_memblock_unref(capResult.memblock);
1950 }
1951
1952 struct Userdata *u;
1953 pa_assert_se(u = si->userdata);
1954
1955 ResetBufferAttr(u);
1956 int32_t bitSize = (int32_t)pa_sample_size_of_format(u->format);
1957 chunkIn->memblock = pa_memblock_new(si->core->mempool, length * IN_CHANNEL_NUM_MAX / DEFAULT_IN_CHANNEL_NUM);
1958 time_t currentTime = time(NULL);
1959 PrepareSpatializationFading(&u->spatializationFadingState, &u->spatializationFadingCount,
1960 &u->actualSpatializationEnabled);
1961 g_effectProcessFrameCount++;
1962 u->streamAvailable = 0;
1963 for (int32_t i = 0; i < SCENE_TYPE_NUM; i++) {
1964 uint32_t processChannels = DEFAULT_NUM_CHANNEL;
1965 uint64_t processChannelLayout = DEFAULT_CHANNELLAYOUT;
1966 EffectChainManagerReturnEffectChannelInfo(SCENE_TYPE_SET[i], &processChannels, &processChannelLayout);
1967 char *sinkSceneType = CheckAndDealEffectZeroVolume(u, currentTime, i);
1968 size_t tmpLength = length * processChannels / DEFAULT_IN_CHANNEL_NUM;
1969 chunkIn->index = 0;
1970 chunkIn->length = tmpLength;
1971 int32_t nSinkInput = SinkRenderPrimaryGetData(si, chunkIn, SCENE_TYPE_SET[i]);
1972 if (nSinkInput == 0) { continue; }
1973 chunkIn->index = 0;
1974 chunkIn->length = tmpLength;
1975 void *src = pa_memblock_acquire_chunk(chunkIn);
1976 int32_t frameLen = bitSize > 0 ? ((int32_t)tmpLength / bitSize) : 0;
1977
1978 ConvertToFloat(u->format, frameLen, src, u->bufferAttr->tempBufIn);
1979 memcpy_s(u->bufferAttr->bufIn, frameLen * sizeof(float), u->bufferAttr->tempBufIn, frameLen * sizeof(float));
1980 u->bufferAttr->numChanIn = (int32_t)processChannels;
1981 u->bufferAttr->frameLen = frameLen / u->bufferAttr->numChanIn;
1982 PrimaryEffectProcess(u, chunkIn, sinkSceneType);
1983 }
1984 if (g_effectProcessFrameCount == PRINT_INTERVAL_FRAME_COUNT) { g_effectProcessFrameCount = 0; }
1985 CheckAndDealSpeakerPaZeroVolume(u, currentTime);
1986 SinkRenderPrimaryAfterProcess(si, length, chunkIn);
1987 }
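/*
 * Editorial note (not in the original): each of the SCENE_TYPE_NUM scene types is rendered
 * and effect-processed separately, and PrimaryEffectProcess() accumulates every scene's
 * output into tempBufOut; SinkRenderPrimaryAfterProcess() then applies the spatialization
 * fade and converts the accumulated float mix back into the sink's output format in chunkIn.
 */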
1988
1989 static void SinkRenderPrimary(pa_sink *si, size_t length, pa_memchunk *chunkIn)
1990 {
1991 pa_sink_ref(si);
1992
1993 size_t blockSizeMax;
1994
1995 pa_sink_assert_ref(si);
1996 pa_sink_assert_io_context(si);
1997 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
1998 pa_assert(length > 0);
1999 pa_assert(pa_frame_aligned(length, &si->sample_spec));
2000 pa_assert(chunkIn);
2001
2002 pa_assert(!si->thread_info.rewind_requested);
2003 pa_assert(si->thread_info.rewind_nbytes == 0);
2004
2005 if (si->thread_info.state == PA_SINK_SUSPENDED) {
2006 chunkIn->memblock = pa_memblock_ref(si->silence.memblock);
2007 chunkIn->index = si->silence.index;
2008 chunkIn->length = PA_MIN(si->silence.length, length);
2009 return;
2010 }
2011
2012 if (length == 0)
2013 length = pa_frame_align(MIX_BUFFER_LENGTH, &si->sample_spec);
2014
2015 blockSizeMax = pa_mempool_block_size_max(si->core->mempool);
2016 if (length > blockSizeMax)
2017 length = pa_frame_align(blockSizeMax, &si->sample_spec);
2018
2019 pa_assert(length > 0);
2020 AUTO_CTRACE("hdi_sink::SinkRenderPrimaryProcess:len:%zu", length);
2021 SinkRenderPrimaryProcess(si, length, chunkIn);
2022
2023 pa_sink_unref(si);
2024 }
2025
2026 static void SetSinkVolumeByDeviceClass(pa_sink *s, const char *deviceClass)
2027 {
2028 pa_assert(s);
2029 void *state = NULL;
2030 pa_sink_input *input;
2031 while ((input = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
2032 pa_sink_input_assert_ref(input);
2033 if (input->thread_info.state != PA_SINK_INPUT_RUNNING) {
2034 continue;
2035 }
2036 const char *streamType = safeProplistGets(input->proplist, "stream.type", "NULL");
2037 const char *sessionIDStr = safeProplistGets(input->proplist, "stream.sessionID", "NULL");
2038 uint32_t sessionID = sessionIDStr != NULL ? atoi(sessionIDStr) : 0;
2039 float volumeEnd = GetCurVolume(sessionID, streamType, deviceClass);
2040 float volumeBeg = GetPreVolume(sessionID);
2041 if (volumeBeg != volumeEnd) {
2042 AUDIO_INFO_LOG("sessionID:%{public}s, volumeBeg:%{public}f, volumeEnd:%{public}f",
2043 sessionIDStr, volumeBeg, volumeEnd);
2044 SetPreVolume(sessionID, volumeEnd);
2045 MonitorVolume(sessionID, true);
2046 }
2047 uint32_t volume = pa_sw_volume_from_linear(volumeEnd);
2048 pa_cvolume_set(&input->thread_info.soft_volume, input->thread_info.soft_volume.channels, volume);
2049 }
2050 }
2051
2052 static void UnsetSinkVolume(pa_sink *s)
2053 {
2054 pa_assert(s);
2055 void *state = NULL;
2056 pa_sink_input *input;
2057 while ((input = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
2058 pa_sink_input_assert_ref(input);
2059 if (input->thread_info.state != PA_SINK_INPUT_RUNNING) {
2060 continue;
2061 }
2062 uint32_t volume = pa_sw_volume_from_linear(1.0f);
2063 pa_cvolume_set(&input->thread_info.soft_volume, input->thread_info.soft_volume.channels, volume);
2064 }
2065 }
2066
2067 static void ProcessRenderUseTiming(struct Userdata *u, pa_usec_t now)
2068 {
2069 pa_assert(u);
2070
2071 // Fill the buffer up to the latency size
2072 pa_memchunk chunk;
2073
2074 AUTO_CTRACE("hdi_sink::SinkRenderPrimary");
2075 // Change from pa_sink_render to pa_sink_render_full for alignment issue in 3516
2076
2077 if (!strcmp(u->sink->name, DP_SINK_NAME) && u->render_full_enable) {
2078 // dp update volume
2079 SetSinkVolumeByDeviceClass(u->sink, GetDeviceClass(u->primary.sinkAdapter->deviceClass));
2080 pa_sink_render_full(u->sink, u->sink->thread_info.max_request, &chunk); // only work for dp more than 2ch
2081 UnsetSinkVolume(u->sink); // reset volume 1.0f
2082 } else {
2083 if (u->isEffectBufferAllocated || AllocateEffectBuffer(u)) {
2084 u->isEffectBufferAllocated = true;
2085 SinkRenderPrimary(u->sink, u->sink->thread_info.max_request, &chunk);
2086 }
2087 }
2088 pa_assert(chunk.length > 0);
2089
2090 StartPrimaryHdiIfRunning(u);
2091 pa_asyncmsgq_post(u->primary.dq, NULL, HDI_RENDER, NULL, 0, &chunk, NULL);
2092 u->primary.timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);
2093 }
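/*
 * Editorial note (not in the original): the rendered chunk is handed to the HDI writer
 * thread through the asyncmsgq (HDI_RENDER) rather than written inline, and
 * primary.timestamp advances by the chunk's duration so the timer-driven loop knows when
 * the next period is due.
 */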
2094
2095 static bool InputIsOffload(pa_sink_input *i)
2096 {
2097 if (monitorLinked(i->sink, true)) {
2098 return false;
2099 }
2100 if (strncmp(i->sink->driver, "module_hdi_sink", 15)) { // 15 cmp length
2101 return false;
2102 }
2103 struct Userdata *u = i->sink->userdata;
2104 if (!u->offload_enable || !u->offload.inited) {
2105 return false;
2106 }
2107 const char *offloadEnableStr = pa_proplist_gets(i->proplist, "stream.offload.enable");
2108 if (offloadEnableStr == NULL) {
2109 return false;
2110 }
2111 const bool offloadEnable = !strcmp(offloadEnableStr, "1");
2112 return offloadEnable;
2113 }
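/*
 * Hedged sketch (assumption about the stream side, not shown in this file): an input only
 * qualifies as offload when the client tagged it in its proplist, for example:
 *
 *   pa_proplist_sets(proplist, "stream.offload.enable", "1");
 *
 * and additionally the offload adapter is initialized and no monitor source is linked to
 * the sink.
 */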
2114
2115 static bool InputIsMultiChannel(pa_sink_input *i)
2116 {
2117 bool effectOffloadFlag = EffectChainManagerCheckEffectOffload();
2118 if (effectOffloadFlag) {
2119 int32_t sinkChannels = i->sample_spec.channels;
2120 const char *sinkSceneType = pa_proplist_gets(i->proplist, "scene.type");
2121 const char *sinkSceneMode = pa_proplist_gets(i->proplist, "scene.mode");
2122 const char *sinkSpatializationEnabled = pa_proplist_gets(i->proplist, "spatialization.enabled");
2123 bool existFlag = EffectChainManagerExist(sinkSceneType, sinkSceneMode, sinkSpatializationEnabled);
2124 if (!existFlag && sinkChannels > PRIMARY_CHANNEL_NUM) {
2125 return true;
2126 }
2127 }
2128 return false;
2129 }
2130
2131 static bool InputIsPrimary(pa_sink_input *i)
2132 {
2133 const bool isOffload = InputIsOffload(i);
2134 const bool isRunning = i->thread_info.state == PA_SINK_INPUT_RUNNING;
2135 return !isOffload && isRunning;
2136 }
2137
2138 static unsigned GetInputsInfo(enum HdiInputType type, bool isRun, pa_sink *s, pa_mix_info *info, unsigned maxinfo)
2139 {
2140 pa_sink_input *i;
2141 unsigned n = 0;
2142 void *state = NULL;
2143
2144 pa_sink_assert_ref(s);
2145 pa_sink_assert_io_context(s);
2146 pa_assert(s);
2147
2148 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
2149 pa_sink_input_assert_ref(i);
2150
2151 bool flag = false;
2152 const bool isOffload = InputIsOffload(i);
2153 const bool isHD = false; // add func is hd
2154 const bool isRunning = i->thread_info.state == PA_SINK_INPUT_RUNNING;
2155 if (isRun && !isRunning) {
2156 continue;
2157 }
2158 switch (type) {
2159 case HDI_INPUT_TYPE_PRIMARY:
2160 flag = !isOffload && !isHD;
2161 break;
2162 case HDI_INPUT_TYPE_OFFLOAD:
2163 flag = isOffload;
2164 break;
2165 case HDI_INPUT_TYPE_MULTICHANNEL:
2166 flag = isHD;
2167 break;
2168 default:
2169 break;
2170 }
2171 if (flag) {
2172 info->userdata = pa_sink_input_ref(i);
2173 } else {
2174 const char *sessionIDStr = safeProplistGets(i->proplist, "stream.sessionID", "NULL");
2175 AUDIO_PRERELEASE_LOGE("sink: %{public}s, sink_input: %{public}s, the type is not %{public}d",
2176 s->name, sessionIDStr, type);
2177 continue;
2178 }
2179
2180 info++;
2181 n++;
2182 maxinfo--;
2183 }
2184 return n;
2185 }
2186
2187 static int32_t GetInputsType(pa_sink *s, unsigned *nPrimary, unsigned *nOffload,
2188 unsigned *nMultiChannel, bool isRunning)
2189 {
2190 int ret;
2191 struct Userdata *u;
2192 pa_assert_se(u = s->userdata);
2193 if ((ret = pthread_mutex_lock(&u->mutexPa2)) != 0) {
2194 AUDIO_WARNING_LOG("GetInputsType pthread_mutex_lock ret %d", ret);
2195 }
2196 pa_sink_input *i;
2197 void *state = NULL;
2198 *nPrimary = 0;
2199 *nOffload = 0;
2200 *nMultiChannel = 0;
2201 int n = 0;
2202
2203 pa_sink_assert_ref(s);
2204 pa_sink_assert_io_context(s);
2205 pa_assert(s);
2206 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
2207 pa_sink_input_assert_ref(i);
2208 if (isRunning && i->thread_info.state != PA_SINK_INPUT_RUNNING) {
2209 continue;
2210 }
2211 n++;
2212 if (InputIsOffload(i)) {
2213 (*nOffload)++;
2214 } else if (!strcmp(u->sink->name, MCH_SINK_NAME) && InputIsMultiChannel(i)) {
2215 (*nMultiChannel)++;
2216 } else {
2217 (*nPrimary)++;
2218 }
2219 }
2220 if ((ret = pthread_mutex_unlock(&u->mutexPa2)) != 0) {
2221 AUDIO_WARNING_LOG("GetInputsType pthread_mutex_unlock ret %d", ret);
2222 }
2223 return n;
2224 }
2225
2226 static size_t GetOffloadRenderLength(struct Userdata *u, pa_sink_input *i, bool *wait)
2227 {
2228 size_t length;
2229 playback_stream *ps = i->userdata;
2230 const bool b = (bool)ps->sink_input->thread_info.resampler;
2231 const pa_sample_spec sampleSpecIn = b ? ps->sink_input->thread_info.resampler->i_ss : ps->sink_input->sample_spec;
2232 const pa_sample_spec sampleSpecOut = b ? ps->sink_input->thread_info.resampler->o_ss : ps->sink_input->sample_spec;
2233 const int statePolicy = GetInputPolicyState(i);
2234 u->offload.prewrite = (statePolicy == OFFLOAD_INACTIVE_BACKGROUND ?
2235 OFFLOAD_HDI_CACHE2_PLUS : OFFLOAD_HDI_CACHE1_PLUS) * PA_USEC_PER_MSEC;
2236 size_t sizeFrame = pa_frame_align(pa_usec_to_bytes(OFFLOAD_FRAME_SIZE * PA_USEC_PER_MSEC, &sampleSpecOut),
2237 &sampleSpecOut);
2238 size_t tlengthHalfResamp = pa_frame_align(pa_usec_to_bytes(pa_bytes_to_usec(pa_memblockq_get_tlength(
2239 ps->memblockq) / 1.5, &sampleSpecIn), &sampleSpecOut), &sampleSpecOut); // 1.5 for half
2240 size_t sizeTgt = PA_MIN(sizeFrame, tlengthHalfResamp);
2241 const size_t bql = pa_memblockq_get_length(ps->memblockq);
2242 const size_t bqlResamp = pa_usec_to_bytes(pa_bytes_to_usec(bql, &sampleSpecIn), &sampleSpecOut);
2243 const size_t bqlRend = pa_memblockq_get_length(i->thread_info.render_memblockq);
2244 const size_t bqlAlin = pa_frame_align(bqlResamp + bqlRend, &sampleSpecOut);
2245
2246 if (ps->drain_request) {
2247 if (i->thread_info.render_memblockq->maxrewind != 0) {
2248 pa_sink_input_update_max_rewind(i, 0);
2249 }
2250 const uint64_t hdiPos = u->offload.hdiPos + (pa_rtclock_now() - u->offload.hdiPosTs);
2251 *wait = u->offload.pos > hdiPos + HDI_MIN_MS_MAINTAIN * PA_USEC_PER_MSEC ? true : false;
2252 length = u->offload.pos > hdiPos + HDI_MIN_MS_MAINTAIN * PA_USEC_PER_MSEC ? 0 : sizeFrame;
2253 } else {
2254 bool waitable = false;
2255 const uint64_t hdiPos = u->offload.hdiPos + (pa_rtclock_now() - u->offload.hdiPosTs);
2256 if (u->offload.pos > hdiPos + 60 * PA_USEC_PER_MSEC) { // if hdi cache < 60ms, indicate no enough data
2257 // when the HDI has only ~100ms left, process_complete_msg is triggered, which leads to stutter; the starting time could also cause it.
2258 waitable = true;
2259 }
2260 length = PA_MIN(bqlAlin, sizeTgt);
2261 *wait = false;
2262 if (length < sizeTgt) {
2263 *wait = waitable || length == 0;
2264 length = waitable ? 0 : length;
2265 if (ps->memblockq->missing > 0) {
2266 playback_stream_request_bytes(ps);
2267 }
2268 }
2269 }
2270 return length;
2271 }
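/*
 * Worked example (illustrative, assuming a 48 kHz / 2-channel / 16-bit output spec):
 * sizeFrame covers OFFLOAD_FRAME_SIZE = 40 ms, i.e. 48000 * 0.040 * 2 * 2 = 7680 bytes per
 * write. prewrite is the HDI cache depth being targeted: about 245 ms
 * (OFFLOAD_HDI_CACHE1_PLUS) for active streams and about 7045 ms (OFFLOAD_HDI_CACHE2_PLUS)
 * for inactive background streams, which is why background offload can sleep much longer
 * between writes.
 */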
2272
2273 static void InputsDropFromInputs2(pa_mix_info *info, unsigned n)
2274 {
2275 for (; n > 0; info++, n--) {
2276 if (info->userdata) {
2277 pa_sink_input_unref(info->userdata);
2278 info->userdata = NULL;
2279 }
2280 if (info->chunk.memblock) {
2281 pa_memblock_unref(info->chunk.memblock);
2282 }
2283 }
2284 }
2285
2286 static void InputsDropFromInputs(pa_mix_info *infoInputs, unsigned nInputs, pa_mix_info *info, unsigned n,
2287 pa_memchunk *result)
2288 {
2289 unsigned p = 0;
2290 unsigned ii = 0;
2291 unsigned nUnreffed = 0;
2292 pa_assert(result && result->memblock && result->length > 0);
2293 /* We optimize for the case where the order of the inputs has not changed */
2294
2295 for (ii = 0; ii < nInputs; ++ii) {
2296 pa_sink_input *i = infoInputs[ii].userdata;
2297 unsigned j;
2298 pa_mix_info *m = NULL;
2299
2300 pa_sink_input_assert_ref(i);
2301
2302 /* Let's try to find the matching entry in the pa_mix_info array */
2303 for (j = 0; j < n; j++) {
2304 if (info[p].userdata == i) {
2305 m = info + p;
2306 break;
2307 }
2308
2309 p++;
2310 if (p >= n) {
2311 p = 0;
2312 }
2313 }
2314
2315 /* Drop read data */
2316 pa_sink_input_drop(i, result->length);
2317
2318 if (m) {
2319 if (m->chunk.memblock) {
2320 pa_memblock_unref(m->chunk.memblock);
2321 pa_memchunk_reset(&m->chunk);
2322 }
2323
2324 pa_sink_input_unref(m->userdata);
2325 m->userdata = NULL;
2326
2327 nUnreffed += 1;
2328 }
2329 }
2330
2331 /* Now drop references to entries that are included in the
2332 * pa_mix_info array but don't exist anymore */
2333
2334 if (nUnreffed < n) {
2335 for (; n > 0; info++, n--) {
2336 if (info->userdata)
2337 pa_sink_input_unref(info->userdata);
2338 if (info->chunk.memblock) {
2339 pa_memblock_unref(info->chunk.memblock);
2340 }
2341 }
2342 }
2343 }
2344
2345 static void PaSinkRenderIntoOffload(pa_sink *s, pa_mix_info *infoInputs, unsigned nInputs, pa_memchunk *target)
2346 {
2347 unsigned n = 0;
2348 unsigned ii = 0;
2349 pa_mix_info info[MAX_MIX_CHANNELS];
2350 pa_sink_assert_ref(s);
2351 pa_sink_assert_io_context(s);
2352
2353 size_t length = target->length;
2354 size_t mixlength = length;
2355 size_t blockSizeMax = pa_mempool_block_size_max(s->core->mempool);
2356 if (length > blockSizeMax)
2357 length = pa_frame_align(blockSizeMax, &s->sample_spec);
2358
2359 pa_assert(length > 0);
2360
2361 for (ii = 0; ii < nInputs; ++ii) {
2362 pa_sink_input *i = infoInputs[ii].userdata;
2363 pa_sink_input_assert_ref(i);
2364 AUTO_CTRACE("hdi_sink::Offload:pa_sink_input_peek:%u len:%zu", i->index, length);
2365 pa_sink_input_peek(i, length, &info[n].chunk, &info[n].volume);
2366 if (mixlength == 0 || info[n].chunk.length < mixlength)
2367 mixlength = info[n].chunk.length;
2368
2369 if (pa_memblock_is_silence(info[n].chunk.memblock)) {
2370 AUTO_CTRACE("hdi_sink::Offload::is_silence");
2371 pa_memblock_unref(info[n].chunk.memblock);
2372 continue;
2373 }
2374
2375 info[n].userdata = pa_sink_input_ref(i);
2376
2377 pa_assert(info[n].chunk.memblock);
2378 pa_assert(info[n].chunk.length > 0);
2379
2380 n++;
2381 }
2382 length = mixlength > 0 ? mixlength : length;
2383
2384 pa_assert(n == 1 || n == 0);
2385 if (n == 0) {
2386 if (target->length > length)
2387 target->length = length;
2388
2389 pa_silence_memchunk(target, &s->sample_spec);
2390 } else if (n == 1) {
2391 if (target->length > length)
2392 target->length = length;
2393
2394 pa_memchunk vchunk;
2395 vchunk = info[0].chunk;
2396
2397 if (vchunk.length > length)
2398 vchunk.length = length;
2399 // if target leads to a pa_memblock_new memory leak, a fixed chunk length can solve it.
2400 pa_memchunk_memcpy(target, &vchunk);
2401 }
2402
2403 InputsDropFromInputs(infoInputs, nInputs, info, n, target);
2404 }
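/*
 * Editorial note (not in the original): the offload path expects at most one input per
 * sink, hence the n == 0 || n == 1 assertion above; with a single non-silent input the
 * chunk is copied straight into the target instead of being mixed.
 */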
2405
2406 static void OffloadReset(struct Userdata *u)
2407 {
2408 u->offload.sessionID = -1;
2409 u->offload.pos = 0;
2410 u->offload.hdiPos = 0;
2411 u->offload.hdiPosTs = pa_rtclock_now();
2412 u->offload.prewrite = OFFLOAD_HDI_CACHE1_PLUS * PA_USEC_PER_MSEC;
2413 u->offload.firstWriteHdi = true;
2414 u->offload.setHdiBufferSizeNum = OFFLOAD_SET_BUFFER_SIZE_NUM;
2415 pa_atomic_store(&u->offload.hdistate, 0);
2416 u->offload.fullTs = 0;
2417 }
2418
2419 static int32_t RenderWriteOffloadFunc(struct Userdata *u, size_t length, pa_mix_info *infoInputs, unsigned nInputs,
2420 int32_t *writen)
2421 {
2422 pa_sink_input *i = infoInputs[0].userdata;
2423
2424 pa_assert(length != 0);
2425 pa_memchunk *chunk = &(u->offload.chunk);
2426 chunk->index = 0;
2427 chunk->length = length;
2428 int64_t l;
2429 int64_t d;
2430 l = (int64_t)chunk->length;
2431 size_t blockSize = pa_memblock_get_length(u->offload.chunk.memblock);
2432 blockSize = PA_MAX(blockSize, pa_usec_to_bytes(0.6 * OFFLOAD_HDI_CACHE1 * PA_USEC_PER_MSEC, // 0.6 40% is hdi limit
2433 &u->sink->sample_spec));
2434 d = 0;
2435 while (l > 0) {
2436 pa_memchunk tchunk;
2437 tchunk = *chunk;
2438 tchunk.index += (size_t)d;
2439 tchunk.length = PA_MIN((size_t)l, blockSize - tchunk.index);
2440
2441 PaSinkRenderIntoOffload(i->sink, infoInputs, nInputs, &tchunk);
2442 d += (int64_t)tchunk.length;
2443 l -= (int64_t)tchunk.length;
2444 }
2445 if (l < 0) {
2446 chunk->length += (size_t)-l;
2447 }
2448
2449 int32_t appsUid[MAX_MIX_CHANNELS];
2450 size_t count = 0;
2451
2452 const char *cstringClientUid = pa_proplist_gets(i->proplist, "stream.client.uid");
2453 if (cstringClientUid && (i->thread_info.state == PA_SINK_INPUT_RUNNING)) {
2454 appsUid[count++] = atoi(cstringClientUid);
2455 }
2456
2457 SafeRendererSinkUpdateAppsUid(u->offload.sinkAdapter, appsUid, count);
2458
2459 int ret = RenderWriteOffload(u, i, chunk);
2460 *writen = ret == 0 ? (int32_t)chunk->length : 0;
2461 if (ret == 1) { // 1 indicates full
2462 const int hdistate = pa_atomic_load(&u->offload.hdistate);
2463 if (hdistate == 0) {
2464 pa_atomic_store(&u->offload.hdistate, 1);
2465 }
2466 if (GetInputPolicyState(i) == OFFLOAD_INACTIVE_BACKGROUND) {
2467 u->offload.fullTs = pa_rtclock_now();
2468 }
2469 pa_memblockq_rewind(i->thread_info.render_memblockq, chunk->length);
2470 } else if (ret < 0) {
2471 pa_memblockq_rewind(i->thread_info.render_memblockq, chunk->length);
2472 }
2473
2474 u->offload.pos += pa_bytes_to_usec(*writen, &u->sink->sample_spec);
2475 InputsDropFromInputs2(infoInputs, nInputs);
2476 return ret;
2477 }
2478
2479 static int32_t ProcessRenderUseTimingOffload(struct Userdata *u, bool *wait, int32_t *nInput, int32_t *writen)
2480 {
2481 *wait = true;
2482 pa_sink *s = u->sink;
2483 pa_mix_info infoInputs[MAX_MIX_CHANNELS];
2484 unsigned nInputs;
2485
2486 pa_sink_assert_ref(s);
2487 pa_sink_assert_io_context(s);
2488 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2489
2490 pa_assert(s->thread_info.rewind_nbytes == 0);
2491
2492 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2493 return 0;
2494 }
2495
2496 pa_sink_ref(s);
2497
2498 nInputs = GetInputsInfo(HDI_INPUT_TYPE_OFFLOAD, true, s, infoInputs, MAX_MIX_CHANNELS);
2499 *nInput = (int32_t)nInputs;
2500
2501 if (nInputs == 0) {
2502 pa_sink_unref(s);
2503 return 0;
2504 } else if (nInputs > 1) {
2505 AUDIO_WARNING_LOG("GetInputsInfo offload input != 1");
2506 }
2507
2508 pa_sink_input *i = infoInputs[0].userdata;
2509 if (GetFadeoutState(i->index) != NO_FADE) {
2510 InputsDropFromInputs2(infoInputs, nInputs);
2511 pa_sink_unref(s);
2512 AUDIO_WARNING_LOG("stream is croked, do not need peek");
2513 return 0;
2514 }
2515 CheckInputChangeToOffload(u, i);
2516 size_t length = GetOffloadRenderLength(u, i, wait);
2517 if (*wait && length == 0) {
2518 InputsDropFromInputs2(infoInputs, nInputs);
2519 AUTO_CTRACE("hdi_sink::ProcessRenderUseTimingOffload::underrun");
2520 pa_sink_input_handle_ohos_underrun(i);
2521 pa_sink_unref(s);
2522 return 0;
2523 }
2524 int ret = RenderWriteOffloadFunc(u, length, infoInputs, nInputs, writen);
2525 pa_sink_unref(s);
2526 return ret;
2527 }
2528
2529 static int32_t UpdatePresentationPosition(struct Userdata *u)
2530 {
2531 uint64_t frames;
2532 int64_t timeSec;
2533 int64_t timeNanoSec;
2534 int ret = u->offload.sinkAdapter->RendererSinkGetPresentationPosition(
2535 u->offload.sinkAdapter, &frames, &timeSec, &timeNanoSec);
2536 if (ret != 0) {
2537 AUDIO_ERR_LOG("RendererSinkGetPresentationPosition fail, ret %d", ret);
2538 return ret;
2539 }
2540 u->offload.hdiPos = frames;
2541 u->offload.hdiPosTs = (uint64_t)timeSec * USEC_PER_SEC + (uint64_t)timeNanoSec / PA_NSEC_PER_USEC;
2542 return 0;
2543 }
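/*
 * Editorial note (not in the original): hdiPos is the last playback position reported by
 * the HDI (used as microseconds by the arithmetic in this file) and hdiPosTs is the
 * wall-clock time it was sampled, so callers extrapolate the current position as
 *
 *   uint64_t nowPos = u->offload.hdiPos + (pa_rtclock_now() - u->offload.hdiPosTs);
 *
 * which is the pattern used in GetOffloadRenderLength() and the offload timer thread.
 */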
2544
2545 static void OffloadRewindAndFlush(struct Userdata *u, pa_sink_input *i, bool afterRender)
2546 {
2547 pa_sink_input_assert_ref(i);
2548 playback_stream *ps = i->userdata;
2549 pa_assert(ps);
2550
2551 OffloadLock(u); // flush will interrupt the offload callback, may be offload unlock.
2552 int ret = UpdatePresentationPosition(u);
2553 u->offload.sinkAdapter->RendererSinkFlush(u->offload.sinkAdapter);
2554 if (ret == 0) {
2555 uint64_t offloadFade = 180000; // 180000 us fade out
2556 uint64_t cacheLenInHdi =
2557 u->offload.pos > u->offload.hdiPos + offloadFade ? u->offload.pos - u->offload.hdiPos - offloadFade : 0;
2558 uint64_t bufSizeInRender = pa_usec_to_bytes(cacheLenInHdi, &i->sink->sample_spec);
2559 const pa_sample_spec sampleSpecIn = i->thread_info.resampler ? i->thread_info.resampler->i_ss
2560 : i->sample_spec;
2561 uint64_t bufSizeInInput = pa_usec_to_bytes(cacheLenInHdi, &sampleSpecIn);
2562 bufSizeInInput += pa_usec_to_bytes(pa_bytes_to_usec(
2563 pa_memblockq_get_length(i->thread_info.render_memblockq), &i->sink->sample_spec), &sampleSpecIn);
2564 uint64_t rewindSize = afterRender ? bufSizeInRender : bufSizeInInput;
2565 uint64_t maxRewind = afterRender ? i->thread_info.render_memblockq->maxrewind : ps->memblockq->maxrewind;
2566 if (rewindSize > maxRewind) {
2567 AUDIO_WARNING_LOG("OffloadRewindAndFlush, rewindSize(%" PRIu64 ") > maxrewind(%u), afterRender(%d)",
2568 rewindSize, (uint32_t)(afterRender ? i->thread_info.render_memblockq->maxrewind :
2569 ps->memblockq->maxrewind), afterRender);
2570 rewindSize = maxRewind;
2571 }
2572
2573 if (afterRender) {
2574 pa_memblockq_rewind(i->thread_info.render_memblockq, rewindSize);
2575 } else {
2576 pa_memblockq_rewind(ps->memblockq, rewindSize);
2577 pa_memblockq_flush_read(i->thread_info.render_memblockq);
2578 }
2579 }
2580 OffloadReset(u);
2581 }
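/*
 * Editorial note (not in the original): on pause/flush, the audio still queued in the HDI
 * (minus an ~180 ms fade-out allowance) is pushed back into either the render memblockq
 * (afterRender) or the client-side memblockq, clamped to that queue's maxrewind, so
 * playback can later resume without dropping the unplayed data.
 */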
2582
2583 static void GetSinkInputName(pa_sink_input *i, char *str, int len)
2584 {
2585 const char *streamUid = safeProplistGets(i->proplist, "stream.client.uid", "NULL");
2586 const char *streamPid = safeProplistGets(i->proplist, "stream.client.pid", "NULL");
2587 const char *streamType = safeProplistGets(i->proplist, "stream.type", "NULL");
2588 const char *sessionID = safeProplistGets(i->proplist, "stream.sessionID", "NULL");
2589 int ret = sprintf_s(str, len, "%s_%s_%s_%s_of%d", streamType, streamUid, streamPid, sessionID, InputIsOffload(i));
2590 if (ret < 0) {
2591 AUDIO_ERR_LOG("sprintf_s fail! ret %d", ret);
2592 }
2593 }
2594
2595 static int32_t getSinkInputSessionID(pa_sink_input *i)
2596 {
2597 const char *res = pa_proplist_gets(i->proplist, "stream.sessionID");
2598 if (res == NULL) {
2599 return -1;
2600 } else {
2601 return atoi(res);
2602 }
2603 }
2604
2605 static void OffloadLock(struct Userdata *u)
2606 {
2607 if (!u->offload.runninglocked) {
2608 u->offload.sinkAdapter->RendererSinkOffloadRunningLockLock(u->offload.sinkAdapter);
2609 u->offload.runninglocked = true;
2610 } else {
2611 }
2612 }
2613
2614 static void OffloadUnlock(struct Userdata *u)
2615 {
2616 if (u->offload.runninglocked) {
2617 u->offload.sinkAdapter->RendererSinkOffloadRunningLockUnlock(u->offload.sinkAdapter);
2618 u->offload.runninglocked = false;
2619 } else {
2620 }
2621 }
2622
2623 static void offloadSetMaxRewind(struct Userdata *u, pa_sink_input *i)
2624 {
2625 pa_sink_input_assert_ref(i);
2626 pa_sink_input_assert_io_context(i);
2627 pa_assert(PA_SINK_INPUT_IS_LINKED(i->thread_info.state));
2628
2629 size_t blockSize = pa_memblock_get_length(u->offload.chunk.memblock);
2630 pa_memblockq_set_maxrewind(i->thread_info.render_memblockq, blockSize);
2631
2632 size_t nbytes = pa_usec_to_bytes(MAX_REWIND, &i->sink->sample_spec);
2633
2634 if (i->update_max_rewind) {
2635 i->update_max_rewind(i, i->thread_info.resampler ?
2636 pa_resampler_request(i->thread_info.resampler, nbytes) : nbytes);
2637 }
2638 }
2639
2640 static void CheckInputChangeToOffload(struct Userdata *u, pa_sink_input *i)
2641 {
2642 // if maxrewind is set to buffer_size while inner-cap, reset it to 0 for offload.
2643 if (pa_memblockq_get_maxrewind(i->thread_info.render_memblockq) == u->buffer_size) {
2644 AUDIO_INFO_LOG("Reset maxwind to 0 to enable offload for sink-input:%{public}u", i->index);
2645 pa_sink_input_update_max_rewind(i, 0);
2646 }
2647 if (InputIsOffload(i) && pa_memblockq_get_maxrewind(i->thread_info.render_memblockq) == 0) {
2648 offloadSetMaxRewind(u, i);
2649 pa_sink_input *it;
2650 void *state = NULL;
2651 while ((it = pa_hashmap_iterate(u->sink->thread_info.inputs, &state, NULL))) {
2652 if (it != i && pa_memblockq_get_maxrewind(it->thread_info.render_memblockq) != 0) {
2653 pa_sink_input_update_max_rewind(it, 0);
2654 drop_backlog(it->thread_info.render_memblockq);
2655 playback_stream *ps = it->userdata;
2656 drop_backlog(ps->memblockq);
2657 }
2658 }
2659 }
2660 }
2661
2662 static void StartOffloadHdi(struct Userdata *u, pa_sink_input *i)
2663 {
2664 CheckInputChangeToOffload(u, i);
2665 int32_t sessionID = getSinkInputSessionID(i);
2666 if (u->offload.isHDISinkStarted) {
2667 AUDIO_INFO_LOG("StartOffloadHdi, sessionID : %{public}d -> %{public}d", u->offload.sessionID, sessionID);
2668 if (sessionID != u->offload.sessionID) {
2669 if (u->offload.sessionID != -1) {
2670 u->offload.sinkAdapter->RendererSinkReset(u->offload.sinkAdapter);
2671 OffloadReset(u);
2672 }
2673 u->offload.sessionID = sessionID;
2674 }
2675 OffloadLock(u);
2676 } else {
2677 AUDIO_INFO_LOG("StartOffloadHdi, Restart offload with rate:%{public}d, channels:%{public}d",
2678 u->ss.rate, u->ss.channels);
2679 if (u->offload.sinkAdapter->RendererSinkStart(u->offload.sinkAdapter)) {
2680 AUDIO_WARNING_LOG("StartOffloadHdi, audiorenderer control start failed!");
2681 } else {
2682 RegOffloadCallback(u);
2683 u->offload.isHDISinkStarted = true;
2684 AUDIO_INFO_LOG("StartOffloadHdi, Successfully restarted offload HDI renderer");
2685 OffloadLock(u);
2686 u->offload.sessionID = sessionID;
2687 OffloadSetHdiVolume(i);
2688 }
2689 }
2690 }
2691
2692 static void POSSIBLY_UNUSED PaInputStateChangeCbOffload(struct Userdata *u, pa_sink_input *i,
2693 pa_sink_input_state_t state)
2694 {
2695 const bool corking = i->thread_info.state == PA_SINK_INPUT_RUNNING && state == PA_SINK_INPUT_CORKED;
2696 const bool starting = i->thread_info.state == PA_SINK_INPUT_CORKED && state == PA_SINK_INPUT_RUNNING;
2697 const bool stopping = state == PA_SINK_INPUT_UNLINKED;
2698
2699 if (!u->offload.inited && PrepareDeviceOffload(u) == 0) {
2700 u->offload.inited = true;
2701 }
2702 if (starting) {
2703 StartOffloadHdi(u, i);
2704 } else if (corking) {
2705 pa_atomic_store(&u->offload.hdistate, 2); // 2 indicates corking
2706 OffloadRewindAndFlush(u, i, false);
2707 } else if (stopping) {
2708 u->offload.sinkAdapter->RendererSinkFlush(u->offload.sinkAdapter);
2709 OffloadReset(u);
2710 g_speakerPaAllStreamStartVolZeroTime = 0;
2711 }
2712 ResetVolumeBySinkInputState(i, state);
2713 }
2714
2715 static void ResetVolumeBySinkInputState(pa_sink_input *i, pa_sink_input_state_t state)
2716 {
2717 pa_assert(i);
2718 const bool corking = i->thread_info.state == PA_SINK_INPUT_RUNNING && state == PA_SINK_INPUT_CORKED;
2719 if (corking) {
2720 const char *sessionIDStr = safeProplistGets(i->proplist, "stream.sessionID", "NULL");
2721 uint32_t sessionID = sessionIDStr != NULL ? atoi(sessionIDStr) : 0;
2722 SetPreVolume(sessionID, 0.0f);
2723 }
2724 }
2725
2726 // call from IO thread(OS_ProcessData)
2727 static void PaInputStateChangeCbPrimary(struct Userdata *u, pa_sink_input *i, pa_sink_input_state_t state)
2728 {
2729 const bool starting = i->thread_info.state == PA_SINK_INPUT_CORKED && state == PA_SINK_INPUT_RUNNING;
2730 const bool corking = i->thread_info.state == PA_SINK_INPUT_RUNNING && state == PA_SINK_INPUT_CORKED;
2731 uint32_t streamIndex = i->index;
2732 if (corking) {
2733 SetFadeoutState(streamIndex, NO_FADE);
2734 }
2735
2736 if (starting) {
2737 u->primary.timestamp = pa_rtclock_now();
2738 if (pa_atomic_load(&u->primary.isHDISinkStarted) == 1) {
2739 pa_atomic_store(&u->primary.fadingFlagForPrimary, 1);
2740 AUDIO_INFO_LOG("store fadingFlagForPrimary for 1");
2741 SetFadeoutState(streamIndex, NO_FADE);
2742 u->primary.primaryFadingInDone = 0;
2743 u->primary.primarySinkInIndex = (int32_t)(i->index);
2744 AUDIO_INFO_LOG("PaInputStateChangeCb, HDI renderer already started");
2745 return;
2746 }
2747 AUDIO_INFO_LOG("PaInputStateChangeCb, Restart with rate:%{public}d,channels:%{public}d, format:%{public}d",
2748 u->ss.rate, u->ss.channels, (int)pa_sample_size_of_format(u->format));
2749 if (pa_asyncmsgq_send(u->primary.dq, NULL, HDI_START, NULL, 0, NULL)) {
2750 AUDIO_ERR_LOG("audiorenderer control start failed!");
2751 u->primary.sinkAdapter->RendererSinkDeInit(u->primary.sinkAdapter);
2752 } else {
2753 pa_atomic_store(&u->primary.isHDISinkStarted, 1);
2754 u->writeCount = 0;
2755 u->renderCount = 0;
2756 pa_atomic_store(&u->primary.fadingFlagForPrimary, 1);
2757 AUDIO_INFO_LOG("store fadingFlagForPrimary for 1");
2758 SetFadeoutState(streamIndex, NO_FADE);
2759 u->primary.primaryFadingInDone = 0;
2760 u->primary.primarySinkInIndex = (int32_t)(i->index);
2761 AUDIO_INFO_LOG("PaInputStateChangeCb, Successfully restarted HDI renderer");
2762 }
2763 }
2764 const char *strExpectedPlaybackDurationBytes = safeProplistGets(i->proplist, "expectedPlaybackDurationBytes", "0");
2765 uint64_t expectedPlaybackDurationBytes = 0;
2766 pa_atou64(strExpectedPlaybackDurationBytes, &expectedPlaybackDurationBytes);
2767 enum FadeStrategy fadeStrategy
2768 = GetFadeStrategy(pa_bytes_to_usec(expectedPlaybackDurationBytes, &(i->sample_spec)) / PA_USEC_PER_MSEC);
2769 if (fadeStrategy == FADE_STRATEGY_DEFAULT) {
2770 ResetVolumeBySinkInputState(i, state);
2771 }
2772 }
2773
2774 // call from IO thread(OS_ProcessData)
2775 static void StartPrimaryHdiIfRunning(struct Userdata *u)
2776 {
2777 AUTO_CTRACE("hdi_sink::StartPrimaryHdiIfRunning");
2778 if (pa_atomic_load(&u->primary.isHDISinkStarted) == 1) {
2779 return;
2780 }
2781
2782 unsigned nPrimary = 0;
2783 unsigned nOffload = 0;
2784 unsigned nMultiChannel = 0;
2785 GetInputsType(u->sink, &nPrimary, &nOffload, &nMultiChannel, true);
2786 if (nPrimary == 0) {
2787 return;
2788 }
2789
2790 if (pa_asyncmsgq_send(u->primary.dq, NULL, HDI_START, NULL, 0, NULL)) {
2791 AUDIO_ERR_LOG("audiorenderer control start failed!");
2792 u->primary.sinkAdapter->RendererSinkDeInit(u->primary.sinkAdapter);
2793 } else {
2794 pa_atomic_store(&u->primary.isHDISinkStarted, 1);
2795 u->writeCount = 0;
2796 u->renderCount = 0;
2797 AUDIO_INFO_LOG("StartPrimaryHdiIfRunning, Successfully restarted HDI renderer");
2798 }
2799 }
2800
2801 static void ResetMultiChannelHdiState(struct Userdata *u)
2802 {
2803 if (u->multiChannel.sinkAdapter == NULL) {
2804 return;
2805 }
2806 if (u->multiChannel.isHDISinkInited) {
2807 if (u->multiChannel.sample_attrs.channel != (uint32_t)u->multiChannel.sinkChannel) {
2808 u->multiChannel.sinkAdapter->RendererSinkStop(u->multiChannel.sinkAdapter);
2809 u->multiChannel.isHDISinkStarted = false;
2810 u->multiChannel.sinkAdapter->RendererSinkDeInit(u->multiChannel.sinkAdapter);
2811 u->multiChannel.isHDISinkInited = false;
2812 u->multiChannel.sample_attrs.adapterName = "primary";
2813 u->multiChannel.sample_attrs.channel = (uint32_t)u->multiChannel.sinkChannel;
2814 u->multiChannel.sample_attrs.channelLayout = u->multiChannel.sinkChannelLayout;
2815 u->multiChannel.sinkAdapter->RendererSinkInit(u->multiChannel.sinkAdapter, &u->multiChannel.sample_attrs);
2816 u->multiChannel.isHDISinkInited = true;
2817 } else {
2818 if (u->multiChannel.isHDISinkStarted) {
2819 pa_atomic_store(&u->multiChannel.fadingFlagForMultiChannel, 1);
2820 u->multiChannel.multiChannelFadingInDone = 0;
2821 u->multiChannel.multiChannelSinkInIndex = u->multiChannel.multiChannelTmpSinkInIndex;
2822 return;
2823 }
2824 }
2825 } else {
2826 u->multiChannel.sample_attrs.adapterName = "primary";
2827 u->multiChannel.sample_attrs.channel = (uint32_t)u->multiChannel.sinkChannel;
2828 u->multiChannel.sample_attrs.channelLayout = u->multiChannel.sinkChannelLayout;
2829 u->multiChannel.sinkAdapter->RendererSinkInit(u->multiChannel.sinkAdapter, &u->multiChannel.sample_attrs);
2830 u->multiChannel.isHDISinkInited = true;
2831 }
2832 if (u->multiChannel.sinkAdapter->RendererSinkStart(u->multiChannel.sinkAdapter)) {
2833 u->multiChannel.isHDISinkStarted = false;
2834 u->multiChannel.sinkAdapter->RendererSinkDeInit(u->multiChannel.sinkAdapter);
2835 u->multiChannel.isHDISinkInited = false;
2836 AUDIO_INFO_LOG("ResetMultiChannelHdiState deinit success");
2837 } else {
2838 u->multiChannel.isHDISinkStarted = true;
2839 AUDIO_INFO_LOG("ResetMultiChannelHdiState start success");
2840 u->writeCount = 0;
2841 u->renderCount = 0;
2842 pa_atomic_store(&u->multiChannel.fadingFlagForMultiChannel, 1);
2843 u->multiChannel.multiChannelFadingInDone = 0;
2844 u->multiChannel.multiChannelSinkInIndex = u->multiChannel.multiChannelTmpSinkInIndex;
2845 }
2846 }
2847
2848 static void StartMultiChannelHdiIfRunning(struct Userdata *u)
2849 {
2850 ResetMultiChannelHdiState(u);
2851 }
2852
2853 static void PaInputStateChangeCbMultiChannel(struct Userdata *u, pa_sink_input *i, pa_sink_input_state_t state)
2854 {
2855 const bool corking = i->thread_info.state == PA_SINK_INPUT_RUNNING && state == PA_SINK_INPUT_CORKED;
2856 const bool starting = i->thread_info.state == PA_SINK_INPUT_CORKED && state == PA_SINK_INPUT_RUNNING;
2857 const bool stopping = state == PA_SINK_INPUT_UNLINKED;
2858 if (corking) {
2859 SetFadeoutState(i->index, NO_FADE);
2860 }
2861 if (starting) {
2862 u->multiChannel.timestamp = pa_rtclock_now();
2863 u->multiChannel.multiChannelTmpSinkInIndex = (int32_t)(i->index);
2864 } else if (stopping) {
2865 // Continuously dropping data; clear the counter on entering the suspended state.
2866 if (u->bytes_dropped != 0) {
2867 AUDIO_INFO_LOG("PaInputStateChangeCbMultiChannel, HDI-sink continuously dropping data - clear statistics "
2868 "(%zu -> 0 bytes dropped)", u->bytes_dropped);
2869 u->bytes_dropped = 0;
2870 }
2871 g_speakerPaAllStreamStartVolZeroTime = 0;
2872 }
2873 ResetVolumeBySinkInputState(i, state);
2874 }
2875
2876 static void ResetFadeoutPause(pa_sink_input *i, pa_sink_input_state_t state)
2877 {
2878 bool corking = i->thread_info.state == PA_SINK_INPUT_RUNNING && state == PA_SINK_INPUT_CORKED;
2879 bool starting = i->thread_info.state == PA_SINK_INPUT_CORKED && state == PA_SINK_INPUT_RUNNING;
2880 if (corking || starting) {
2881 AUDIO_INFO_LOG("set fadeoutPause to 0");
2882 SetFadeoutState(i->index, NO_FADE);
2883 }
2884 }
2885
2886 static void RendererSinkSetPriPaPower(pa_sink_input *i, pa_sink_input_state_t state, struct Userdata *u)
2887 {
2888 if (state == PA_SINK_INPUT_RUNNING) {
2889 if (u->primary.sinkAdapter == NULL) {
2890 return;
2891 }
2892 const char *streamType = safeProplistGets(i->proplist, "stream.type", "NULL");
2893 const char *sessionIDStr = safeProplistGets(i->proplist, "stream.sessionID", "NULL");
2894 const char *deviceClass = GetDeviceClass(u->primary.sinkAdapter->deviceClass);
2895 uint32_t sessionID = sessionIDStr != NULL ? (uint32_t)atoi(sessionIDStr) : 0;
2896 float volume = GetCurVolume(sessionID, streamType, deviceClass);
2897 bool isZeroVolume = IsSameVolume(volume, 0.0f);
2898 AUDIO_INFO_LOG(
2899 "session %{public}u, stream %{public}s, zerovol %{public}d", sessionID, streamType, isZeroVolume);
2900 if (!isZeroVolume) {
2901 u->primary.sinkAdapter->RendererSinkSetPriPaPower(u->primary.sinkAdapter);
2902 }
2903 }
2904 }
2905
2906 static void PaInputStateChangeCb(pa_sink_input *i, pa_sink_input_state_t state)
2907 {
2908 struct Userdata *u = NULL;
2909
2910 pa_assert(i);
2911 pa_sink_input_assert_ref(i);
2912 pa_assert(i->sink);
2913
2914 const bool corking = i->thread_info.state == PA_SINK_INPUT_RUNNING && state == PA_SINK_INPUT_CORKED;
2915 const bool starting = i->thread_info.state == PA_SINK_INPUT_CORKED && state == PA_SINK_INPUT_RUNNING;
2916 const bool stopping = state == PA_SINK_INPUT_UNLINKED;
2917
2918 corking ? pa_atomic_store(&i->isFirstReaded, 0) : (void)0;
2919 starting ? pa_atomic_store(&i->isFirstReaded, 1) : (void)0;
2920
2921 if (!strcmp(i->sink->name, SINK_NAME_INNER_CAPTURER) ||
2922 !strcmp(i->sink->name, SINK_NAME_REMOTE_CAST_INNER_CAPTURER) ||
2923 !strcmp(i->sink->driver, "module_split_stream_sink.c")) {
2924 ResetFadeoutPause(i, state);
2925 AUDIO_INFO_LOG("PaInputStateChangeCb inner_cap return");
2926 return;
2927 }
2928 pa_assert_se(u = i->sink->userdata);
2929
2930 RendererSinkSetPriPaPower(i, state, u);
2931
2932 char str[SPRINTF_STR_LEN] = {0};
2933 GetSinkInputName(i, str, SPRINTF_STR_LEN);
2934 AUDIO_INFO_LOG(
2935 "PaInputStateChangeCb, Sink[%{public}s]->SinkInput[%{public}s] state change:[%{public}s]-->[%{public}s]",
2936 GetDeviceClass(u->primary.sinkAdapter->deviceClass), str, GetInputStateInfo(i->thread_info.state),
2937 GetInputStateInfo(state));
2938
2939 if (i->thread_info.state == state) {
2940 return;
2941 }
2942
2943 if (!corking && !starting && !stopping) {
2944 AUDIO_WARNING_LOG("PaInputStateChangeCb, input state change: invalid");
2945 return;
2946 }
2947
2948 if (u->offload_enable && !strcmp(i->sink->name, OFFLOAD_SINK_NAME)) {
2949 ResetFadeoutPause(i, state);
2950 PaInputStateChangeCbOffload(u, i, state);
2951 } else if (u->multichannel_enable && !strcmp(i->sink->name, MCH_SINK_NAME)) {
2952 PaInputStateChangeCbMultiChannel(u, i, state);
2953 } else {
2954 PaInputStateChangeCbPrimary(u, i, state);
2955 }
2956 }
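/*
 * Editorial note (not in the original): state changes are dispatched by sink name:
 * offload inputs go to PaInputStateChangeCbOffload(), multi-channel inputs to
 * PaInputStateChangeCbMultiChannel(), and everything else to the primary handler;
 * inner-capturer and split-stream sinks only reset their fade state and return early.
 */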
2957
2958 static void ThreadFuncRendererTimerOffloadProcess(struct Userdata *u, pa_usec_t now, int64_t *sleepForUsec)
2959 {
2960 static uint32_t timeWait = 1; // 1ms init
2961 const uint64_t pos = u->offload.pos;
2962 const uint64_t hdiPos = u->offload.hdiPos + (pa_rtclock_now() - u->offload.hdiPosTs);
2963 const uint64_t pw = u->offload.prewrite;
2964 int64_t blockTime = (int64_t)pa_bytes_to_usec(u->sink->thread_info.max_request, &u->sink->sample_spec);
2965
2966 int32_t nInput = -1;
2967 const int hdistate = (int)pa_atomic_load(&u->offload.hdistate);
2968 if (pos <= hdiPos + pw && hdistate == 0) {
2969 bool wait;
2970 int32_t written = -1;
2971 int ret = ProcessRenderUseTimingOffload(u, &wait, &nInput, &written);
2972 if (ret < 0) {
2973 blockTime = 20 * PA_USEC_PER_MSEC; // 20ms for render write error
2974 } else if (wait) {
2975 blockTime = (int64_t)(timeWait * PA_USEC_PER_MSEC); // timeWait ms for first write no data
2976 if (timeWait < 20) { // 20ms max wait no data
2977 timeWait++;
2978 }
2979 } else {
2980 timeWait = 1; // 1ms have data reset timeWait
2981 blockTime = 1 * PA_USEC_PER_MSEC; // 1ms for min wait
2982 }
2983 } else if (hdistate == 1) {
2984 blockTime = (int64_t)(pos - hdiPos - HDI_MIN_MS_MAINTAIN * PA_USEC_PER_MSEC);
2985 if (blockTime < 0) {
2986 blockTime = OFFLOAD_FRAME_SIZE * PA_USEC_PER_MSEC; // block for one frame
2987 }
2988 }
2989 if (pos < hdiPos) {
2990 if (pos != 0) {
2991 AUDIO_DEBUG_LOG("ThreadFuncRendererTimerOffload hdiPos wrong need sync, pos %" PRIu64 ", hdiPos %" PRIu64,
2992 pos, hdiPos);
2993 }
2994 if (u->offload.hdiPosTs + 300 * PA_USEC_PER_MSEC < now) { // 300ms for update pos
2995 UpdatePresentationPosition(u);
2996 }
2997 }
2998 if (blockTime != -1) {
2999 *sleepForUsec = PA_MAX(blockTime, 0) - (int64_t)(pa_rtclock_now() - now);
3000 *sleepForUsec = PA_MAX(*sleepForUsec, 0);
3001 }
3002 }
3003
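// Only render on the offload path while the sink is RUNNING; once it is neither running
// nor opened, release the HDI running lock.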
3004 static void ThreadFuncRendererTimerOffloadFlag(struct Userdata *u, pa_usec_t now, bool *flagOut, int64_t *sleepForUsec)
3005 {
3006 bool flag = PA_SINK_IS_RUNNING(u->sink->thread_info.state);
3007 if (!flag && !PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
3008 OffloadUnlock(u);
3009 }
3010 *flagOut = flag;
3011 }
3012
3013 static void SinkRenderMultiChannelProcess(pa_sink *si, size_t length, pa_memchunk *chunkIn)
3014 {
3015 struct Userdata *u;
3016 pa_assert_se(u = si->userdata);
3017
3018 EffectChainManagerReturnMultiChannelInfo(&u->multiChannel.sinkChannel, &u->multiChannel.sinkChannelLayout);
3019
3020 chunkIn->memblock = pa_memblock_new(si->core->mempool, length * IN_CHANNEL_NUM_MAX / DEFAULT_IN_CHANNEL_NUM);
3021 size_t tmpLength = length * u->multiChannel.sinkChannel / DEFAULT_IN_CHANNEL_NUM;
3022 chunkIn->index = 0;
3023 chunkIn->length = tmpLength;
3024 SinkRenderMultiChannelGetData(si, chunkIn);
3025 chunkIn->index = 0;
3026 chunkIn->length = tmpLength;
3027 }
3028
3029 static void SinkRenderMultiChannel(pa_sink *si, size_t length, pa_memchunk *chunkIn)
3030 {
3031 pa_sink_ref(si);
3032
3033 size_t blockSizeMax;
3034
3035 pa_sink_assert_ref(si);
3036 pa_sink_assert_io_context(si);
3037 pa_assert(PA_SINK_IS_LINKED(si->thread_info.state));
3038 pa_assert(length > 0);
3039 pa_assert(pa_frame_aligned(length, &si->sample_spec));
3040 pa_assert(chunkIn);
3041
3042 pa_assert(!si->thread_info.rewind_requested);
3043 pa_assert(si->thread_info.rewind_nbytes == 0);
3044
3045 if (si->thread_info.state == PA_SINK_SUSPENDED) {
3046 chunkIn->memblock = pa_memblock_ref(si->silence.memblock);
3047 chunkIn->index = si->silence.index;
3048 chunkIn->length = PA_MIN(si->silence.length, length);
3049 return;
3050 }
3051
3052 if (length == 0)
3053 length = pa_frame_align(MIX_BUFFER_LENGTH, &si->sample_spec);
3054
3055 blockSizeMax = pa_mempool_block_size_max(si->core->mempool);
3056 if (length > blockSizeMax)
3057 length = pa_frame_align(blockSizeMax, &si->sample_spec);
3058
3059 pa_assert(length > 0);
3060
3061 SinkRenderMultiChannelProcess(si, length, chunkIn);
3062
3063 pa_sink_unref(si);
3064 }
3065
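// Render one max_request-sized multi-channel chunk and post it to the multi-channel HDI
// write thread through multiChannel.dq.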
3066 static void ProcessRenderUseTimingMultiChannel(struct Userdata *u, pa_usec_t now)
3067 {
3068 pa_assert(u);
3069
3070 // Fill the buffer up the latency size
3071 pa_memchunk chunk;
3072
3073 // Change from pa_sink_render to pa_sink_render_full for alignment issue in 3516
3074 SinkRenderMultiChannel(u->sink, u->sink->thread_info.max_request, &chunk);
3075 pa_assert(chunk.length > 0);
3076
3077 StartMultiChannelHdiIfRunning(u);
3078
3079 if (!chunk.memblock) {
3080 if (pa_atomic_load(&u->multiChannel.dflag) == 1) {
3081 pa_atomic_sub(&u->multiChannel.dflag, 1);
3082 }
3083 return;
3084 }
3085 pa_asyncmsgq_post(u->multiChannel.dq, NULL, HDI_RENDER, NULL, 0, &chunk, NULL);
3086 u->multiChannel.timestamp += pa_bytes_to_usec(u->sink->thread_info.max_request, &u->sink->sample_spec);
3087 }
3088
3089 static bool POSSIBLY_UNUSED ThreadFuncRendererTimerMultiChannelFlagJudge(struct Userdata *u)
3090 {
3091 pa_assert(u);
3092 bool flag = (u->render_in_idle_state && PA_SINK_IS_OPENED(u->sink->thread_info.state)) ||
3093 (!u->render_in_idle_state && PA_SINK_IS_RUNNING(u->sink->thread_info.state)) ||
3094 (u->sink->thread_info.state == PA_SINK_IDLE && u->sink->monitor_source &&
3095 PA_SOURCE_IS_RUNNING(u->sink->monitor_source->thread_info.state));
3096 pa_sink_input *i;
3097 void *state = NULL;
3098 int nMultiChannel = 0;
3099 while ((i = pa_hashmap_iterate(u->sink->thread_info.inputs, &state, NULL))) {
3100 pa_sink_input_assert_ref(i);
3101 if (InputIsMultiChannel(i)) {
3102 nMultiChannel++;
3103 }
3104 }
3105 flag &= nMultiChannel > 0;
3106 return flag;
3107 }
3108
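// Primary-path pacing: render only when the sink is effectively running (or its monitor
// source is linked), hand the chunk to the HDI write thread, and work out how long the
// bus thread may sleep before the next frame is due.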
3109 static void ProcessNormalData(struct Userdata *u)
3110 {
3111 AUTO_CTRACE("ProcessNormalData");
3112 int64_t sleepForUsec = -1;
3113 pa_usec_t now = 0;
3114
3115 if (u->sink->thread_info.state == PA_SINK_SUSPENDED && u->isEffectBufferAllocated == true) {
3116 FreeEffectBuffer(u);
3117 u->isEffectBufferAllocated = false;
3118 }
3119
3120 bool flag = (((u->render_in_idle_state && PA_SINK_IS_OPENED(u->sink->thread_info.state)) ||
3121 (!u->render_in_idle_state && PA_SINK_IS_RUNNING(u->sink->thread_info.state))) &&
3122 !(u->sink->thread_info.state == PA_SINK_IDLE && u->primary.previousState == PA_SINK_SUSPENDED) &&
3123 !(u->sink->thread_info.state == PA_SINK_IDLE && u->primary.previousState == PA_SINK_INIT)) ||
3124 (u->sink->thread_info.state == PA_SINK_IDLE && monitorLinked(u->sink, true));
3125 if (flag) {
3126 now = pa_rtclock_now();
3127 }
3128
3129 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
3130 pa_sink_process_rewind(u->sink, 0);
3131 }
3132
3133 if (flag) {
3134 pa_usec_t frameUsec = pa_bytes_to_usec(u->sink->thread_info.max_request, &u->sink->sample_spec);
3135 pa_usec_t blockTime = u->primary.timestamp + frameUsec - now;
3136 if (blockTime > frameUsec) { blockTime = frameUsec; }
3137 if (pa_atomic_load(&u->primary.dflag) == 1) {
3138 sleepForUsec = (int64_t)blockTime -
3139 ((int64_t)pa_rtclock_now() - (int64_t)(u->primary.lastProcessDataTime));
3140 if (sleepForUsec < MIN_SLEEP_FOR_USEC) {
3141 sleepForUsec = MIN_SLEEP_FOR_USEC;
3142 }
3143 } else {
3144 if (u->primary.timestamp <= now + u->primary.prewrite || !strcmp(u->sink->name, DP_SINK_NAME)) {
3145 pa_atomic_add(&u->primary.dflag, 1);
3146 u->primary.lastProcessDataTime = pa_rtclock_now();
3147 ProcessRenderUseTiming(u, now);
3148 }
3149 sleepForUsec = (int64_t)blockTime - ((int64_t)pa_rtclock_now() - (int64_t)now);
3150 if (u->primary.timestamp <= now + u->primary.prewrite) {
3151 sleepForUsec = PA_MIN(sleepForUsec, (int64_t)u->primary.writeTime);
3152 }
3153 }
3154 sleepForUsec = PA_MAX(sleepForUsec, 0);
3155 }
3156
3157 if (sleepForUsec != -1) {
3158 if (u->timestampSleep == -1) {
3159 u->timestampSleep = (int64_t)pa_rtclock_now() + sleepForUsec;
3160 } else {
3161 u->timestampSleep = PA_MIN(u->timestampSleep, (int64_t)pa_rtclock_now() + sleepForUsec);
3162 }
3163 }
3164 }
3165
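// Multi-channel pacing: same structure as ProcessNormalData, but gated on
// ThreadFuncRendererTimerMultiChannelFlagJudge() and the multiChannel prewrite window.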
3166 static void ProcessMCHData(struct Userdata *u)
3167 {
3168 AUTO_CTRACE("ProcessMCHData");
3169 const uint64_t pw = u->multiChannel.prewrite;
3170
3171 pa_usec_t now = 0;
3172
3173 int64_t sleepForUsec = -1;
3174
3175 now = pa_rtclock_now();
3176
3177 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
3178 pa_sink_process_rewind(u->sink, 0);
3179 }
3180
3181 if (!ThreadFuncRendererTimerMultiChannelFlagJudge(u)) {
3182 return;
3183 }
3184
3185 if (u->multiChannel.timestamp <= now + pw && pa_atomic_load(&u->multiChannel.dflag) == 0) {
3186 pa_atomic_add(&u->multiChannel.dflag, 1);
3187 ProcessRenderUseTimingMultiChannel(u, now);
3188 }
3189 pa_usec_t blockTime = pa_bytes_to_usec(u->sink->thread_info.max_request, &u->sink->sample_spec);
3190 sleepForUsec = PA_MIN((int64_t)blockTime - ((int64_t)pa_rtclock_now() - (int64_t)now),
3191 (int64_t)(u->multiChannel.writeTime));
3192 sleepForUsec = PA_MAX(sleepForUsec, 0);
3193 if (sleepForUsec != -1) {
3194 if (u->timestampSleep == -1) {
3195 u->timestampSleep = (int64_t)pa_rtclock_now() + sleepForUsec;
3196 } else {
3197 u->timestampSleep = PA_MIN(u->timestampSleep, (int64_t)pa_rtclock_now() + sleepForUsec);
3198 }
3199 }
3200 }
3201
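// Offload pacing wrapper: render when the sink is running, and release the running lock
// once the "buffer full" timestamp (fullTs) has been held long enough.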
3202 static void ProcessOffloadData(struct Userdata *u)
3203 {
3204 AUTO_CTRACE("ProcessOffloadData");
3205 pa_usec_t now = pa_rtclock_now();
3206 int64_t sleepForUsec = -1;
3207 bool flag;
3208 ThreadFuncRendererTimerOffloadFlag(u, now, &flag, &sleepForUsec);
3209
3210 if (flag) {
3211 ThreadFuncRendererTimerOffloadProcess(u, now, &sleepForUsec);
3212 sleepForUsec = PA_MAX(sleepForUsec, 0);
3213 }
3214
3215 if (u->offload.fullTs != 0) {
3216 if (u->offload.fullTs + 10 * PA_USEC_PER_MSEC > now) { // 10 is min checking size
3217 const int64_t s = ((int64_t)(u->offload.fullTs) + 10 * PA_USEC_PER_MSEC) - (int64_t)now;
3218 sleepForUsec = sleepForUsec == -1 ? s : PA_MIN(s, sleepForUsec);
3219 } else if (pa_atomic_load(&u->offload.hdistate) == 1) {
3220 u->offload.fullTs = 0;
3221 OffloadUnlock(u);
3222 } else {
3223 u->offload.fullTs = 0;
3224 }
3225 }
3226
3227 if (sleepForUsec != -1) {
3228 if (u->timestampSleep == -1) {
3229 u->timestampSleep = (int64_t)pa_rtclock_now() + sleepForUsec;
3230 } else {
3231 u->timestampSleep = PA_MIN(u->timestampSleep, (int64_t)pa_rtclock_now() + sleepForUsec);
3232 }
3233 }
3234 }
3235
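// One bus-thread iteration: refresh the sleep deadline, then dispatch to the
// multi-channel, offload or primary processing path based on the sink name.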
3236 static void ThreadFuncRendererTimerProcessData(struct Userdata *u)
3237 {
3238 if (u->timestampSleep < (int64_t)pa_rtclock_now()) {
3239 u->timestampSleep = -1;
3240 }
3241
3242 pthread_rwlock_unlock(&u->rwlockSleep);
3243
3244 static int64_t logCnt = 0;
3245 if (logCnt == 0) {
3246 AUDIO_INFO_LOG("Bus thread still running");
3247 }
3248 ++logCnt;
3249 if (logCnt > LOG_LOOP_THRESHOLD) {
3250 logCnt = 0;
3251 }
3252
3253 g_onlyPrimarySpeakerPaLoading = true;
3254 g_speakerPaAllStreamVolumeZero = true;
3255 CheckOnlyPrimarySpeakerPaLoading(u);
3256 if (!strcmp(u->sink->name, MCH_SINK_NAME)) {
3257 ProcessMCHData(u);
3258 } else if (!strcmp(u->sink->name, OFFLOAD_SINK_NAME) && u->offload_enable && u->offload.msgq) {
3259 ProcessOffloadData(u);
3260 } else {
3261 ProcessNormalData(u);
3262 }
3263 }
3264
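// Bus thread main loop: the next wake-up is taken from u->timestampSleep, the thread
// sleeps in pa_rtpoll_run(), and on an rtpoll error it either exits the process (primary
// sink) or asks the core to unload the module.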
3265 static void ThreadFuncRendererTimerBus(void *userdata)
3266 {
3267 // set audio thread priority
3268 ScheduleThreadInServer(getpid(), gettid());
3269
3270 struct Userdata *u = userdata;
3271
3272 pa_assert(u);
3273
3274 const char *deviceClass = GetDeviceClass(u->primary.sinkAdapter->deviceClass);
3275 AUDIO_INFO_LOG("Thread %s(use timing bus) starting up, pid %d, tid %d", deviceClass, getpid(), gettid());
3276 pa_thread_mq_install(&u->thread_mq);
3277
3278 if (!strcmp(u->sink->name, OFFLOAD_SINK_NAME)) {
3279 OffloadReset(u);
3280 CHECK_AND_RETURN_LOG(u->offload.sinkAdapter != NULL, "offload.sinkAdapter is NULL");
3281 u->offload.sinkAdapter->RendererSinkOffloadRunningLockInit(u->offload.sinkAdapter);
3282 }
3283 while (true) {
3284 int ret;
3285 pthread_rwlock_wrlock(&u->rwlockSleep);
3286
3287 int64_t sleepForUsec = 0;
3288
3289 if (u->timestampSleep == -1) {
3290 pa_rtpoll_set_timer_disabled(u->rtpoll); // sleep forever
3291 } else if ((sleepForUsec = u->timestampSleep - (int64_t)(pa_rtclock_now())) <= 0) {
3292 pa_rtpoll_set_timer_relative(u->rtpoll, 0);
3293 } else {
3294 pa_rtpoll_set_timer_relative(u->rtpoll, sleepForUsec);
3295 }
3296
3297 AUTO_CTRACE("ProcessDataLoop %s sleep:%lld us", deviceClass, sleepForUsec);
3298 // Hmm, nothing to do. Let's sleep
3299 if ((ret = pa_rtpoll_run(u->rtpoll)) < 0) {
3300 AUDIO_ERR_LOG("Thread %{public}s(use timing bus) shutting down, error %{public}d, "
3301 "pid %{public}d, tid %{public}d", deviceClass, ret, getpid(), gettid());
3302 if (!strcmp(deviceClass, DEVICE_CLASS_PRIMARY)) {
3303 AUDIO_ERR_LOG("Primary sink's pa_rtpoll_run error, exit");
3304 _Exit(0);
3305 }
3306
3307 // If this was no regular exit from the loop we have to continue
3308 // processing messages until we received PA_MESSAGE_SHUTDOWN
3309 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE,
3310 u->module, 0, NULL, NULL);
3311 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
3312 pthread_rwlock_unlock(&u->rwlockSleep);
3313 break;
3314 }
3315
3316 if (ret == 0) {
3317 AUDIO_INFO_LOG("Thread %{public}s(use timing bus) shutting down, pid %{public}d, tid %{public}d",
3318 deviceClass, getpid(), gettid());
3319 pthread_rwlock_unlock(&u->rwlockSleep);
3320 break;
3321 }
3322
3323 ThreadFuncRendererTimerProcessData(u);
3324 }
3325 UnscheduleThreadInServer(getpid(), gettid());
3326 }
3327
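// Writer thread for the multi-channel sink: pops HDI_RENDER chunks from multiChannel.dq
// and pushes them to the multi-channel HDI adapter.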
3328 static void ThreadFuncWriteHDIMultiChannel(void *userdata)
3329 {
3330 AUDIO_DEBUG_LOG("ThreadFuncWriteHDIMultiChannel start");
3331 // set audio thread priority
3332 ScheduleThreadInServer(getpid(), gettid());
3333
3334 struct Userdata *u = userdata;
3335 pa_assert(u);
3336
3337 int32_t quit = 0;
3338
3339 do {
3340 int32_t code = 0;
3341 pa_memchunk chunk;
3342
3343 pa_assert_se(pa_asyncmsgq_get(u->multiChannel.dq, NULL, &code, NULL, NULL, &chunk, 1) == 0);
3344
3345 switch (code) {
3346 case HDI_RENDER: {
3347 pa_usec_t now = pa_rtclock_now();
3348 if (RenderWrite(u->multiChannel.sinkAdapter, &chunk) < 0) {
3349 AUDIO_DEBUG_LOG("ThreadFuncWriteHDIMultiChannel RenderWrite");
3350 u->bytes_dropped += chunk.length;
3351 }
3352 if (pa_atomic_load(&u->multiChannel.dflag) == 1) {
3353 pa_atomic_sub(&u->multiChannel.dflag, 1);
3354 }
3355 u->multiChannel.writeTime = pa_rtclock_now() - now;
3356 break;
3357 }
3358 case QUIT:
3359 quit = 1;
3360 break;
3361 default:
3362 break;
3363 }
3364 pa_asyncmsgq_done(u->multiChannel.dq, 0);
3365 } while (!quit);
3366 UnscheduleThreadInServer(getpid(), gettid());
3367 }
3368
3369 static void ProcessHdiRendererPrimary(struct Userdata *u, pa_memchunk *pChunk)
3370 {
3371 pa_usec_t now = pa_rtclock_now();
3372 if (pa_atomic_load(&u->primary.isHDISinkStarted) != 1 && now - u->timestampLastLog > USEC_PER_SEC) {
3373 u->timestampLastLog = now;
3374 const char *deviceClass = GetDeviceClass(u->primary.sinkAdapter->deviceClass);
3375 AUDIO_DEBUG_LOG("HDI not started, skip RenderWrite, wait sink[%s] suspend", deviceClass);
3376 pa_memblock_unref(pChunk->memblock);
3377 } else if (pa_atomic_load(&u->primary.isHDISinkStarted) != 1) {
3378 pa_memblock_unref(pChunk->memblock);
3379 } else if (RenderWrite(u->primary.sinkAdapter, pChunk) < 0) {
3380 u->bytes_dropped += pChunk->length;
3381 AUDIO_ERR_LOG("RenderWrite failed");
3382 }
3383 if (pa_atomic_load(&u->primary.dflag) == 1) {
3384 pa_atomic_sub(&u->primary.dflag, 1);
3385 }
3386 u->primary.writeTime = pa_rtclock_now() - now;
3387 }
3388
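// Writer thread for the primary sink: handles HDI_RENDER, HDI_START, HDI_STOP and QUIT
// messages posted on primary.dq.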
3389 static void ThreadFuncWriteHDI(void *userdata)
3390 {
3391 // set audio thread priority
3392 ScheduleThreadInServer(getpid(), gettid());
3393
3394 struct Userdata *u = userdata;
3395 pa_assert(u);
3396
3397 int32_t quit = 0;
3398
3399 do {
3400 int32_t code = 0;
3401 pa_memchunk chunk;
3402
3403 CHECK_AND_RETURN_LOG(u->primary.dq != NULL, "u->primary.dq is NULL");
3404 pa_assert_se(pa_asyncmsgq_get(u->primary.dq, NULL, &code, NULL, NULL, &chunk, 1) == 0);
3405
3406 int ret = 0;
3407 AUTO_CTRACE("hdi_sink::ThreadFuncWriteHDI code: %d", code);
3408 switch (code) {
3409 case HDI_RENDER: {
3410 ProcessHdiRendererPrimary(u, &chunk);
3411 break;
3412 }
3413 case HDI_STOP: {
3414 if (pa_atomic_load(&u->primary.isHDISinkStarted) == 1) {
3415 u->primary.sinkAdapter->RendererSinkStop(u->primary.sinkAdapter);
3416 AUDIO_INFO_LOG("Stopped HDI renderer");
3417 pa_atomic_store(&u->primary.isHDISinkStarted, 0);
3418 }
3419 break;
3420 }
3421 case HDI_START: {
3422 ret = u->primary.sinkAdapter->RendererSinkStart(u->primary.sinkAdapter);
3423 break;
3424 }
3425 case QUIT:
3426 quit = 1;
3427 break;
3428 default:
3429 break;
3430 }
3431 AUTO_CTRACE("hdi_sink::ThreadFuncWriteHDI done ret: %d", ret);
3432 pa_asyncmsgq_done(u->primary.dq, ret);
3433 } while (!quit);
3434 UnscheduleThreadInServer(getpid(), gettid());
3435 }
3436
3437 static void TestModeThreadFuncWriteHDI(void *userdata)
3438 {
3439 // set audio thread priority
3440 ScheduleThreadInServer(getpid(), gettid());
3441
3442 struct Userdata *u = userdata;
3443 pa_assert(u);
3444
3445 int32_t quit = 0;
3446
3447 do {
3448 int32_t code = 0;
3449 pa_memchunk chunk;
3450
3451 pa_assert_se(pa_asyncmsgq_get(u->primary.dq, NULL, &code, NULL, NULL, &chunk, 1) == 0);
3452
3453 switch (code) {
3454 case HDI_RENDER:
3455 if (TestModeRenderWrite(u, &chunk) < 0) {
3456 u->bytes_dropped += chunk.length;
3457 AUDIO_ERR_LOG("TestModeRenderWrite failed");
3458 }
3459 if (pa_atomic_load(&u->primary.dflag) == 1) {
3460 pa_atomic_sub(&u->primary.dflag, 1);
3461 }
3462 break;
3463 case QUIT:
3464 quit = 1;
3465 break;
3466 default:
3467 break;
3468 }
3469 pa_asyncmsgq_done(u->primary.dq, 0);
3470 } while (!quit);
3471 UnscheduleThreadInServer(getpid(), gettid());
3472 }
3473
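// Translate the requested latency into max_request bytes, clamping it to at least
// DEFAULT_BLOCK_USEC.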
3474 static void SinkUpdateRequestedLatencyCb(pa_sink *s)
3475 {
3476 struct Userdata *u = NULL;
3477 size_t nbytes;
3478
3479 pa_sink_assert_ref(s);
3480 pa_assert_se(u = s->userdata);
3481
3482 u->block_usec = pa_sink_get_requested_latency_within_thread(s);
3483
3484 if (u->block_usec == (pa_usec_t) - 1)
3485 u->block_usec = s->thread_info.max_latency;
3486
3487 if (u->block_usec < DEFAULT_BLOCK_USEC) {
3488 AUDIO_WARNING_LOG("block_usec is less than 20000, block_usec: %{public}" PRIu64, u->block_usec);
3489 u->block_usec = DEFAULT_BLOCK_USEC;
3490 }
3491 nbytes = pa_usec_to_bytes(u->block_usec, &s->sample_spec);
3492 pa_sink_set_max_request_within_thread(s, nbytes);
3493 }
3494
3495 static void CheckAndPrintLatency(uint64_t lastRecodedLatency, uint64_t latency, bool getLatencyFromHdiSucess,
3496 uint32_t continuesGetLatencyErrCount, uint64_t logThreshold)
3497 {
3498 uint64_t latencyDifference = (latency > lastRecodedLatency)
3499 ? (latency - lastRecodedLatency) : (lastRecodedLatency - latency);
3500 if (latencyDifference > logThreshold) {
3501 AUDIO_INFO_LOG("lastLatency: %{public}" PRIu64 " latency: %{public}" PRIu64 ""
3502 " getLatencyFromHdiSucess: %{public}d continuesGetLatencyErrCount: %{public}u",
3503 lastRecodedLatency, latency, getLatencyFromHdiSucess, continuesGetLatencyErrCount);
3504 }
3505 }
3506
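// PA_SINK_MESSAGE_GET_LATENCY: for offload the latency is derived from the write/HDI
// positions; otherwise the configured sink_latency is used, or the HDI is queried with a
// timestamp-based estimate as fallback.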
3507 static int32_t SinkProcessMsg(pa_msgobject *o, int32_t code, void *data, int64_t offset,
3508 pa_memchunk *chunk)
3509 {
3510 AUDIO_DEBUG_LOG("SinkProcessMsg: code: %{public}d", code);
3511 AUTO_CTRACE("hdi_sink::SinkProcessMsg code: %d", code);
3512 struct Userdata *u = PA_SINK(o)->userdata;
3513 pa_assert(u);
3514
3515 switch (code) {
3516 case PA_SINK_MESSAGE_GET_LATENCY: {
3517 if (!strcmp(GetDeviceClass(u->primary.sinkAdapter->deviceClass), DEVICE_CLASS_OFFLOAD)) {
3518 uint64_t pos = u->offload.pos;
3519 pa_usec_t now = pa_rtclock_now();
3520 uint64_t time = now > u->offload.hdiPosTs ? (now - u->offload.hdiPosTs) / PA_USEC_PER_MSEC : 0;
3521 uint64_t hdiPos = u->offload.hdiPos + time * PA_USEC_PER_MSEC;
3522 *((uint64_t *)data) = pos > hdiPos ? (pos - hdiPos) : 0;
3523 AUDIO_DEBUG_LOG("offload latency: %{public}" PRIu64 " write pos: %{public}" PRIu64
3524 " hdi pos: %{public}" PRIu64 " time: %{public}" PRIu64,
3525 *((uint64_t *)data), pos, u->offload.hdiPos, time * PA_USEC_PER_MSEC);
3526 } else if (u->sink_latency) {
3527 *((uint64_t *)data) = u->sink_latency * PA_USEC_PER_MSEC;
3528 } else {
3529 uint64_t latency;
3530 uint32_t hdiLatency;
3531 bool getLatencyFromHdiSucess = true;
3532 // Try to fetch the latency from the HDI; otherwise estimate it from
3533 // the time elapsed since the last primary render timestamp
3534 if (u->primary.sinkAdapter->RendererSinkGetLatency(u->primary.sinkAdapter, &hdiLatency) == 0) {
3535 latency = (PA_USEC_PER_MSEC * hdiLatency);
3536 } else {
3537 pa_usec_t now = pa_rtclock_now();
3538 latency = (now - u->primary.timestamp);
3539 getLatencyFromHdiSucess = false;
3540 }
3541
3542 *((uint64_t *)data) = latency;
3543 CheckAndPrintLatency(u->lastRecodedLatency, latency, getLatencyFromHdiSucess,
3544 u->continuesGetLatencyErrCount, DEFAULT_GETLATENCY_LOG_THRESHOLD_MS);
3545 u->lastRecodedLatency = latency;
3546 u->continuesGetLatencyErrCount = getLatencyFromHdiSucess ? 0 : (u->continuesGetLatencyErrCount + 1);
3547 }
3548 return 0;
3549 }
3550 default:
3551 break;
3552 }
3553 return pa_sink_process_msg(o, code, data, offset, chunk);
3554 }
3555
3556 static char *GetStateInfo(pa_sink_state_t state)
3557 {
3558 switch (state) {
3559 case PA_SINK_INVALID_STATE:
3560 return "INVALID";
3561 case PA_SINK_RUNNING:
3562 return "RUNNING";
3563 case PA_SINK_IDLE:
3564 return "IDLE";
3565 case PA_SINK_SUSPENDED:
3566 return "SUSPENDED";
3567 case PA_SINK_INIT:
3568 return "INIT";
3569 case PA_SINK_UNLINKED:
3570 return "UNLINKED";
3571 default:
3572 return "error state";
3573 }
3574 }
3575
3576 static char *GetInputStateInfo(pa_sink_input_state_t state)
3577 {
3578 switch (state) {
3579 case PA_SINK_INPUT_INIT:
3580 return "INIT";
3581 case PA_SINK_INPUT_RUNNING:
3582 return "RUNNING";
3583 case PA_SINK_INPUT_CORKED:
3584 return "CORKED";
3585 case PA_SINK_INPUT_UNLINKED:
3586 return "UNLINKED";
3587 default:
3588 return "UNKNOWN";
3589 }
3590 }
3591
3592 // call from IO thread(OS_ProcessData)
3593 static int32_t RemoteSinkStateChange(pa_sink *s, pa_sink_state_t newState)
3594 {
3595 struct Userdata *u = s->userdata;
3596 if (s->thread_info.state == PA_SINK_INIT || newState == PA_SINK_INIT) {
3597 u->isFirstStarted = false;
3598 }
3599
3600 if (!u->isFirstStarted && (newState == PA_SINK_RUNNING)) {
3601 u->primary.timestamp = pa_rtclock_now();
3602 u->isFirstStarted = true;
3603 }
3604
3605 if (s->thread_info.state == PA_SINK_INIT && newState == PA_SINK_IDLE) {
3606 AUDIO_INFO_LOG("First start.");
3607 }
3608
3609 if (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(newState)) {
3610 u->primary.timestamp = pa_rtclock_now();
3611 if (pa_atomic_load(&u->primary.isHDISinkStarted) == 1) {
3612 return 0;
3613 }
3614
3615 if (pa_asyncmsgq_send(u->primary.dq, NULL, HDI_START, NULL, 0, NULL)) {
3616 AUDIO_ERR_LOG("audiorenderer control start failed!");
3617 } else {
3618 pa_atomic_store(&u->primary.isHDISinkStarted, 1);
3619 u->render_in_idle_state = 1; // enable to reduce noise from idle to running.
3620 u->writeCount = 0;
3621 u->renderCount = 0;
3622 AUDIO_INFO_LOG("Successfully restarted remote renderer");
3623 }
3624 }
3625 if (PA_SINK_IS_OPENED(s->thread_info.state) && newState == PA_SINK_SUSPENDED) {
3626 // Continuously dropping data; clear the counter on entering the suspended state.
3627 if (u->bytes_dropped != 0) {
3628 AUDIO_INFO_LOG("HDI-sink continuously dropping data - clear statistics (%zu -> 0 bytes dropped)",
3629 u->bytes_dropped);
3630 u->bytes_dropped = 0;
3631 }
3632
3633 if (pa_atomic_load(&u->primary.isHDISinkStarted) == 1) {
3634 pa_asyncmsgq_post(u->primary.dq, NULL, HDI_STOP, NULL, 0, NULL, NULL);
3635 }
3636 }
3637
3638 return 0;
3639 }
3640
3641 // call from IO thread(OS_ProcessData)
3642 static int32_t SinkSetStateInIoThreadCbStartPrimary(struct Userdata *u, pa_sink_state_t newState)
3643 {
3644 if (!PA_SINK_IS_OPENED(newState)) {
3645 return 0;
3646 }
3647
3648 u->primary.timestamp = pa_rtclock_now();
3649 if (pa_atomic_load(&u->primary.isHDISinkStarted) == 1) {
3650 return 0;
3651 }
3652
3653 if (u->sink->thread_info.state == PA_SINK_SUSPENDED && newState == PA_SINK_IDLE) {
3654 AUDIO_INFO_LOG("Primary sink from suspend to idle");
3655 return 0;
3656 }
3657
3658 if (pa_asyncmsgq_send(u->primary.dq, NULL, HDI_START, NULL, 0, NULL)) {
3659 AUDIO_ERR_LOG("audiorenderer control start failed!");
3660 u->primary.sinkAdapter->RendererSinkDeInit(u->primary.sinkAdapter);
3661 } else {
3662 pa_atomic_store(&u->primary.isHDISinkStarted, 1);
3663 u->writeCount = 0;
3664 u->renderCount = 0;
3665 AUDIO_INFO_LOG("SinkSetStateInIoThreadCbStartPrimary, Successfully restarted HDI renderer");
3666 }
3667 return 0;
3668 }
3669
3670 static int32_t SinkSetStateInIoThreadCbStartMultiChannel(struct Userdata *u, pa_sink_state_t newState)
3671 {
3672 if (!PA_SINK_IS_OPENED(newState)) {
3673 return 0;
3674 }
3675
3676 u->multiChannel.timestamp = pa_rtclock_now();
3677
3678 EffectChainManagerReturnMultiChannelInfo(&u->multiChannel.sinkChannel, &u->multiChannel.sinkChannelLayout);
3679 ResetMultiChannelHdiState(u);
3680 return 0;
3681 }
3682
3683 static void OffloadSinkStateChangeCb(pa_sink *sink, pa_sink_state_t newState)
3684 {
3685 struct Userdata *u = (struct Userdata *)(sink->userdata);
3686 const bool starting = PA_SINK_IS_OPENED(newState);
3687 const bool stopping = newState == PA_SINK_SUSPENDED;
3688 AUDIO_INFO_LOG("starting: %{public}d, stopping: %{public}d, offload_enable: %{public}d",
3689 starting, stopping, u->offload_enable);
3690 if (starting && u->offload_enable && !u->offload.inited && PrepareDeviceOffload(u) == 0) {
3691 u->offload.inited = true;
3692 } else if (stopping && u->offload_enable) {
3693 if (u->offload.isHDISinkStarted) {
3694 u->offload.sinkAdapter->RendererSinkStop(u->offload.sinkAdapter);
3695 AUDIO_INFO_LOG("Stopped Offload HDI renderer, DeInit later");
3696 u->offload.isHDISinkStarted = false;
3697 }
3698 OffloadReset(u);
3699 OffloadUnlock(u);
3700 if (u->offload.inited) {
3701 u->offload.inited = false;
3702 u->offload.sinkAdapter->RendererSinkDeInit(u->offload.sinkAdapter);
3703 AUDIO_INFO_LOG("DeInited Offload HDI renderer");
3704 }
3705 g_speakerPaAllStreamStartVolZeroTime = 0;
3706 }
3707 }
3708
3709 // Called from the IO thread.
3710 static int32_t SinkSetStateInIoThreadCb(pa_sink *s, pa_sink_state_t newState, pa_suspend_cause_t newSuspendCause)
3711 {
3712 struct Userdata *u = NULL;
3713
3714 pa_assert(s);
3715 pa_assert_se(u = s->userdata);
3716
3717 AUDIO_INFO_LOG("Sink[%{public}s] state change:[%{public}s]-->[%{public}s]",
3718 GetDeviceClass(u->primary.sinkAdapter->deviceClass), GetStateInfo(s->thread_info.state),
3719 GetStateInfo(newState));
3720 u->primary.previousState = u->sink->thread_info.state;
3721
3722 if (!strcmp(GetDeviceClass(u->primary.sinkAdapter->deviceClass), DEVICE_CLASS_REMOTE)) {
3723 return RemoteSinkStateChange(s, newState);
3724 }
3725
3726 if (!strcmp(u->sink->name, OFFLOAD_SINK_NAME)) {
3727 OffloadSinkStateChangeCb(s, newState);
3728 return 0;
3729 }
3730
3731 if (s->thread_info.state == PA_SINK_SUSPENDED || s->thread_info.state == PA_SINK_INIT ||
3732 newState == PA_SINK_RUNNING) {
3733 if (EffectChainManagerCheckEffectOffload() && (!strcmp(u->sink->name, "Speaker"))) {
3734 SinkSetStateInIoThreadCbStartMultiChannel(u, newState);
3735 }
3736 if (strcmp(u->sink->name, BT_SINK_NAME) || newState == PA_SINK_RUNNING) {
3737 return SinkSetStateInIoThreadCbStartPrimary(u, newState);
3738 }
3739 } else if (PA_SINK_IS_OPENED(s->thread_info.state)) {
3740 if (newState != PA_SINK_SUSPENDED) {
3741 return 0;
3742 }
3743 // Continuously dropping data; clear the counter on entering the suspended state.
3744 if (u->bytes_dropped != 0) {
3745 AUDIO_INFO_LOG("HDI-sink continuously dropping data - clear statistics (%zu -> 0 bytes dropped)",
3746 u->bytes_dropped);
3747 u->bytes_dropped = 0;
3748 }
3749
3750 if (pa_atomic_load(&u->primary.isHDISinkStarted) == 1) {
3751 pa_asyncmsgq_post(u->primary.dq, NULL, HDI_STOP, NULL, 0, NULL, NULL);
3752 }
3753
3754 if (u->multiChannel.isHDISinkStarted) {
3755 u->multiChannel.sinkAdapter->RendererSinkStop(u->multiChannel.sinkAdapter);
3756 AUDIO_INFO_LOG("MultiChannel Stopped HDI renderer");
3757 u->multiChannel.isHDISinkStarted = false;
3758 }
3759 }
3760
3761 return 0;
3762 }
3763
3764 static pa_hook_result_t SinkInputMoveStartCb(pa_core *core, pa_sink_input *i, struct Userdata *u)
3765 {
3766 pa_sink_input_assert_ref(i);
3767 char str[SPRINTF_STR_LEN] = {0};
3768 GetSinkInputName(i, str, SPRINTF_STR_LEN);
3769 AUDIO_INFO_LOG("SinkInputMoveStartCb sink[%{public}s] - %{public}s", i->sink->name, str);
3770 if (u->offload_enable && !strcmp(i->sink->name, OFFLOAD_SINK_NAME) &&
3771 i->thread_info.state == PA_SINK_INPUT_RUNNING) {
3772 const bool maybeOffload = pa_memblockq_get_maxrewind(i->thread_info.render_memblockq) != 0;
3773 if (maybeOffload || InputIsOffload(i)) {
3774 OffloadRewindAndFlush(u, i, false);
3775 pa_sink_input_update_max_rewind(i, 0);
3776 }
3777 }
3778 return PA_HOOK_OK;
3779 }
3780
3781 static pa_hook_result_t SinkInputStateChangedCb(pa_core *core, pa_sink_input *i, struct Userdata *u)
3782 {
3783 pa_sink_input_assert_ref(i);
3784 char str[SPRINTF_STR_LEN] = {0};
3785 GetSinkInputName(i, str, SPRINTF_STR_LEN);
3786 AUDIO_INFO_LOG("SinkInputStateChangedCb sink[%{public}s] - %{public}s", i->sink->name, str);
3787 if (u->offload_enable && !strcmp(i->sink->name, OFFLOAD_SINK_NAME)) {
3788 if (i->thread_info.state == PA_SINK_INPUT_CORKED) {
3789 pa_atomic_store(&u->offload.hdistate, 0);
3790 }
3791 }
3792 return PA_HOOK_OK;
3793 }
3794
3795 static pa_hook_result_t SinkInputPutCb(pa_core *core, pa_sink_input *i, struct Userdata *u)
3796 {
3797 pa_sink_input_assert_ref(i);
3798 const char *streamMode = pa_proplist_gets(i->proplist, "stream.mode");
3799 if (streamMode != NULL && !strcmp(streamMode, DUP_STEAM_NAME)) {
3800 AUDIO_INFO_LOG("Dup stream is dismissed:%{public}u", i->index);
3801 return PA_HOOK_OK;
3802 }
3803 i->state_change = PaInputStateChangeCb;
3804 return PA_HOOK_OK;
3805 }
3806
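// Fill a SinkAttr from the module configuration and initialize the primary HDI renderer.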
3807 static int32_t PrepareDevice(struct Userdata *u, const char *filePath)
3808 {
3809 SinkAttr sample_attrs;
3810 int32_t ret;
3811
3812 sample_attrs.format = ConvertPaToHdiAdapterFormat(u->ss.format);
3813 sample_attrs.adapterName = u->adapterName;
3814 sample_attrs.openMicSpeaker = u->open_mic_speaker;
3815 sample_attrs.sampleRate = (uint32_t)u->ss.rate;
3816 sample_attrs.channel = u->ss.channels;
3817 sample_attrs.volume = MAX_SINK_VOLUME_LEVEL;
3818 sample_attrs.filePath = filePath;
3819 sample_attrs.deviceNetworkId = u->deviceNetworkId;
3820 sample_attrs.deviceType = u->deviceType;
3821
3822 if (!strcmp(GetDeviceClass(u->primary.sinkAdapter->deviceClass), DEVICE_CLASS_MULTICHANNEL)) {
3823 sample_attrs.channel = DEFAULT_MULTICHANNEL_NUM;
3824 sample_attrs.channelLayout = DEFAULT_MULTICHANNEL_CHANNELLAYOUT;
3825 }
3826
3827 ret = u->primary.sinkAdapter->RendererSinkInit(u->primary.sinkAdapter, &sample_attrs);
3828 if (ret != 0) {
3829 AUDIO_ERR_LOG("audiorenderer Init failed!");
3830 return -1;
3831 }
3838
3839 return 0;
3840 }
3841
3842 static int32_t PrepareDeviceOffload(struct Userdata *u)
3843 {
3844 const char *adapterName = safeProplistGets(u->sink->proplist, PA_PROP_DEVICE_STRING, "");
3845 const char *filePath = safeProplistGets(u->sink->proplist, "filePath", "");
3846 const char *deviceNetworkId = safeProplistGets(u->sink->proplist, "NetworkId", "");
3847 AUDIO_INFO_LOG("PrepareDeviceOffload enter, deviceClass %d, filePath %s",
3848 u->offload.sinkAdapter->deviceClass, filePath);
3849 SinkAttr sample_attrs;
3850 int32_t ret;
3851
3852 enum HdiAdapterFormat format = ConvertPaToHdiAdapterFormat(u->ss.format);
3853 sample_attrs.format = format;
3854 AUDIO_INFO_LOG("PrepareDeviceOffload audiorenderer format: %d, deviceClass: %s",
3855 sample_attrs.format, GetDeviceClass(u->offload.sinkAdapter->deviceClass));
3856 sample_attrs.adapterName = adapterName;
3857 sample_attrs.openMicSpeaker = u->open_mic_speaker;
3858 sample_attrs.sampleRate = u->ss.rate;
3859 sample_attrs.channel = u->ss.channels;
3860 sample_attrs.volume = MAX_SINK_VOLUME_LEVEL;
3861 sample_attrs.filePath = filePath;
3862 sample_attrs.deviceNetworkId = deviceNetworkId;
3863 sample_attrs.deviceType = u->deviceType;
3864
3865 ret = u->offload.sinkAdapter->RendererSinkInit(u->offload.sinkAdapter, &sample_attrs);
3866 if (ret != 0) {
3867 AUDIO_ERR_LOG("PrepareDeviceOffload audiorenderer Init failed!");
3868 return -1;
3869 }
3870
3871 return 0;
3872 }
3873
3874 static int32_t PrepareDeviceMultiChannel(struct Userdata *u, struct RendererSinkAdapter *sinkAdapter,
3875 const char *filePath)
3876 {
3877 int32_t ret;
3878
3879 enum HdiAdapterFormat format = ConvertPaToHdiAdapterFormat(u->ss.format);
3880
3881 u->multiChannel.sample_attrs.format = format;
3882 u->multiChannel.sample_attrs.sampleRate = u->ss.rate;
3883 AUDIO_INFO_LOG("PrepareDeviceMultiChannel format: %d, deviceClass: %s",
3884 u->multiChannel.sample_attrs.format, GetDeviceClass(sinkAdapter->deviceClass));
3885 u->multiChannel.sample_attrs.adapterName = u->adapterName;
3886 u->multiChannel.sample_attrs.openMicSpeaker = u->open_mic_speaker;
3888 u->multiChannel.sample_attrs.channel = DEFAULT_MULTICHANNEL_NUM;
3889 u->multiChannel.sample_attrs.channelLayout = DEFAULT_MULTICHANNEL_CHANNELLAYOUT;
3890 u->multiChannel.sinkChannel = DEFAULT_MULTICHANNEL_NUM;
3891 u->multiChannel.sinkChannelLayout = DEFAULT_MULTICHANNEL_CHANNELLAYOUT;
3892 u->multiChannel.sample_attrs.volume = MAX_SINK_VOLUME_LEVEL;
3893 u->multiChannel.sample_attrs.filePath = filePath;
3894 u->multiChannel.sample_attrs.deviceNetworkId = u->deviceNetworkId;
3895 u->multiChannel.sample_attrs.deviceType = u->deviceType;
3896
3897 ret = sinkAdapter->RendererSinkInit(sinkAdapter, &u->multiChannel.sample_attrs);
3898 if (ret != 0) {
3899 AUDIO_ERR_LOG("PrepareDeviceMultiChannel Init failed!");
3900 return -1;
3901 }
3902 u->multiChannel.isHDISinkInited = true;
3903 AUDIO_DEBUG_LOG("PrepareDeviceMultiChannel init success");
3904 return 0;
3905 }
3906
3907 static void PaHdiSinkUserdataInit(struct Userdata *u)
3908 {
3909 u->format = u->ss.format;
3910 u->processLen = IN_CHANNEL_NUM_MAX * DEFAULT_FRAMELEN;
3911 u->processSize = (uint32_t)u->processLen * sizeof(float);
3912 u->bufferAttr = pa_xnew0(BufferAttr, 1);
3913 u->bufferAttr->samplingRate = (int32_t)u->ss.rate;
3914 u->bufferAttr->frameLen = DEFAULT_FRAMELEN;
3915 u->bufferAttr->numChanIn = u->ss.channels;
3916 u->bufferAttr->numChanOut = u->ss.channels;
3917 u->bufferAttr->bufOutUsed = true;
3918 u->sinkSceneMode = -1;
3919 u->sinkSceneType = -1;
3920 }
3921
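// Parse the sample spec and channel map, prepare the HDI device, and create the pa_sink
// object (with dynamic latency unless fixed_latency is set).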
3922 static pa_sink *PaHdiSinkInit(struct Userdata *u, pa_modargs *ma, const char *driver)
3923 {
3924 pa_sink_new_data data;
3925 pa_module *m;
3926 pa_sink *sink = NULL;
3927
3928 m = u->module;
3929 u->ss = m->core->default_sample_spec;
3930 u->map = m->core->default_channel_map;
3931 if (pa_modargs_get_sample_spec_and_channel_map(ma, &u->ss, &u->map, PA_CHANNEL_MAP_DEFAULT) < 0) {
3932 AUDIO_ERR_LOG("Failed to parse sample specification and channel map");
3933 goto fail;
3934 }
3935
3936 AUDIO_INFO_LOG("Initializing HDI rendering device with rate: %{public}d, channels: %{public}d",
3937 u->ss.rate, u->ss.channels);
3938 if (PrepareDevice(u, pa_modargs_get_value(ma, "file_path", "")) < 0) { goto fail; }
3939
3940 u->primary.prewrite = 0;
3941 if (u->offload_enable && !strcmp(GetDeviceClass(u->primary.sinkAdapter->deviceClass), DEVICE_CLASS_PRIMARY)) {
3942 u->primary.prewrite = u->block_usec * 7; // 7 frame, set cache len in hdi, avoid pop
3943 }
3944
3945 AUDIO_DEBUG_LOG("Initialization of HDI rendering device[%{public}s] completed", u->adapterName);
3946 pa_sink_new_data_init(&data);
3947 data.driver = driver;
3948 data.module = m;
3949
3950 PaHdiSinkUserdataInit(u);
3951 pa_sink_new_data_set_name(&data, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME));
3952 pa_sink_new_data_set_sample_spec(&data, &u->ss);
3953 pa_sink_new_data_set_channel_map(&data, &u->map);
3954 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING,
3955 (u->adapterName ? u->adapterName : DEFAULT_AUDIO_DEVICE_NAME));
3956 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_DESCRIPTION, "HDI sink is %s",
3957 (u->adapterName ? u->adapterName : DEFAULT_AUDIO_DEVICE_NAME));
3958 pa_proplist_sets(data.proplist, "filePath", pa_modargs_get_value(ma, "file_path", ""));
3959 pa_proplist_sets(data.proplist, "networkId", pa_modargs_get_value(ma, "network_id", DEFAULT_DEVICE_NETWORKID));
3960
3961 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
3962 AUDIO_ERR_LOG("Invalid properties");
3963 pa_sink_new_data_done(&data);
3964 goto fail;
3965 }
3966
3967 if (u->fixed_latency) {
3968 sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY);
3969 } else {
3970 sink = pa_sink_new(m->core, &data,
3971 PA_SINK_HARDWARE | PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY);
3972 }
3973 pa_sink_new_data_done(&data);
3974
3975 return sink;
3976
3977 fail:
3978 AUDIO_ERR_LOG("PaHdiSinkInit fail");
3979 return NULL;
3980 }
3981
3982 static int32_t PaHdiSinkNewInitThreadMultiChannel(pa_module *m, pa_modargs *ma, struct Userdata *u)
3983 {
3984 int ret;
3985 pa_atomic_store(&u->multiChannel.dflag, 0);
3986 u->multiChannel.msgq = pa_asyncmsgq_new(0);
3987 u->multiChannel.dq = pa_asyncmsgq_new(0);
3988 ret = LoadSinkAdapter(DEVICE_CLASS_MULTICHANNEL, "LocalDevice", &u->multiChannel.sinkAdapter);
3989 if (ret) {
3990 AUDIO_ERR_LOG("Load mch adapter failed");
3991 return -1;
3992 }
3993 if (PrepareDeviceMultiChannel(u, u->multiChannel.sinkAdapter, pa_modargs_get_value(ma, "file_path", "")) < 0) {
3994 return -1;
3995 }
3996
3997 u->multiChannel.used = true;
3998
3999 u->multiChannel.chunk.memblock = pa_memblock_new(u->sink->core->mempool, -1); // -1 == pa_mempool_block_size_max
4000
4001 return 0;
4002 }
4003
4004 static int32_t PaHdiSinkNewInitThread(pa_module *m, pa_modargs *ma, struct Userdata *u)
4005 {
4006 char *paThreadName = NULL;
4007
4008 // offload
4009 const char *deviceClass = GetDeviceClass(u->primary.sinkAdapter->deviceClass);
4010 if (!strcmp(u->sink->name, OFFLOAD_SINK_NAME) && u->offload_enable) {
4011 AUDIO_DEBUG_LOG("PaHdiSinkNew device[%s] sink[%s] init offload thread", deviceClass, u->sink->name);
4012 int32_t ret = LoadSinkAdapter(DEVICE_CLASS_OFFLOAD, "LocalDevice", &u->offload.sinkAdapter);
4013 if (ret) {
4014 AUDIO_ERR_LOG("Load adapter failed");
4015 return -1;
4016 }
4017 u->offload.msgq = pa_asyncmsgq_new(0);
4018 pa_atomic_store(&u->offload.hdistate, 0);
4019 u->offload.chunk.memblock = pa_memblock_new(u->sink->core->mempool,
4020 pa_usec_to_bytes(200 * PA_USEC_PER_MSEC, &u->sink->sample_spec)); // 200ms for max len once offload render
4021 pa_module_hook_connect(m, &m->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_START], PA_HOOK_LATE,
4022 (pa_hook_cb_t)SinkInputMoveStartCb, u);
4023 pa_module_hook_connect(m, &m->core->hooks[PA_CORE_HOOK_SINK_INPUT_STATE_CHANGED], PA_HOOK_NORMAL,
4024 (pa_hook_cb_t)SinkInputStateChangedCb, u);
4025 } else {
4026 AUDIO_INFO_LOG("PaHdiSinkNew device[%s] sink[%s] skip offload thread", deviceClass, u->sink->name);
4027 }
4028
4029 if (!strcmp(u->sink->name, MCH_SINK_NAME)) {
4030 PaHdiSinkNewInitThreadMultiChannel(m, ma, u);
4031 u->multichannel_enable = true;
4032 } else {
4033 u->multichannel_enable = false;
4034 }
4035
4036 if (!strcmp(u->sink->name, "Speaker") || !strcmp(u->sink->name, MCH_SINK_NAME)
4037 || !strcmp(u->sink->name, OFFLOAD_SINK_NAME)) {
4038 pa_module_hook_connect(m, &m->core->hooks[PA_CORE_HOOK_SINK_INPUT_PUT], PA_HOOK_EARLY,
4039 (pa_hook_cb_t)SinkInputPutCb, u);
4040 }
4041
4042 paThreadName = "OS_ProcessData";
4043 if (!(u->thread = pa_thread_new(paThreadName, ThreadFuncRendererTimerBus, u))) {
4044 AUDIO_ERR_LOG("Failed to create bus thread.");
4045 return -1;
4046 }
4047
4048 return 0;
4049 }
4050
4051 static int32_t PaHdiSinkNewInitUserData(pa_module *m, pa_modargs *ma, struct Userdata *u)
4052 {
4053 u->core = m->core;
4054 u->module = m;
4055
4056 pa_memchunk_reset(&u->memchunk);
4057 u->rtpoll = pa_rtpoll_new();
4058 u->primary.msgq = pa_asyncmsgq_new(0);
4059 pthread_rwlock_init(&u->rwlockSleep, NULL);
4060 pthread_mutex_init(&u->mutexPa, NULL);
4061 pthread_mutex_init(&u->mutexPa2, NULL);
4062
4063 if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
4064 AUDIO_ERR_LOG("pa_thread_mq_init() failed.");
4065 return -1;
4066 }
4067
4068 AUDIO_DEBUG_LOG("Load sink adapter");
4069 int32_t ret = LoadSinkAdapter(pa_modargs_get_value(ma, "device_class", DEFAULT_DEVICE_CLASS),
4070 pa_modargs_get_value(ma, "network_id", DEFAULT_DEVICE_NETWORKID), &u->primary.sinkAdapter);
4071 if (ret) {
4072 AUDIO_ERR_LOG("Load adapter failed");
4073 return -1;
4074 }
4075 if (pa_modargs_get_value_u32(ma, "fixed_latency", &u->fixed_latency) < 0) {
4076 AUDIO_ERR_LOG("Failed to parse fixed latency argument.");
4077 return -1;
4078 }
4079 if (pa_modargs_get_value_s32(ma, "device_type", &u->deviceType) < 0) {
4080 AUDIO_ERR_LOG("Failed to parse deviceType argument.");
4081 return -1;
4082 }
4083
4084 u->adapterName = pa_modargs_get_value(ma, "adapter_name", DEFAULT_DEVICE_CLASS);
4085 u->sink_latency = 0;
4086 if (pa_modargs_get_value_u32(ma, "sink_latency", &u->sink_latency) < 0) {
4087 AUDIO_ERR_LOG("No sink_latency argument.");
4088 }
4089
4090 u->deviceNetworkId = pa_modargs_get_value(ma, "network_id", DEFAULT_DEVICE_NETWORKID);
4091
4092 if (pa_modargs_get_value_u32(ma, "render_in_idle_state", &u->render_in_idle_state) < 0) {
4093 AUDIO_ERR_LOG("Failed to parse render_in_idle_state argument.");
4094 return -1;
4095 }
4096
4097 if (pa_modargs_get_value_u32(ma, "open_mic_speaker", &u->open_mic_speaker) < 0) {
4098 AUDIO_ERR_LOG("Failed to parse open_mic_speaker argument.");
4099 return -1;
4100 }
4101
4102 u->test_mode_on = false;
4103 if (pa_modargs_get_value_boolean(ma, "test_mode_on", &u->test_mode_on) < 0) {
4104 AUDIO_INFO_LOG("No test_mode_on arg. Normal mode it is.");
4105 }
4106
4107 return 0;
4108 }
4109
4110 static int32_t PaHdiSinkNewInitUserDataAndSink(pa_module *m, pa_modargs *ma, const char *driver, struct Userdata *u)
4111 {
4112 if (pa_modargs_get_value_boolean(ma, "offload_enable", &u->offload_enable) < 0) {
4113 AUDIO_ERR_LOG("Failed to parse offload_enable argument.");
4114 return -1;
4115 }
4116
4117 pa_atomic_store(&u->primary.dflag, 0);
4118 u->primary.dq = pa_asyncmsgq_new(0);
4119 CHECK_AND_RETURN_RET_LOG(u->primary.dq, -1, "Failed to create u->primary.dq");
4120
4121 u->sink = PaHdiSinkInit(u, ma, driver);
4122 if (!u->sink) {
4123 AUDIO_ERR_LOG("Failed to create sink object");
4124 return -1;
4125 }
4126 u->render_full_enable = false; // default to false
4127 if (u->ss.channels > CHANNEL_COUNT_2) {
4128 AUDIO_INFO_LOG("multichannel case, will call render_full for dp");
4129 u->render_full_enable = true;
4130 }
4131
4132 u->sink->parent.process_msg = SinkProcessMsg;
4133 u->sink->set_state_in_io_thread = SinkSetStateInIoThreadCb;
4134 if (!u->fixed_latency) {
4135 u->sink->update_requested_latency = SinkUpdateRequestedLatencyCb;
4136 }
4137 u->sink->userdata = u;
4138
4139 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
4140 pa_sink_set_rtpoll(u->sink, u->rtpoll);
4141
4142 u->bytes_dropped = 0;
4143 u->buffer_size = DEFAULT_BUFFER_SIZE;
4144 if (pa_modargs_get_value_u32(ma, "buffer_size", &u->buffer_size) < 0) {
4145 AUDIO_ERR_LOG("Failed to parse buffer_size argument.");
4146 return -1;
4147 }
4148
4149 u->block_usec = pa_bytes_to_usec(u->buffer_size, &u->sink->sample_spec);
4150
4151 u->lastRecodedLatency = 0;
4152 u->continuesGetLatencyErrCount = 0;
4153 u->lastStreamAvailable = 0;
4154 u->streamAvailable = 0;
4155
4156 if (u->fixed_latency) {
4157 pa_sink_set_fixed_latency(u->sink, u->block_usec);
4158 } else {
4159 pa_sink_set_latency_range(u->sink, 0, u->block_usec);
4160 }
4161
4162 pa_sink_set_max_request(u->sink, u->buffer_size);
4163
4164 u->streamAvailableMap = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func,
4165 pa_xfree, pa_xfree);
4166
4167 return 0;
4168 }
4169
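// Module entry point: builds the userdata and the sink, starts the bus thread and the
// HDI writer thread(s); on any failure everything is released through UserdataFree().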
4170 pa_sink *PaHdiSinkNew(pa_module *m, pa_modargs *ma, const char *driver)
4171 {
4172 struct Userdata *u = NULL;
4173 char *hdiThreadName = NULL;
4174 char *hdiThreadNameMch = NULL;
4175
4176 pa_assert(m);
4177 pa_assert(ma);
4178
4179 u = pa_xnew0(struct Userdata, 1);
4180 pa_assert(u);
4181
4182 if (PaHdiSinkNewInitUserData(m, ma, u) < 0) {
4183 goto fail;
4184 }
4185
4186 if (PaHdiSinkNewInitUserDataAndSink(m, ma, driver, u) < 0) {
4187 goto fail;
4188 }
4189
4190 int32_t ret = PaHdiSinkNewInitThread(m, ma, u);
4191 if (ret) {
4192 AUDIO_ERR_LOG("PaHdiSinkNewInitThread failed");
4193 goto fail;
4194 }
4195
4196 if (u->test_mode_on) {
4197 u->writeCount = 0;
4198 u->renderCount = 0;
4199 hdiThreadName = "OS_WriteHdiTest";
4200 if (!(u->primary.thread_hdi = pa_thread_new(hdiThreadName, TestModeThreadFuncWriteHDI, u))) {
4201 AUDIO_ERR_LOG("Failed to test-mode-write-hdi thread.");
4202 goto fail;
4203 }
4204 } else {
4205 hdiThreadName = "OS_WriteHdi";
4206 if (!(u->primary.thread_hdi = pa_thread_new(hdiThreadName, ThreadFuncWriteHDI, u))) {
4207 AUDIO_ERR_LOG("Failed to write-hdi-primary2 thread.");
4208 goto fail;
4209 }
4210
4211 if (!strcmp(u->sink->name, MCH_SINK_NAME)) {
4212 hdiThreadNameMch = "OS_WriteHdiMch";
4213 if (!(u->multiChannel.thread_hdi = pa_thread_new(hdiThreadNameMch, ThreadFuncWriteHDIMultiChannel, u))) {
4214 AUDIO_ERR_LOG("Failed to write-hdi-multichannel thread.");
4215 goto fail;
4216 }
4217 }
4218 }
4219
4220 u->primary.writeTime = DEFAULT_WRITE_TIME;
4221 u->multiChannel.writeTime = DEFAULT_WRITE_TIME;
4222 pa_sink_put(u->sink);
4223
4224 return u->sink;
4225 fail:
4226 AUDIO_ERR_LOG("PaHdiSinkNew failed, free userdata");
4227 UserdataFree(u);
4228
4229 return NULL;
4230 }
4231
4232 static void UserdataFreeOffload(struct Userdata *u)
4233 {
4234 if (u->offload.msgq) {
4235 pa_asyncmsgq_unref(u->offload.msgq);
4236 }
4237
4238 if (u->offload.sinkAdapter) {
4239 u->offload.sinkAdapter->RendererSinkStop(u->offload.sinkAdapter);
4240 OffloadUnlock(u);
4241 u->offload.sinkAdapter->RendererSinkDeInit(u->offload.sinkAdapter);
4242 UnLoadSinkAdapter(u->offload.sinkAdapter);
4243 }
4244
4245 if (u->offload.chunk.memblock) {
4246 pa_memblock_unref(u->offload.chunk.memblock);
4247 }
4248 }
4249
4250 static void UserdataFreeMultiChannel(struct Userdata *u)
4251 {
4252 if (u->multiChannel.msgq) {
4253 pa_asyncmsgq_unref(u->multiChannel.msgq);
4254 }
4255
4256 if (u->multiChannel.dq) {
4257 pa_asyncmsgq_unref(u->multiChannel.dq);
4258 }
4259
4260 if (u->multiChannel.sinkAdapter) {
4261 u->multiChannel.sinkAdapter->RendererSinkStop(u->multiChannel.sinkAdapter);
4262 u->multiChannel.sinkAdapter->RendererSinkDeInit(u->multiChannel.sinkAdapter);
4263 UnLoadSinkAdapter(u->multiChannel.sinkAdapter);
4264 }
4265
4266 if (u->multiChannel.chunk.memblock) {
4267 pa_memblock_unref(u->multiChannel.chunk.memblock);
4268 }
4269 }
4270
4271 static void UserdataFreeThread(struct Userdata *u)
4272 {
4273 if (u->thread) {
4274 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
4275 pa_thread_free(u->thread);
4276 }
4277
4278 if (u->offload.thread) {
4279 pa_asyncmsgq_send(u->offload.msgq, NULL, QUIT, NULL, 0, NULL);
4280 pa_thread_free(u->offload.thread);
4281 }
4282
4283 if (u->multiChannel.thread) {
4284 pa_asyncmsgq_send(u->multiChannel.msgq, NULL, QUIT, NULL, 0, NULL);
4285 pa_thread_free(u->multiChannel.thread);
4286 }
4287
4288 if (u->multiChannel.thread_hdi) {
4289 pa_asyncmsgq_post(u->multiChannel.dq, NULL, QUIT, NULL, 0, NULL, NULL);
4290 pa_thread_free(u->multiChannel.thread_hdi);
4291 }
4292
4293 if (u->primary.thread) {
4294 pa_asyncmsgq_send(u->primary.msgq, NULL, QUIT, NULL, 0, NULL);
4295 pa_thread_free(u->primary.thread);
4296 }
4297
4298 if (u->primary.thread_hdi) {
4299 pa_asyncmsgq_post(u->primary.dq, NULL, QUIT, NULL, 0, NULL, NULL);
4300 pa_thread_free(u->primary.thread_hdi);
4301 }
4302
4303 pa_thread_mq_done(&u->thread_mq);
4304 }
4305
4306 static bool FreeBufferAttr(struct Userdata *u)
4307 {
4308 // free heap allocated in userdata init
4309 if (u->bufferAttr == NULL) {
4310 pa_xfree(u);
4311 AUDIO_DEBUG_LOG("buffer attr is null, free done");
4312 return false;
4313 }
4314 FreeEffectBuffer(u);
4315
4316 pa_xfree(u->bufferAttr);
4317 u->bufferAttr = NULL;
4318 return true;
4319 }
4320
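// Tear-down: unlink the sink, stop all worker threads, then release message queues,
// sink adapters and buffers.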
4321 static void UserdataFree(struct Userdata *u)
4322 {
4323 if (u == NULL) {
4324 AUDIO_INFO_LOG("Userdata is null, free done");
4325 return;
4326 }
4327
4328 if (u->sink) {
4329 pa_sink_unlink(u->sink);
4330 }
4331
4332 UserdataFreeThread(u);
4333
4334 if (u->sink) {
4335 pa_sink_unref(u->sink);
4336 }
4337
4338 if (u->memchunk.memblock) {
4339 pa_memblock_unref(u->memchunk.memblock);
4340 }
4341
4342 if (u->rtpoll) {
4343 pa_rtpoll_free(u->rtpoll);
4344 }
4345
4346 UserdataFreeOffload(u);
4347 UserdataFreeMultiChannel(u);
4348
4349 if (u->primary.msgq) {
4350 pa_asyncmsgq_unref(u->primary.msgq);
4351 }
4352
4353 if (u->primary.dq) {
4354 pa_asyncmsgq_unref(u->primary.dq);
4355 }
4356
4357 if (u->primary.sinkAdapter) {
4358 u->primary.sinkAdapter->RendererSinkStop(u->primary.sinkAdapter);
4359 u->primary.sinkAdapter->RendererSinkDeInit(u->primary.sinkAdapter);
4360 UnLoadSinkAdapter(u->primary.sinkAdapter);
4361 }
4362
4363 if (!FreeBufferAttr(u)) {
4364 return;
4365 }
4366
4367 if (u->streamAvailableMap) {
4368 pa_hashmap_free(u->streamAvailableMap);
4369 }
4370
4371 pa_xfree(u);
4372
4373 AUDIO_DEBUG_LOG("UserdataFree done");
4374 }
4375
4376 void PaHdiSinkFree(pa_sink *s)
4377 {
4378 AUTO_CTRACE("PaHdiSinkFree");
4379 AUDIO_INFO_LOG("PaHdiSinkFree, free userdata");
4380 struct Userdata *u = NULL;
4381
4382 pa_sink_assert_ref(s);
4383 pa_assert_se(u = s->userdata);
4384
4385 UserdataFree(u);
4386 }
4387