1 /*
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "audio_renderer_napi.h"
17 #include "ability.h"
18 #include "audio_renderer_callback_napi.h"
19 #include "renderer_period_position_callback_napi.h"
20 #include "renderer_position_callback_napi.h"
21
22 #include "audio_common_napi.h"
23 #include "audio_errors.h"
24 #include "audio_manager_napi.h"
25 #include "audio_parameters_napi.h"
26 #include "hilog/log.h"
27 #include "media_log.h"
28 #include "napi_base_context.h"
29 #include "securec.h"
30
31 using namespace std;
32 using OHOS::HiviewDFX::HiLog;
33 using OHOS::HiviewDFX::HiLogLabel;
34
35 namespace OHOS {
36 namespace AudioStandard {
37 static __thread napi_ref g_rendererConstructor = nullptr;
38 std::unique_ptr<AudioParameters> AudioRendererNapi::sAudioParameters_ = nullptr;
39 std::unique_ptr<AudioRendererOptions> AudioRendererNapi::sRendererOptions_ = nullptr;
40 napi_ref AudioRendererNapi::audioRendererRate_ = nullptr;
41 napi_ref AudioRendererNapi::interruptEventType_ = nullptr;
42 napi_ref AudioRendererNapi::interruptHintType_ = nullptr;
43 napi_ref AudioRendererNapi::interruptForceType_ = nullptr;
44 napi_ref AudioRendererNapi::audioState_ = nullptr;
45 napi_ref AudioRendererNapi::sampleFormat_ = nullptr;
46
47 namespace {
48 const int ARGS_ONE = 1;
49 const int ARGS_TWO = 2;
50
51 const int PARAM0 = 0;
52 const int PARAM1 = 1;
53 const int PARAM2 = 2;
54
55 constexpr HiLogLabel LABEL = {LOG_CORE, LOG_DOMAIN, "AudioRendererNapi"};
56
57 const std::string MARK_REACH_CALLBACK_NAME = "markReach";
58 const std::string PERIOD_REACH_CALLBACK_NAME = "periodReach";
59
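/* Helper macro: declares argc, argv, thisVar and data, then fills them from the current napi callback info. */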
60 #define GET_PARAMS(env, info, num) \
61 size_t argc = num; \
62 napi_value argv[num] = {0}; \
63 napi_value thisVar = nullptr; \
64 void *data; \
65 napi_get_cb_info(env, info, &argc, argv, &thisVar, &data)
66 }
67
68 AudioRendererNapi::AudioRendererNapi()
69 : audioRenderer_(nullptr), contentType_(CONTENT_TYPE_MUSIC), streamUsage_(STREAM_USAGE_MEDIA),
70 deviceRole_(OUTPUT_DEVICE), deviceType_(DEVICE_TYPE_SPEAKER), env_(nullptr), wrapper_(nullptr),
71 scheduleFromApiCall_(true), doNotScheduleWrite_(false), isDrainWriteQInProgress_(false) {}
72
73 AudioRendererNapi::~AudioRendererNapi()
74 {
75 if (wrapper_ != nullptr) {
76 napi_delete_reference(env_, wrapper_);
77 }
78 }
79
80 void AudioRendererNapi::Destructor(napi_env env, void *nativeObject, void *finalize_hint)
81 {
82 if (nativeObject != nullptr) {
83 auto obj = static_cast<AudioRendererNapi *>(nativeObject);
84 delete obj;
85 obj = nullptr;
86 }
87 }
88
89 napi_status AudioRendererNapi::AddNamedProperty(napi_env env, napi_value object,
90 const std::string name, int32_t enumValue)
91 {
92 napi_status status;
93 napi_value enumNapiValue;
94
95 status = napi_create_int32(env, enumValue, &enumNapiValue);
96 if (status == napi_ok) {
97 status = napi_set_named_property(env, object, name.c_str(), enumNapiValue);
98 }
99
100 return status;
101 }
102
103 static void SetValueInt32(const napi_env& env, const std::string& fieldStr, const int intValue, napi_value &result)
104 {
105 napi_value value = nullptr;
106 napi_create_int32(env, intValue, &value);
107 napi_set_named_property(env, result, fieldStr.c_str(), value);
108 }
109
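/* Converts the JS-side AudioSampleFormat enum value to the native AudioSampleFormat;
 * GetJsAudioSampleFormat below performs the reverse mapping. */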
110 static AudioSampleFormat GetNativeAudioSampleFormat(int32_t napiSampleFormat)
111 {
112 AudioSampleFormat format = INVALID_WIDTH;
113
114 switch (napiSampleFormat) {
115 case AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_U8:
116 format = SAMPLE_U8;
117 break;
118 case AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_S16LE:
119 format = SAMPLE_S16LE;
120 break;
121 case AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_S24LE:
122 format = SAMPLE_S24LE;
123 break;
124 case AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_S32LE:
125 format = SAMPLE_S32LE;
126 break;
127 default:
128 format = INVALID_WIDTH;
129 HiLog::Error(LABEL, "Unknown sample format requested by JS, setting it to default INVALID_WIDTH!");
130 break;
131 }
132
133 return format;
134 }
135
136 static AudioRendererNapi::AudioSampleFormat GetJsAudioSampleFormat(int32_t nativeSampleFormat)
137 {
138 AudioRendererNapi::AudioSampleFormat format = AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_INVALID;
139
140 switch (nativeSampleFormat) {
141 case SAMPLE_U8:
142 format = AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_U8;
143 break;
144 case SAMPLE_S16LE:
145 format = AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_S16LE;
146 break;
147 case SAMPLE_S24LE:
148 format = AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_S24LE;
149 break;
150 case SAMPLE_S32LE:
151 format = AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_S32LE;
152 break;
153 default:
154 format = AudioRendererNapi::AudioSampleFormat::SAMPLE_FORMAT_INVALID;
155 HiLog::Error(LABEL, "Unknown sample format returned from native, setting it to default SAMPLE_FORMAT_INVALID!");
156 break;
157 }
158
159 return format;
160 }
161
162 napi_value AudioRendererNapi::CreateAudioSampleFormatObject(napi_env env)
163 {
164 napi_value result = nullptr;
165 napi_status status;
166 string propName;
167
168 status = napi_create_object(env, &result);
169 if (status == napi_ok) {
170 for (int i = AudioRendererNapi::SAMPLE_FORMAT_INVALID; i <= AudioRendererNapi::SAMPLE_FORMAT_S32LE; i++) {
171 switch (i) {
172 case AudioRendererNapi::SAMPLE_FORMAT_INVALID:
173 propName = "SAMPLE_FORMAT_INVALID";
174 break;
175 case AudioRendererNapi::SAMPLE_FORMAT_U8:
176 propName = "SAMPLE_FORMAT_U8";
177 break;
178 case AudioRendererNapi::SAMPLE_FORMAT_S16LE:
179 propName = "SAMPLE_FORMAT_S16LE";
180 break;
181 case AudioRendererNapi::SAMPLE_FORMAT_S24LE:
182 propName = "SAMPLE_FORMAT_S24LE";
183 break;
184 case AudioRendererNapi::SAMPLE_FORMAT_S32LE:
185 propName = "SAMPLE_FORMAT_S32LE";
186 break;
187 default:
188 HiLog::Error(LABEL, "CreateAudioSampleFormatObject: No prop with this value, try next value!");
189 continue;
190 }
191 status = AddNamedProperty(env, result, propName, i);
192 if (status != napi_ok) {
193 HiLog::Error(LABEL, "Failed to add named prop!");
194 break;
195 }
196 propName.clear();
197 }
198 if (status == napi_ok) {
199 status = napi_create_reference(env, result, REFERENCE_CREATION_COUNT, &sampleFormat_);
200 if (status == napi_ok) {
201 return result;
202 }
203 }
204 }
205 HiLog::Error(LABEL, "CreateAudioSampleFormatObject failed!");
206 napi_get_undefined(env, &result);
207
208 return result;
209 }
210
211 napi_value AudioRendererNapi::CreateAudioRendererRateObject(napi_env env)
212 {
213 napi_value result = nullptr;
214 napi_status status;
215 std::string propName;
216
217 status = napi_create_object(env, &result);
218 if (status == napi_ok) {
219 for (auto &iter: rendererRateMap) {
220 propName = iter.first;
221 status = AddNamedProperty(env, result, propName, iter.second);
222 if (status != napi_ok) {
223 HiLog::Error(LABEL, "Failed to add named prop!");
224 break;
225 }
226 propName.clear();
227 }
228 if (status == napi_ok) {
229 status = napi_create_reference(env, result, REFERENCE_CREATION_COUNT, &audioRendererRate_);
230 if (status == napi_ok) {
231 return result;
232 }
233 }
234 }
235 HiLog::Error(LABEL, "CreateAudioRendererRateObject failed!");
236 napi_get_undefined(env, &result);
237
238 return result;
239 }
240
241 napi_value AudioRendererNapi::CreateAudioStateObject(napi_env env)
242 {
243 napi_value result = nullptr;
244 napi_status status;
245 std::string propName;
246 int32_t refCount = 1;
247
248 status = napi_create_object(env, &result);
249 if (status == napi_ok) {
250 for (auto &iter: audioStateMap) {
251 propName = iter.first;
252 status = AddNamedProperty(env, result, propName, iter.second);
253 if (status != napi_ok) {
254 HiLog::Error(LABEL, "Failed to add named prop in CreateAudioStateObject!");
255 break;
256 }
257 propName.clear();
258 }
259 if (status == napi_ok) {
260 status = napi_create_reference(env, result, refCount, &audioState_);
261 if (status == napi_ok) {
262 return result;
263 }
264 }
265 }
266 HiLog::Error(LABEL, "CreateAudioStateObject failed!");
267 napi_get_undefined(env, &result);
268
269 return result;
270 }
271
272 napi_value AudioRendererNapi::CreateInterruptEventTypeObject(napi_env env)
273 {
274 napi_value result = nullptr;
275 napi_status status;
276 std::string propName;
277 int32_t refCount = 1;
278
279 status = napi_create_object(env, &result);
280 if (status == napi_ok) {
281 for (auto &iter: interruptEventTypeMap) {
282 propName = iter.first;
283 status = AddNamedProperty(env, result, propName, iter.second);
284 if (status != napi_ok) {
285 HiLog::Error(LABEL, "Failed to add named prop in CreateInterruptEventTypeObject!");
286 break;
287 }
288 propName.clear();
289 }
290 if (status == napi_ok) {
291 status = napi_create_reference(env, result, refCount, &interruptEventType_);
292 if (status == napi_ok) {
293 return result;
294 }
295 }
296 }
297 HiLog::Error(LABEL, "CreateInterruptEventTypeObject failed!");
298 napi_get_undefined(env, &result);
299
300 return result;
301 }
302
303 napi_value AudioRendererNapi::CreateInterruptForceTypeObject(napi_env env)
304 {
305 napi_value result = nullptr;
306 napi_status status;
307 std::string propName;
308 int32_t refCount = 1;
309
310 status = napi_create_object(env, &result);
311 if (status == napi_ok) {
312 for (auto &iter: interruptForceTypeMap) {
313 propName = iter.first;
314 status = AddNamedProperty(env, result, propName, iter.second);
315 if (status != napi_ok) {
316 HiLog::Error(LABEL, "Failed to add named prop in CreateInterruptForceTypeObject!");
317 break;
318 }
319 propName.clear();
320 }
321 if (status == napi_ok) {
322 status = napi_create_reference(env, result, refCount, &interruptForceType_);
323 if (status == napi_ok) {
324 return result;
325 }
326 }
327 }
328 HiLog::Error(LABEL, "CreateInterruptForceTypeObject failed!");
329 napi_get_undefined(env, &result);
330
331 return result;
332 }
333
334 napi_value AudioRendererNapi::CreateInterruptHintTypeObject(napi_env env)
335 {
336 napi_value result = nullptr;
337 napi_status status;
338 std::string propName;
339 int32_t refCount = 1;
340
341 status = napi_create_object(env, &result);
342 if (status == napi_ok) {
343 for (auto &iter: interruptHintTypeMap) {
344 propName = iter.first;
345 status = AddNamedProperty(env, result, propName, iter.second);
346 if (status != napi_ok) {
347 HiLog::Error(LABEL, "Failed to add named prop in CreateInterruptHintTypeObject!");
348 break;
349 }
350 propName.clear();
351 }
352 if (status == napi_ok) {
353 status = napi_create_reference(env, result, refCount, &interruptHintType_);
354 if (status == napi_ok) {
355 return result;
356 }
357 }
358 }
359 HiLog::Error(LABEL, "CreateInterruptHintTypeObject failed!");
360 napi_get_undefined(env, &result);
361
362 return result;
363 }
364
365 napi_value AudioRendererNapi::Init(napi_env env, napi_value exports)
366 {
367 napi_status status;
368 napi_value constructor;
369 napi_value result = nullptr;
370 const int32_t refCount = 1;
371 napi_get_undefined(env, &result);
372
373 napi_property_descriptor audio_renderer_properties[] = {
374 DECLARE_NAPI_FUNCTION("setRenderRate", SetRenderRate),
375 DECLARE_NAPI_FUNCTION("getRenderRate", GetRenderRate),
376 DECLARE_NAPI_FUNCTION("start", Start),
377 DECLARE_NAPI_FUNCTION("write", Write),
378 DECLARE_NAPI_FUNCTION("getAudioTime", GetAudioTime),
379 DECLARE_NAPI_FUNCTION("drain", Drain),
380 DECLARE_NAPI_FUNCTION("pause", Pause),
381 DECLARE_NAPI_FUNCTION("stop", Stop),
382 DECLARE_NAPI_FUNCTION("release", Release),
383 DECLARE_NAPI_FUNCTION("getBufferSize", GetBufferSize),
384 DECLARE_NAPI_FUNCTION("on", On),
385 DECLARE_NAPI_FUNCTION("off", Off),
386 DECLARE_NAPI_FUNCTION("getRendererInfo", GetRendererInfo),
387 DECLARE_NAPI_FUNCTION("getStreamInfo", GetStreamInfo),
388 DECLARE_NAPI_GETTER("state", GetState)
389 };
390
391 napi_property_descriptor static_prop[] = {
392 DECLARE_NAPI_STATIC_FUNCTION("createAudioRenderer", CreateAudioRenderer),
393 DECLARE_NAPI_PROPERTY("AudioRendererRate", CreateAudioRendererRateObject(env)),
394 DECLARE_NAPI_PROPERTY("InterruptType", CreateInterruptEventTypeObject(env)),
395 DECLARE_NAPI_PROPERTY("InterruptForceType", CreateInterruptForceTypeObject(env)),
396 DECLARE_NAPI_PROPERTY("InterruptHint", CreateInterruptHintTypeObject(env)),
397 DECLARE_NAPI_PROPERTY("AudioState", CreateAudioStateObject(env)),
398 DECLARE_NAPI_PROPERTY("AudioSampleFormat", CreateAudioSampleFormatObject(env)),
399 };
400
401 status = napi_define_class(env, AUDIO_RENDERER_NAPI_CLASS_NAME.c_str(), NAPI_AUTO_LENGTH, Construct, nullptr,
402 sizeof(audio_renderer_properties) / sizeof(audio_renderer_properties[PARAM0]),
403 audio_renderer_properties, &constructor);
404 if (status != napi_ok) {
405 return result;
406 }
407
408 status = napi_create_reference(env, constructor, refCount, &g_rendererConstructor);
409 if (status == napi_ok) {
410 status = napi_set_named_property(env, exports, AUDIO_RENDERER_NAPI_CLASS_NAME.c_str(), constructor);
411 if (status == napi_ok) {
412 status = napi_define_properties(env, exports,
413 sizeof(static_prop) / sizeof(static_prop[PARAM0]), static_prop);
414 if (status == napi_ok) {
415 return exports;
416 }
417 }
418 }
419
420 HiLog::Error(LABEL, "Failure in AudioRendererNapi::Init()");
421 return result;
422 }
423
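/* Obtains the FA-model application context; when available, its cache directory is passed to
 * AudioRenderer::Create in Construct below. */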
424 static shared_ptr<AbilityRuntime::Context> GetAbilityContext(napi_env env)
425 {
426 HiLog::Info(LABEL, "Getting context with FA model");
427 auto ability = OHOS::AbilityRuntime::GetCurrentAbility(env);
428 if (ability == nullptr) {
429 HiLog::Error(LABEL, "Failed to obtain ability in FA model");
430 return nullptr;
431 }
432
433 auto faContext = ability->GetAbilityContext();
434 if (faContext == nullptr) {
435 HiLog::Error(LABEL, "GetAbilityContext returned null in FA model");
436 return nullptr;
437 }
438
439 return faContext;
440 }
441
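/* Native constructor invoked through the class reference: builds the AudioRenderer from the
 * options staged in sRendererOptions_ by CreateAudioRenderer, registers the renderer callback,
 * and wraps the native object into thisVar. */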
442 napi_value AudioRendererNapi::Construct(napi_env env, napi_callback_info info)
443 {
444 napi_status status;
445 napi_value result = nullptr;
446 napi_get_undefined(env, &result);
447
448 GET_PARAMS(env, info, ARGS_TWO);
449
450 unique_ptr<AudioRendererNapi> rendererNapi = make_unique<AudioRendererNapi>();
451 CHECK_AND_RETURN_RET_LOG(rendererNapi != nullptr, result, "No memory");
452
453 rendererNapi->env_ = env;
454 rendererNapi->contentType_ = sRendererOptions_->rendererInfo.contentType;
455 rendererNapi->streamUsage_ = sRendererOptions_->rendererInfo.streamUsage;
456 rendererNapi->rendererFlags_ = sRendererOptions_->rendererInfo.rendererFlags;
457
458 AudioRendererOptions rendererOptions = {};
459 rendererOptions.streamInfo.samplingRate = sRendererOptions_->streamInfo.samplingRate;
460 rendererOptions.streamInfo.encoding = sRendererOptions_->streamInfo.encoding;
461 rendererOptions.streamInfo.format = sRendererOptions_->streamInfo.format;
462 rendererOptions.streamInfo.channels = sRendererOptions_->streamInfo.channels;
463
464 rendererOptions.rendererInfo.contentType = sRendererOptions_->rendererInfo.contentType;
465 rendererOptions.rendererInfo.streamUsage = sRendererOptions_->rendererInfo.streamUsage;
466 rendererOptions.rendererInfo.rendererFlags = sRendererOptions_->rendererInfo.rendererFlags;
467
468 std::shared_ptr<AbilityRuntime::Context> abilityContext = GetAbilityContext(env);
469 if (abilityContext != nullptr) {
470 std::string cacheDir = abilityContext->GetCacheDir();
471 rendererNapi->audioRenderer_ = AudioRenderer::Create(cacheDir, rendererOptions);
472 } else {
473 rendererNapi->audioRenderer_ = AudioRenderer::Create(rendererOptions);
474 }
475
476 CHECK_AND_RETURN_RET_LOG(rendererNapi->audioRenderer_ != nullptr, result, "Renderer Create failed");
477
478 if (rendererNapi->callbackNapi_ == nullptr) {
479 rendererNapi->callbackNapi_ = std::make_shared<AudioRendererCallbackNapi>(env);
480 CHECK_AND_RETURN_RET_LOG(rendererNapi->callbackNapi_ != nullptr, result, "No memory");
481 int32_t ret = rendererNapi->audioRenderer_->SetRendererCallback(rendererNapi->callbackNapi_);
482 if (ret) {
483 MEDIA_DEBUG_LOG("AudioRendererNapi::Construct SetRendererCallback failed");
484 }
485 }
486
487 status = napi_wrap(env, thisVar, static_cast<void*>(rendererNapi.get()),
488 AudioRendererNapi::Destructor, nullptr, &(rendererNapi->wrapper_));
489 if (status == napi_ok) {
490 rendererNapi.release();
491 return thisVar;
492 }
493
494 HiLog::Error(LABEL, "Failed in AudioRendererNapi::Construct()!");
495 return result;
496 }
497
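/*
 * createAudioRenderer(options[, callback]): parses AudioRendererOptions from the first argument
 * and completes with a new AudioRenderer wrapper, via the callback if one is given or a promise
 * otherwise.
 *
 * Illustrative JS usage (a sketch, not taken from this file; assumes the standard
 * '@ohos.multimedia.audio' import as `audio` and device-appropriate enum values):
 *
 *   let options = {
 *       streamInfo: { samplingRate: 44100, channels: 2, sampleFormat: 1, encodingType: 0 },
 *       rendererInfo: { content: 2, usage: 1, rendererFlags: 0 }
 *   };
 *   let renderer = await audio.createAudioRenderer(options);
 */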
498 napi_value AudioRendererNapi::CreateAudioRenderer(napi_env env, napi_callback_info info)
499 {
500 HiLog::Info(LABEL, "%{public}s IN", __func__);
501 napi_status status;
502 napi_value result = nullptr;
503
504 GET_PARAMS(env, info, ARGS_TWO);
505 NAPI_ASSERT(env, argc >= ARGS_ONE, "requires 1 parameter minimum");
506
507 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
508 CHECK_AND_RETURN_RET_LOG(asyncContext != nullptr, nullptr, "AudioRendererAsyncContext object creation failed");
509
510 for (size_t i = PARAM0; i < argc; i++) {
511 napi_valuetype valueType = napi_undefined;
512 napi_typeof(env, argv[i], &valueType);
513 if (i == PARAM0 && valueType == napi_object) {
514 if (!ParseRendererOptions(env, argv[i], &(asyncContext->rendererOptions))) {
515 HiLog::Error(LABEL, "Parsing of renderer options failed");
516 return result;
517 }
518 } else if (i == PARAM1 && valueType == napi_function) {
519 napi_create_reference(env, argv[i], REFERENCE_CREATION_COUNT, &asyncContext->callbackRef);
520 break;
521 } else {
522 NAPI_ASSERT(env, false, "type mismatch");
523 }
524 }
525
526 if (asyncContext->callbackRef == nullptr) {
527 napi_create_promise(env, &asyncContext->deferred, &result);
528 } else {
529 napi_get_undefined(env, &result);
530 }
531
532 napi_value resource = nullptr;
533 napi_create_string_utf8(env, "CreateAudioRenderer", NAPI_AUTO_LENGTH, &resource);
534
535 status = napi_create_async_work(
536 env, nullptr, resource,
537 [](napi_env env, void *data) {
538 auto context = static_cast<AudioRendererAsyncContext *>(data);
539 context->status = SUCCESS;
540 },
541 GetRendererAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
542 if (status != napi_ok) {
543 result = nullptr;
544 } else {
545 status = napi_queue_async_work(env, asyncContext->work);
546 if (status == napi_ok) {
547 asyncContext.release();
548 } else {
549 result = nullptr;
550 }
551 }
552
553 return result;
554 }
555
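/* Shared completion routine: resolves/rejects the promise or invokes the JS callback with
 * (error, value), then deletes the async work and frees the context. */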
556 void AudioRendererNapi::CommonCallbackRoutine(napi_env env, AudioRendererAsyncContext* &asyncContext,
557 const napi_value &valueParam)
558 {
559 napi_value result[ARGS_TWO] = {0};
560 napi_value retVal;
561
562 if (!asyncContext->status) {
563 napi_get_undefined(env, &result[PARAM0]);
564 result[PARAM1] = valueParam;
565 } else {
566 napi_value message = nullptr;
567 napi_create_string_utf8(env, "Error, Operation not supported or Failed", NAPI_AUTO_LENGTH, &message);
568 napi_create_error(env, nullptr, message, &result[PARAM0]);
569 napi_get_undefined(env, &result[PARAM1]);
570 }
571
572 if (asyncContext->deferred) {
573 if (!asyncContext->status) {
574 napi_resolve_deferred(env, asyncContext->deferred, result[PARAM1]);
575 } else {
576 napi_reject_deferred(env, asyncContext->deferred, result[PARAM0]);
577 }
578 } else {
579 napi_value callback = nullptr;
580 napi_get_reference_value(env, asyncContext->callbackRef, &callback);
581 napi_call_function(env, nullptr, callback, ARGS_TWO, result, &retVal);
582 napi_delete_reference(env, asyncContext->callbackRef);
583 }
584 napi_delete_async_work(env, asyncContext->work);
585
586 delete asyncContext;
587 asyncContext = nullptr;
588 }
589
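/* Completion handler for Write: reports the bytes written, then schedules the next request from
 * writeRequestQ_ unless writes are suspended or a drain is flushing the queue. */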
590 void AudioRendererNapi::WriteAsyncCallbackComplete(napi_env env, napi_status status, void *data)
591 {
592 napi_value result[ARGS_TWO] = {0};
593 napi_value retVal;
594
595 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
596 napi_value valueParam = nullptr;
597
598 if (asyncContext != nullptr) {
599 if (!asyncContext->status) {
600 napi_create_uint32(env, asyncContext->totalBytesWritten, &valueParam);
601 }
602 if (!asyncContext->status) {
603 napi_get_undefined(env, &result[PARAM0]);
604 result[PARAM1] = valueParam;
605 } else {
606 napi_value message = nullptr;
607 napi_create_string_utf8(env, "Error, Operation not supported or Failed", NAPI_AUTO_LENGTH, &message);
608 napi_create_error(env, nullptr, message, &result[PARAM0]);
609 napi_get_undefined(env, &result[PARAM1]);
610 }
611
612 if (asyncContext->deferred) {
613 if (!asyncContext->status) {
614 napi_resolve_deferred(env, asyncContext->deferred, result[PARAM1]);
615 } else {
616 napi_reject_deferred(env, asyncContext->deferred, result[PARAM0]);
617 }
618 } else {
619 napi_value callback = nullptr;
620 napi_get_reference_value(env, asyncContext->callbackRef, &callback);
621 napi_call_function(env, nullptr, callback, ARGS_TWO, result, &retVal);
622 napi_delete_reference(env, asyncContext->callbackRef);
623 }
624 napi_delete_async_work(env, asyncContext->work);
625 // queue the next write request from internal queue to napi queue
626 if (!asyncContext->objectInfo->doNotScheduleWrite_ && !asyncContext->objectInfo->isDrainWriteQInProgress_) {
627 if (!asyncContext->objectInfo->writeRequestQ_.empty()) {
628 napi_queue_async_work(env, asyncContext->objectInfo->writeRequestQ_.front());
629 asyncContext->objectInfo->writeRequestQ_.pop();
630 } else {
631 asyncContext->objectInfo->scheduleFromApiCall_ = true;
632 }
633 }
634
635 delete asyncContext;
636 asyncContext = nullptr;
637 } else {
638 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
639 }
640 }
641
642 void AudioRendererNapi::PauseAsyncCallbackComplete(napi_env env, napi_status status, void *data)
643 {
644 napi_value result[ARGS_TWO] = {0};
645 napi_value retVal;
646
647 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
648 napi_value valueParam = nullptr;
649
650 if (asyncContext != nullptr) {
651 if (!asyncContext->status) {
652 // set pause result to doNotScheduleWrite_
653 asyncContext->objectInfo->doNotScheduleWrite_ = asyncContext->isTrue;
654 napi_get_undefined(env, &valueParam);
655 }
656 if (!asyncContext->status) {
657 napi_get_undefined(env, &result[PARAM0]);
658 result[PARAM1] = valueParam;
659 } else {
660 napi_value message = nullptr;
661 napi_create_string_utf8(env, "Error, Operation not supported or Failed", NAPI_AUTO_LENGTH, &message);
662 napi_create_error(env, nullptr, message, &result[PARAM0]);
663 napi_get_undefined(env, &result[PARAM1]);
664 }
665
666 if (asyncContext->deferred) {
667 if (!asyncContext->status) {
668 napi_resolve_deferred(env, asyncContext->deferred, result[PARAM1]);
669 } else {
670 napi_reject_deferred(env, asyncContext->deferred, result[PARAM0]);
671 }
672 } else {
673 napi_value callback = nullptr;
674 napi_get_reference_value(env, asyncContext->callbackRef, &callback);
675 napi_call_function(env, nullptr, callback, ARGS_TWO, result, &retVal);
676 napi_delete_reference(env, asyncContext->callbackRef);
677 }
678 napi_delete_async_work(env, asyncContext->work);
679 // Pause failed. Continue write
680 if (!asyncContext->isTrue) {
681 HiLog::Info(LABEL, "PauseAsyncCallbackComplete: Pause failed, continue write");
682 if (!asyncContext->objectInfo->writeRequestQ_.empty()) {
683 napi_queue_async_work(env, asyncContext->objectInfo->writeRequestQ_.front());
684 asyncContext->objectInfo->writeRequestQ_.pop();
685 } else {
686 asyncContext->objectInfo->scheduleFromApiCall_ = true;
687 }
688 }
689
690 delete asyncContext;
691 asyncContext = nullptr;
692 } else {
693 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
694 }
695 }
696
697 void AudioRendererNapi::StartAsyncCallbackComplete(napi_env env, napi_status status, void *data)
698 {
699 napi_value result[ARGS_TWO] = {0};
700 napi_value retVal;
701
702 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
703 napi_value valueParam = nullptr;
704
705 if (asyncContext != nullptr) {
706 if (!asyncContext->status) {
707 napi_get_undefined(env, &valueParam);
708 }
709 if (!asyncContext->status) {
710 napi_get_undefined(env, &result[PARAM0]);
711 result[PARAM1] = valueParam;
712 } else {
713 napi_value message = nullptr;
714 napi_create_string_utf8(env, "Error, Operation not supported or Failed", NAPI_AUTO_LENGTH, &message);
715 napi_create_error(env, nullptr, message, &result[PARAM0]);
716 napi_get_undefined(env, &result[PARAM1]);
717 }
718
719 if (asyncContext->deferred) {
720 if (!asyncContext->status) {
721 napi_resolve_deferred(env, asyncContext->deferred, result[PARAM1]);
722 } else {
723 napi_reject_deferred(env, asyncContext->deferred, result[PARAM0]);
724 }
725 } else {
726 napi_value callback = nullptr;
727 napi_get_reference_value(env, asyncContext->callbackRef, &callback);
728 napi_call_function(env, nullptr, callback, ARGS_TWO, result, &retVal);
729 napi_delete_reference(env, asyncContext->callbackRef);
730 }
731 napi_delete_async_work(env, asyncContext->work);
732 // If start succeeded, set doNotScheduleWrite_ = false and queue the next write request
733 if (asyncContext->isTrue) {
734 asyncContext->objectInfo->doNotScheduleWrite_ = false;
735 if (!asyncContext->objectInfo->writeRequestQ_.empty()) {
736 napi_queue_async_work(env, asyncContext->objectInfo->writeRequestQ_.front());
737 asyncContext->objectInfo->writeRequestQ_.pop();
738 } else {
739 asyncContext->objectInfo->scheduleFromApiCall_ = true;
740 }
741 }
742
743 delete asyncContext;
744 asyncContext = nullptr;
745 } else {
746 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
747 }
748 }
749
750 void AudioRendererNapi::StopAsyncCallbackComplete(napi_env env, napi_status status, void *data)
751 {
752 napi_value result[ARGS_TWO] = {0};
753 napi_value retVal;
754
755 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
756 napi_value valueParam = nullptr;
757
758 if (asyncContext != nullptr) {
759 if (!asyncContext->status) {
760 // set stop result to doNotScheduleWrite_
761 asyncContext->objectInfo->doNotScheduleWrite_ = asyncContext->isTrue;
762 napi_get_undefined(env, &valueParam);
763 }
764 if (!asyncContext->status) {
765 napi_get_undefined(env, &result[PARAM0]);
766 result[PARAM1] = valueParam;
767 } else {
768 napi_value message = nullptr;
769 napi_create_string_utf8(env, "Error, Operation not supported or Failed", NAPI_AUTO_LENGTH, &message);
770 napi_create_error(env, nullptr, message, &result[PARAM0]);
771 napi_get_undefined(env, &result[PARAM1]);
772 }
773
774 if (asyncContext->deferred) {
775 if (!asyncContext->status) {
776 napi_resolve_deferred(env, asyncContext->deferred, result[PARAM1]);
777 } else {
778 napi_reject_deferred(env, asyncContext->deferred, result[PARAM0]);
779 }
780 } else {
781 napi_value callback = nullptr;
782 napi_get_reference_value(env, asyncContext->callbackRef, &callback);
783 napi_call_function(env, nullptr, callback, ARGS_TWO, result, &retVal);
784 napi_delete_reference(env, asyncContext->callbackRef);
785 }
786 napi_delete_async_work(env, asyncContext->work);
787
788 delete asyncContext;
789 asyncContext = nullptr;
790 } else {
791 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
792 }
793 }
794
795 void AudioRendererNapi::SetFunctionAsyncCallbackComplete(napi_env env, napi_status status, void *data)
796 {
797 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
798 napi_value valueParam = nullptr;
799
800 if (asyncContext != nullptr) {
801 if (!asyncContext->status) {
802 napi_get_undefined(env, &valueParam);
803 }
804 CommonCallbackRoutine(env, asyncContext, valueParam);
805 } else {
806 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
807 }
808 }
809
810 void AudioRendererNapi::IsTrueAsyncCallbackComplete(napi_env env, napi_status status, void *data)
811 {
812 auto asyncContext = static_cast<AudioRendererAsyncContext*>(data);
813 napi_value valueParam = nullptr;
814
815 if (asyncContext != nullptr) {
816 if (!asyncContext->status) {
817 napi_get_boolean(env, asyncContext->isTrue, &valueParam);
818 }
819 CommonCallbackRoutine(env, asyncContext, valueParam);
820 } else {
821 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
822 }
823 }
824
825 void AudioRendererNapi::VoidAsyncCallbackComplete(napi_env env, napi_status status, void *data)
826 {
827 auto asyncContext = static_cast<AudioRendererAsyncContext*>(data);
828 napi_value valueParam = nullptr;
829
830 if (asyncContext != nullptr) {
831 if (!asyncContext->status) {
832 napi_get_undefined(env, &valueParam);
833 }
834 CommonCallbackRoutine(env, asyncContext, valueParam);
835 } else {
836 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
837 }
838 }
839
840 void AudioRendererNapi::GetBufferSizeAsyncCallbackComplete(napi_env env, napi_status status, void *data)
841 {
842 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
843 napi_value valueParam = nullptr;
844
845 if (asyncContext != nullptr) {
846 if (!asyncContext->status) {
847 napi_create_uint32(env, asyncContext->bufferSize, &valueParam);
848 }
849 CommonCallbackRoutine(env, asyncContext, valueParam);
850 } else {
851 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
852 }
853 }
854
855 void AudioRendererNapi::GetIntValueAsyncCallbackComplete(napi_env env, napi_status status, void *data)
856 {
857 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
858 napi_value valueParam = nullptr;
859
860 if (asyncContext != nullptr) {
861 if (!asyncContext->status) {
862 napi_create_int32(env, asyncContext->intValue, &valueParam);
863 }
864 CommonCallbackRoutine(env, asyncContext, valueParam);
865 } else {
866 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
867 }
868 }
869
870 void AudioRendererNapi::GetRendererAsyncCallbackComplete(napi_env env, napi_status status, void *data)
871 {
872 napi_value valueParam = nullptr;
873 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
874
875 if (asyncContext != nullptr) {
876 if (!asyncContext->status) {
877 unique_ptr<AudioRendererOptions> rendererOptions = make_unique<AudioRendererOptions>();
878 rendererOptions->streamInfo.samplingRate = asyncContext->rendererOptions.streamInfo.samplingRate;
879 rendererOptions->streamInfo.encoding = asyncContext->rendererOptions.streamInfo.encoding;
880 rendererOptions->streamInfo.format = asyncContext->rendererOptions.streamInfo.format;
881 rendererOptions->streamInfo.channels = asyncContext->rendererOptions.streamInfo.channels;
882 rendererOptions->rendererInfo.contentType = asyncContext->rendererOptions.rendererInfo.contentType;
883 rendererOptions->rendererInfo.streamUsage = asyncContext->rendererOptions.rendererInfo.streamUsage;
884 rendererOptions->rendererInfo.rendererFlags = asyncContext->rendererOptions.rendererInfo.rendererFlags;
885
886 valueParam = CreateAudioRendererWrapper(env, rendererOptions);
887 }
888 CommonCallbackRoutine(env, asyncContext, valueParam);
889 } else {
890 HiLog::Error(LABEL, "ERROR: GetRendererAsyncCallbackComplete asyncContext is Null!");
891 }
892 }
893
894 void AudioRendererNapi::GetInt64ValueAsyncCallbackComplete(napi_env env, napi_status status, void *data)
895 {
896 auto asyncContext = static_cast<AudioRendererAsyncContext*>(data);
897 napi_value valueParam = nullptr;
898
899 if (asyncContext != nullptr) {
900 if (!asyncContext->status) {
901 napi_create_int64(env, asyncContext->time, &valueParam);
902 }
903 CommonCallbackRoutine(env, asyncContext, valueParam);
904 } else {
905 HiLog::Error(LABEL, "ERROR: AudioRendererAsyncContext* is Null!");
906 }
907 }
908
909 void AudioRendererNapi::AudioRendererInfoAsyncCallbackComplete(napi_env env, napi_status status, void *data)
910 {
911 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
912 napi_value valueParam = nullptr;
913
914 if (asyncContext != nullptr) {
915 if (asyncContext->status == SUCCESS) {
916 (void)napi_create_object(env, &valueParam);
917 SetValueInt32(env, "content", static_cast<int32_t>(asyncContext->contentType), valueParam);
918 SetValueInt32(env, "usage", static_cast<int32_t>(asyncContext->usage), valueParam);
919 SetValueInt32(env, "rendererFlags", asyncContext->rendererFlags, valueParam);
920 }
921 CommonCallbackRoutine(env, asyncContext, valueParam);
922 } else {
923 HiLog::Error(LABEL, "ERROR: asyncContext is Null!");
924 }
925 }
926
927 void AudioRendererNapi::AudioStreamInfoAsyncCallbackComplete(napi_env env, napi_status status, void *data)
928 {
929 auto asyncContext = static_cast<AudioRendererAsyncContext *>(data);
930 napi_value valueParam = nullptr;
931
932 if (asyncContext != nullptr) {
933 if (asyncContext->status == SUCCESS) {
934 (void)napi_create_object(env, &valueParam);
935 SetValueInt32(env, "samplingRate", static_cast<int32_t>(asyncContext->samplingRate), valueParam);
936 SetValueInt32(env, "channels", static_cast<int32_t>(asyncContext->channelCount), valueParam);
937 SetValueInt32(env, "sampleFormat", static_cast<int32_t>(asyncContext->sampleFormat), valueParam);
938 SetValueInt32(env, "encodingType", static_cast<int32_t>(asyncContext->encodingType), valueParam);
939 }
940 CommonCallbackRoutine(env, asyncContext, valueParam);
941 } else {
942 HiLog::Error(LABEL, "ERROR: AudioStreamInfoAsyncCallbackComplete* is Null!");
943 }
944 }
945
946 napi_value AudioRendererNapi::SetRenderRate(napi_env env, napi_callback_info info)
947 {
948 napi_status status;
949 const int32_t refCount = 1;
950 napi_value result = nullptr;
951
952 GET_PARAMS(env, info, ARGS_TWO);
953 NAPI_ASSERT(env, argc >= ARGS_ONE, "requires 1 parameter minimum");
954
955 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
956 status = napi_unwrap(env, thisVar, reinterpret_cast<void **>(&asyncContext->objectInfo));
957 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
958 for (size_t i = PARAM0; i < argc; i++) {
959 napi_valuetype valueType = napi_undefined;
960 napi_typeof(env, argv[i], &valueType);
961
962 if (i == PARAM0 && valueType == napi_number) {
963 napi_get_value_int32(env, argv[PARAM0], &asyncContext->audioRendererRate);
964 } else if (i == PARAM1 && valueType == napi_function) {
965 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
966 break;
967 } else {
968 NAPI_ASSERT(env, false, "type mismatch");
969 }
970 }
971
972 if (asyncContext->callbackRef == nullptr) {
973 napi_create_promise(env, &asyncContext->deferred, &result);
974 } else {
975 napi_get_undefined(env, &result);
976 }
977
978 napi_value resource = nullptr;
979 napi_create_string_utf8(env, "SetRenderRate", NAPI_AUTO_LENGTH, &resource);
980
981 status = napi_create_async_work(
982 env, nullptr, resource,
983 [](napi_env env, void *data) {
984 auto context = static_cast<AudioRendererAsyncContext *>(data);
985 AudioRendererRate audioRenderRate = static_cast<AudioRendererRate>(context->audioRendererRate);
986 context->intValue = context->objectInfo->audioRenderer_->SetRenderRate(audioRenderRate);
987 context->status = SUCCESS;
988 },
989 VoidAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
990 if (status != napi_ok) {
991 result = nullptr;
992 } else {
993 status = napi_queue_async_work(env, asyncContext->work);
994 if (status == napi_ok) {
995 asyncContext.release();
996 } else {
997 result = nullptr;
998 }
999 }
1000 }
1001
1002 return result;
1003 }
1004
1005 napi_value AudioRendererNapi::GetRenderRate(napi_env env, napi_callback_info info)
1006 {
1007 napi_status status;
1008 const int32_t refCount = 1;
1009 napi_value result = nullptr;
1010
1011 GET_PARAMS(env, info, ARGS_ONE);
1012
1013 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1014 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1015 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1016 for (size_t i = PARAM0; i < argc; i++) {
1017 napi_valuetype valueType = napi_undefined;
1018 napi_typeof(env, argv[i], &valueType);
1019
1020 if (i == PARAM0 && valueType == napi_function) {
1021 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1022 break;
1023 } else {
1024 NAPI_ASSERT(env, false, "type mismatch");
1025 }
1026 }
1027
1028 if (asyncContext->callbackRef == nullptr) {
1029 napi_create_promise(env, &asyncContext->deferred, &result);
1030 } else {
1031 napi_get_undefined(env, &result);
1032 }
1033
1034 napi_value resource = nullptr;
1035 napi_create_string_utf8(env, "GetRenderRate", NAPI_AUTO_LENGTH, &resource);
1036
1037 status = napi_create_async_work(
1038 env, nullptr, resource,
1039 [](napi_env env, void *data) {
1040 auto context = static_cast<AudioRendererAsyncContext *>(data);
1041 context->intValue = context->objectInfo->audioRenderer_->GetRenderRate();
1042 context->status = SUCCESS;
1043 },
1044 GetIntValueAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
1045 if (status != napi_ok) {
1046 result = nullptr;
1047 } else {
1048 status = napi_queue_async_work(env, asyncContext->work);
1049 if (status == napi_ok) {
1050 asyncContext.release();
1051 } else {
1052 result = nullptr;
1053 }
1054 }
1055 }
1056
1057 return result;
1058 }
1059
1060 napi_value AudioRendererNapi::Start(napi_env env, napi_callback_info info)
1061 {
1062 napi_status status;
1063 const int32_t refCount = 1;
1064 napi_value result = nullptr;
1065
1066 GET_PARAMS(env, info, ARGS_ONE);
1067
1068 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1069
1070 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1071 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1072 for (size_t i = PARAM0; i < argc; i++) {
1073 napi_valuetype valueType = napi_undefined;
1074 napi_typeof(env, argv[i], &valueType);
1075
1076 if (i == PARAM0 && valueType == napi_function) {
1077 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1078 break;
1079 } else {
1080 NAPI_ASSERT(env, false, "type mismatch");
1081 }
1082 }
1083
1084 if (asyncContext->callbackRef == nullptr) {
1085 napi_create_promise(env, &asyncContext->deferred, &result);
1086 } else {
1087 napi_get_undefined(env, &result);
1088 }
1089
1090 napi_value resource = nullptr;
1091 napi_create_string_utf8(env, "Start", NAPI_AUTO_LENGTH, &resource);
1092
1093 status = napi_create_async_work(
1094 env, nullptr, resource,
1095 [](napi_env env, void *data) {
1096 auto context = static_cast<AudioRendererAsyncContext *>(data);
1097 context->isTrue = context->objectInfo->audioRenderer_->Start();
1098 context->status = SUCCESS;
1099 },
1100 StartAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
1101 if (status != napi_ok) {
1102 result = nullptr;
1103 } else {
1104 status = napi_queue_async_work(env, asyncContext->work);
1105 if (status == napi_ok) {
1106 asyncContext.release();
1107 } else {
1108 result = nullptr;
1109 }
1110 }
1111 }
1112
1113 return result;
1114 }
1115
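/* write(buffer[, callback]): copies the ArrayBuffer and writes it to the renderer on a worker
 * thread. Only one write is queued with napi at a time; additional requests wait in
 * writeRequestQ_ until a previous completion handler schedules them. */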
1116 napi_value AudioRendererNapi::Write(napi_env env, napi_callback_info info)
1117 {
1118 napi_status status;
1119 const int32_t refCount = 1;
1120 napi_value result = nullptr;
1121
1122 GET_PARAMS(env, info, ARGS_TWO);
1123 NAPI_ASSERT(env, argc >= ARGS_ONE, "requires 1 parameter minimum");
1124
1125 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1126
1127 status = napi_unwrap(env, thisVar, reinterpret_cast<void **>(&asyncContext->objectInfo));
1128 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1129 for (size_t i = PARAM0; i < argc; i++) {
1130 napi_valuetype valueType = napi_undefined;
1131 napi_typeof(env, argv[i], &valueType);
1132
1133 if ((i == PARAM0) && (valueType == napi_object)) {
1134 napi_get_arraybuffer_info(env, argv[i], &asyncContext->data, &asyncContext->bufferLen);
1135 } else if (i == PARAM1 && valueType == napi_function) {
1136 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1137 break;
1138 } else {
1139 NAPI_ASSERT(env, false, "type mismatch");
1140 }
1141 }
1142
1143 if (asyncContext->callbackRef == nullptr) {
1144 napi_create_promise(env, &asyncContext->deferred, &result);
1145 } else {
1146 napi_get_undefined(env, &result);
1147 }
1148
1149 napi_value resource = nullptr;
1150 napi_create_string_utf8(env, "Write", NAPI_AUTO_LENGTH, &resource);
1151
1152 status = napi_create_async_work(
1153 env, nullptr, resource,
1154 [](napi_env env, void *data) {
1155 auto context = static_cast<AudioRendererAsyncContext *>(data);
1156 context->status = ERROR;
1157 size_t bufferLen = context->bufferLen;
1158 auto buffer = std::make_unique<uint8_t[]>(bufferLen);
1159 if (buffer == nullptr) {
1160 HiLog::Error(LABEL, "Renderer write buffer allocation failed");
1161 return;
1162 }
1163
1164 if (memcpy_s(buffer.get(), bufferLen, context->data, bufferLen)) {
1165 HiLog::Error(LABEL, "Renderer mem copy failed");
1166 return;
1167 }
1168
1169 int32_t bytesWritten = 0;
1170 size_t totalBytesWritten = 0;
1171 size_t minBytes = 4;
1172 while ((totalBytesWritten < bufferLen) && ((bufferLen - totalBytesWritten) > minBytes)) {
1173 bytesWritten = context->objectInfo->audioRenderer_->Write(buffer.get() + totalBytesWritten,
1174 bufferLen - totalBytesWritten);
1175 if (bytesWritten < 0) {
1176 break;
1177 }
1178
1179 totalBytesWritten += bytesWritten;
1180 }
1181
1182 context->status = SUCCESS;
1183 context->totalBytesWritten = totalBytesWritten;
1184 },
1185 WriteAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
1186 if (status != napi_ok) {
1187 result = nullptr;
1188 } else if (asyncContext->objectInfo->scheduleFromApiCall_) {
1189 status = napi_queue_async_work(env, asyncContext->work);
1190 if (status == napi_ok) {
1191 asyncContext->objectInfo->scheduleFromApiCall_ = false;
1192 asyncContext.release();
1193 } else {
1194 result = nullptr;
1195 }
1196 } else {
1197 asyncContext->objectInfo->writeRequestQ_.push(asyncContext->work);
1198 asyncContext.release();
1199 }
1200 }
1201
1202 return result;
1203 }
1204
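/* getAudioTime([callback]): returns the renderer timestamp (MONOTONIC base) converted to
 * nanoseconds. */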
1205 napi_value AudioRendererNapi::GetAudioTime(napi_env env, napi_callback_info info)
1206 {
1207 napi_status status;
1208 const int32_t refCount = 1;
1209 napi_value result = nullptr;
1210
1211 GET_PARAMS(env, info, ARGS_ONE);
1212
1213 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1214
1215 status = napi_unwrap(env, thisVar, reinterpret_cast<void **>(&asyncContext->objectInfo));
1216 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1217 for (size_t i = PARAM0; i < argc; i++) {
1218 napi_valuetype valueType = napi_undefined;
1219 napi_typeof(env, argv[i], &valueType);
1220
1221 if (i == PARAM0 && valueType == napi_function) {
1222 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1223 break;
1224 } else {
1225 NAPI_ASSERT(env, false, "type mismatch");
1226 }
1227 }
1228
1229 if (asyncContext->callbackRef == nullptr) {
1230 napi_create_promise(env, &asyncContext->deferred, &result);
1231 } else {
1232 napi_get_undefined(env, &result);
1233 }
1234
1235 napi_value resource = nullptr;
1236 napi_create_string_utf8(env, "GetAudioTime", NAPI_AUTO_LENGTH, &resource);
1237
1238 status = napi_create_async_work(
1239 env, nullptr, resource,
1240 [](napi_env env, void *data) {
1241 auto context = static_cast<AudioRendererAsyncContext *>(data);
1242 context->status = ERROR;
1243 Timestamp timestamp;
1244 if (context->objectInfo->audioRenderer_->GetAudioTime(timestamp, Timestamp::Timestampbase::MONOTONIC)) {
1245 const uint64_t secToNanosecond = 1000000000;
1246 context->time = timestamp.time.tv_nsec + timestamp.time.tv_sec * secToNanosecond;
1247 context->status = SUCCESS;
1248 }
1249 },
1250 GetInt64ValueAsyncCallbackComplete, static_cast<void*>(asyncContext.get()), &asyncContext->work);
1251 if (status != napi_ok) {
1252 result = nullptr;
1253 } else {
1254 status = napi_queue_async_work(env, asyncContext->work);
1255 if (status == napi_ok) {
1256 asyncContext.release();
1257 } else {
1258 result = nullptr;
1259 }
1260 }
1261 }
1262
1263 return result;
1264 }
1265
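/* drain([callback]): flushes any buffered write requests from writeRequestQ_ before queuing the
 * drain work itself. */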
1266 napi_value AudioRendererNapi::Drain(napi_env env, napi_callback_info info)
1267 {
1268 napi_status status;
1269 const int32_t refCount = 1;
1270 napi_value result = nullptr;
1271
1272 GET_PARAMS(env, info, ARGS_ONE);
1273
1274 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1275
1276 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1277 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1278 for (size_t i = PARAM0; i < argc; i++) {
1279 napi_valuetype valueType = napi_undefined;
1280 napi_typeof(env, argv[i], &valueType);
1281
1282 if (i == PARAM0 && valueType == napi_function) {
1283 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1284 break;
1285 } else {
1286 NAPI_ASSERT(env, false, "type mismatch");
1287 }
1288 }
1289
1290 if (asyncContext->callbackRef == nullptr) {
1291 napi_create_promise(env, &asyncContext->deferred, &result);
1292 } else {
1293 napi_get_undefined(env, &result);
1294 }
1295
1296 napi_value resource = nullptr;
1297 napi_create_string_utf8(env, "Drain", NAPI_AUTO_LENGTH, &resource);
1298
1299 status = napi_create_async_work(
1300 env, nullptr, resource,
1301 [](napi_env env, void *data) {
1302 auto context = static_cast<AudioRendererAsyncContext *>(data);
1303 context->isTrue = context->objectInfo->audioRenderer_->Drain();
1304 context->status = SUCCESS;
1305 },
1306 VoidAsyncCallbackComplete, static_cast<void*>(asyncContext.get()), &asyncContext->work);
1307 if (status != napi_ok) {
1308 result = nullptr;
1309 } else {
1310 if (!asyncContext->objectInfo->doNotScheduleWrite_) {
1311 asyncContext->objectInfo->isDrainWriteQInProgress_ = true;
1312 while (!asyncContext->objectInfo->writeRequestQ_.empty()) {
1313 napi_queue_async_work(env, asyncContext->objectInfo->writeRequestQ_.front());
1314 asyncContext->objectInfo->writeRequestQ_.pop();
1315 }
1316 asyncContext->objectInfo->isDrainWriteQInProgress_ = false;
1317 }
1318 status = napi_queue_async_work(env, asyncContext->work);
1319 if (status == napi_ok) {
1320 asyncContext.release();
1321 } else {
1322 result = nullptr;
1323 }
1324 }
1325 }
1326
1327 return result;
1328 }
1329
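/* pause([callback]): sets doNotScheduleWrite_ so pending writes stop being scheduled while the
 * pause is in progress; the completion handler resumes them if the pause failed. */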
1330 napi_value AudioRendererNapi::Pause(napi_env env, napi_callback_info info)
1331 {
1332 napi_status status;
1333 const int32_t refCount = 1;
1334 napi_value result = nullptr;
1335
1336 GET_PARAMS(env, info, ARGS_ONE);
1337
1338 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1339
1340 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1341 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1342 for (size_t i = PARAM0; i < argc; i++) {
1343 napi_valuetype valueType = napi_undefined;
1344 napi_typeof(env, argv[i], &valueType);
1345
1346 if (i == PARAM0 && valueType == napi_function) {
1347 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1348 break;
1349 } else {
1350 NAPI_ASSERT(env, false, "type mismatch");
1351 }
1352 }
1353
1354 if (asyncContext->callbackRef == nullptr) {
1355 napi_create_promise(env, &asyncContext->deferred, &result);
1356 } else {
1357 napi_get_undefined(env, &result);
1358 }
1359
1360 napi_value resource = nullptr;
1361 napi_create_string_utf8(env, "Pause", NAPI_AUTO_LENGTH, &resource);
1362
1363 status = napi_create_async_work(
1364 env, nullptr, resource,
1365 [](napi_env env, void *data) {
1366 auto context = static_cast<AudioRendererAsyncContext *>(data);
1367 context->isTrue = context->objectInfo->audioRenderer_->Pause();
1368 context->status = SUCCESS;
1369 },
1370 PauseAsyncCallbackComplete, static_cast<void*>(asyncContext.get()), &asyncContext->work);
1371 if (status != napi_ok) {
1372 result = nullptr;
1373 } else {
1374 status = napi_queue_async_work(env, asyncContext->work);
1375 if (status == napi_ok) {
1376 asyncContext->objectInfo->doNotScheduleWrite_ = true;
1377 asyncContext.release();
1378 } else {
1379 result = nullptr;
1380 }
1381 }
1382 }
1383
1384 return result;
1385 }
1386
1387 napi_value AudioRendererNapi::Stop(napi_env env, napi_callback_info info)
1388 {
1389 napi_status status;
1390 const int32_t refCount = 1;
1391 napi_value result = nullptr;
1392
1393 GET_PARAMS(env, info, ARGS_ONE);
1394
1395 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1396
1397 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1398 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1399 for (size_t i = PARAM0; i < argc; i++) {
1400 napi_valuetype valueType = napi_undefined;
1401 napi_typeof(env, argv[i], &valueType);
1402
1403 if (i == PARAM0 && valueType == napi_function) {
1404 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1405 break;
1406 } else {
1407 NAPI_ASSERT(env, false, "type mismatch");
1408 }
1409 }
1410
1411 if (asyncContext->callbackRef == nullptr) {
1412 napi_create_promise(env, &asyncContext->deferred, &result);
1413 } else {
1414 napi_get_undefined(env, &result);
1415 }
1416
1417 napi_value resource = nullptr;
1418 napi_create_string_utf8(env, "Stop", NAPI_AUTO_LENGTH, &resource);
1419
1420 status = napi_create_async_work(
1421 env, nullptr, resource,
1422 [](napi_env env, void *data) {
1423 auto context = static_cast<AudioRendererAsyncContext *>(data);
1424 context->isTrue = context->objectInfo->audioRenderer_->Stop();
1425 context->status = SUCCESS;
1426 },
1427 StopAsyncCallbackComplete, static_cast<void*>(asyncContext.get()), &asyncContext->work);
1428 if (status != napi_ok) {
1429 result = nullptr;
1430 } else {
1431 status = napi_queue_async_work(env, asyncContext->work);
1432 if (status == napi_ok) {
1433 asyncContext.release();
1434 } else {
1435 result = nullptr;
1436 }
1437 }
1438 }
1439
1440 return result;
1441 }
1442
1443 napi_value AudioRendererNapi::Release(napi_env env, napi_callback_info info)
1444 {
1445 napi_status status;
1446 const int32_t refCount = 1;
1447 napi_value result = nullptr;
1448
1449 GET_PARAMS(env, info, ARGS_ONE);
1450
1451 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1452
1453 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1454 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1455 for (size_t i = PARAM0; i < argc; i++) {
1456 napi_valuetype valueType = napi_undefined;
1457 napi_typeof(env, argv[i], &valueType);
1458
1459 if (i == PARAM0 && valueType == napi_function) {
1460 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1461 break;
1462 } else {
1463 NAPI_ASSERT(env, false, "type mismatch");
1464 }
1465 }
1466
1467 if (asyncContext->callbackRef == nullptr) {
1468 napi_create_promise(env, &asyncContext->deferred, &result);
1469 } else {
1470 napi_get_undefined(env, &result);
1471 }
1472
1473 napi_value resource = nullptr;
1474 napi_create_string_utf8(env, "Release", NAPI_AUTO_LENGTH, &resource);
1475
1476 status = napi_create_async_work(
1477 env, nullptr, resource,
1478 [](napi_env env, void *data) {
1479 auto context = static_cast<AudioRendererAsyncContext *>(data);
1480 context->isTrue = context->objectInfo->audioRenderer_->Release();
1481 if (context->isTrue) {
1482 context->status = SUCCESS;
1483 } else {
1484 context->status = ERROR;
1485 }
1486 },
1487 VoidAsyncCallbackComplete, static_cast<void*>(asyncContext.get()), &asyncContext->work);
1488 if (status != napi_ok) {
1489 result = nullptr;
1490 } else {
1491 status = napi_queue_async_work(env, asyncContext->work);
1492 if (status == napi_ok) {
1493 asyncContext.release();
1494 } else {
1495 result = nullptr;
1496 }
1497 }
1498 }
1499
1500 return result;
1501 }
1502
1503 napi_value AudioRendererNapi::GetBufferSize(napi_env env, napi_callback_info info)
1504 {
1505 napi_status status;
1506 const int32_t refCount = 1;
1507 napi_value result = nullptr;
1508
1509 GET_PARAMS(env, info, ARGS_ONE);
1510
1511 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1512
1513 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1514 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1515 for (size_t i = PARAM0; i < argc; i++) {
1516 napi_valuetype valueType = napi_undefined;
1517 napi_typeof(env, argv[i], &valueType);
1518
1519 if (i == PARAM0 && valueType == napi_function) {
1520 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1521 break;
1522 } else {
1523 NAPI_ASSERT(env, false, "type mismatch");
1524 }
1525 }
1526
1527 if (asyncContext->callbackRef == nullptr) {
1528 napi_create_promise(env, &asyncContext->deferred, &result);
1529 } else {
1530 napi_get_undefined(env, &result);
1531 }
1532
1533 napi_value resource = nullptr;
1534 napi_create_string_utf8(env, "GetBufferSize", NAPI_AUTO_LENGTH, &resource);
1535
1536 status = napi_create_async_work(
1537 env, nullptr, resource,
1538 [](napi_env env, void *data) {
1539 auto context = static_cast<AudioRendererAsyncContext *>(data);
1540 size_t bufferSize;
1541 context->status = context->objectInfo->audioRenderer_->GetBufferSize(bufferSize);
1542 if (context->status == SUCCESS) {
1543 context->bufferSize = bufferSize;
1544 }
1545 },
1546 GetBufferSizeAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
1547 if (status != napi_ok) {
1548 result = nullptr;
1549 } else {
1550 status = napi_queue_async_work(env, asyncContext->work);
1551 if (status == napi_ok) {
1552 asyncContext.release();
1553 } else {
1554 result = nullptr;
1555 }
1556 }
1557 }
1558
1559 return result;
1560 }
1561
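// GetRendererInfo(callback?): asynchronously fetches the renderer's content type, stream usage
// and renderer flags via the native GetRendererInfo() call.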
1562 napi_value AudioRendererNapi::GetRendererInfo(napi_env env, napi_callback_info info)
1563 {
1564 HiLog::Info(LABEL, "Entered GetRendererInfo");
1565 napi_status status;
1566 const int32_t refCount = 1;
1567 napi_value result = nullptr;
1568
1569 GET_PARAMS(env, info, ARGS_ONE);
1570
1571 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1572
1573 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1574 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1575 for (size_t i = PARAM0; i < argc; i++) {
1576 napi_valuetype valueType = napi_undefined;
1577 napi_typeof(env, argv[i], &valueType);
1578
1579 if (i == PARAM0 && valueType == napi_function) {
1580 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1581 break;
1582 } else {
1583 NAPI_ASSERT(env, false, "type mismatch");
1584 }
1585 }
1586
1587 if (asyncContext->callbackRef == nullptr) {
1588 napi_create_promise(env, &asyncContext->deferred, &result);
1589 } else {
1590 napi_get_undefined(env, &result);
1591 }
1592
1593 napi_value resource = nullptr;
1594 napi_create_string_utf8(env, "GetRendererInfo", NAPI_AUTO_LENGTH, &resource);
1595
1596 status = napi_create_async_work(
1597 env, nullptr, resource,
1598 [](napi_env env, void *data) {
1599 auto context = static_cast<AudioRendererAsyncContext *>(data);
1600 AudioRendererInfo rendererInfo = {};
1601 context->status = context->objectInfo->audioRenderer_->GetRendererInfo(rendererInfo);
1602 if (context->status == SUCCESS) {
1603 context->contentType = rendererInfo.contentType;
1604 context->usage = rendererInfo.streamUsage;
1605 context->rendererFlags = rendererInfo.rendererFlags;
1606 }
1607 },
1608 AudioRendererInfoAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
1609 if (status != napi_ok) {
1610 result = nullptr;
1611 } else {
1612 status = napi_queue_async_work(env, asyncContext->work);
1613 if (status == napi_ok) {
1614 asyncContext.release();
1615 } else {
1616 result = nullptr;
1617 }
1618 }
1619 }
1620
1621 return result;
1622 }
1623
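// GetStreamInfo(callback?): asynchronously fetches the stream parameters (sample format,
// sampling rate, channel count, encoding type); the native format is mapped back to the
// JS-side enum through GetJsAudioSampleFormat().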
1624 napi_value AudioRendererNapi::GetStreamInfo(napi_env env, napi_callback_info info)
1625 {
1626 napi_status status;
1627 const int32_t refCount = 1;
1628 napi_value result = nullptr;
1629
1630 GET_PARAMS(env, info, ARGS_ONE);
1631
1632 unique_ptr<AudioRendererAsyncContext> asyncContext = make_unique<AudioRendererAsyncContext>();
1633
1634 status = napi_unwrap(env, thisVar, reinterpret_cast<void**>(&asyncContext->objectInfo));
1635 if (status == napi_ok && asyncContext->objectInfo != nullptr) {
1636 for (size_t i = PARAM0; i < argc; i++) {
1637 napi_valuetype valueType = napi_undefined;
1638 napi_typeof(env, argv[i], &valueType);
1639
1640 if (i == PARAM0 && valueType == napi_function) {
1641 napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef);
1642 break;
1643 } else {
1644 NAPI_ASSERT(env, false, "type mismatch");
1645 }
1646 }
1647
1648 if (asyncContext->callbackRef == nullptr) {
1649 napi_create_promise(env, &asyncContext->deferred, &result);
1650 } else {
1651 napi_get_undefined(env, &result);
1652 }
1653
1654 napi_value resource = nullptr;
1655 napi_create_string_utf8(env, "GetStreamInfo", NAPI_AUTO_LENGTH, &resource);
1656
1657 status = napi_create_async_work(
1658 env, nullptr, resource,
1659 [](napi_env env, void *data) {
1660 auto context = static_cast<AudioRendererAsyncContext *>(data);
1661 AudioStreamInfo streamInfo;
1662 context->status = context->objectInfo->audioRenderer_->GetStreamInfo(streamInfo);
1663 if (context->status == SUCCESS) {
1664 context->sampleFormat = GetJsAudioSampleFormat(streamInfo.format);
1665 context->samplingRate = streamInfo.samplingRate;
1666 context->channelCount = streamInfo.channels;
1667 context->encodingType = streamInfo.encoding;
1668 }
1669 },
1670 AudioStreamInfoAsyncCallbackComplete, static_cast<void *>(asyncContext.get()), &asyncContext->work);
1671 if (status != napi_ok) {
1672 result = nullptr;
1673 } else {
1674 status = napi_queue_async_work(env, asyncContext->work);
1675 if (status == napi_ok) {
1676 asyncContext.release();
1677 } else {
1678 result = nullptr;
1679 }
1680 }
1681 }
1682
1683 return result;
1684 }
1685
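// GetState() is synchronous: it unwraps the native instance and returns the current renderer
// state as an int32 without scheduling async work.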
1686 napi_value AudioRendererNapi::GetState(napi_env env, napi_callback_info info)
1687 {
1688 napi_value jsThis = nullptr;
1689 napi_value undefinedResult = nullptr;
1690 napi_get_undefined(env, &undefinedResult);
1691
1692 size_t argCount = 0;
1693 napi_status status = napi_get_cb_info(env, info, &argCount, nullptr, &jsThis, nullptr);
1694 if (status != napi_ok || jsThis == nullptr) {
1695         MEDIA_ERR_LOG("Failed to retrieve details about the callback");
1696 return undefinedResult;
1697 }
1698
1699 AudioRendererNapi *rendererNapi = nullptr;
1700 status = napi_unwrap(env, jsThis, reinterpret_cast<void **>(&rendererNapi));
1701 CHECK_AND_RETURN_RET_LOG(status == napi_ok && rendererNapi != nullptr, undefinedResult, "Failed to get instance");
1702
1703     CHECK_AND_RETURN_RET_LOG(rendererNapi->audioRenderer_ != nullptr, undefinedResult, "audioRenderer_ is nullptr");
1704 int32_t rendererState = rendererNapi->audioRenderer_->GetStatus();
1705
1706 napi_value jsResult = nullptr;
1707 status = napi_create_int32(env, rendererState, &jsResult);
1708 CHECK_AND_RETURN_RET_LOG(status == napi_ok, undefinedResult, "napi_create_int32 error");
1709
1710 MEDIA_INFO_LOG("AudioRendererNapi: GetState Complete, Current state: %{public}d", rendererState);
1711 return jsResult;
1712 }
1713
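// Registers the 'periodReach' callback: the native layer invokes it every 'frameCount' rendered
// frames. Only the first subscription installs a native callback; later calls are ignored until
// the existing one is removed via off('periodReach').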
1714 napi_value AudioRendererNapi::RegisterPeriodPositionCallback(napi_env env, napi_value* argv, const std::string& cbName,
1715 AudioRendererNapi *rendererNapi)
1716 {
1717 int64_t frameCount = 0;
1718 napi_get_value_int64(env, argv[PARAM1], &frameCount);
1719
1720 if (frameCount > 0) {
1721 if (rendererNapi->periodPositionCBNapi_ == nullptr) {
1722 rendererNapi->periodPositionCBNapi_ = std::make_shared<RendererPeriodPositionCallbackNapi>(env);
1723 NAPI_ASSERT(env, rendererNapi->periodPositionCBNapi_ != nullptr, "AudioRendererNapi: No memory.");
1724
1725 int32_t ret = rendererNapi->audioRenderer_->SetRendererPeriodPositionCallback(frameCount,
1726 rendererNapi->periodPositionCBNapi_);
1727             NAPI_ASSERT(env, ret == SUCCESS, "AudioRendererNapi: SetRendererPeriodPositionCallback failed.");
1728
1729 std::shared_ptr<RendererPeriodPositionCallbackNapi> cb =
1730 std::static_pointer_cast<RendererPeriodPositionCallbackNapi>(rendererNapi->periodPositionCBNapi_);
1731 cb->SaveCallbackReference(cbName, argv[PARAM2]);
1732 } else {
1733 MEDIA_DEBUG_LOG("AudioRendererNapi: periodReach already subscribed.");
1734 }
1735 } else {
1736 MEDIA_ERR_LOG("AudioRendererNapi: frameCount value not supported!!");
1737 }
1738
1739 napi_value result = nullptr;
1740 napi_get_undefined(env, &result);
1741 return result;
1742 }
1743
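// Registers the 'markReach' callback, fired once when 'markPosition' frames have been rendered.
// Unlike 'periodReach', re-registering replaces the previously installed native callback.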
1744 napi_value AudioRendererNapi::RegisterPositionCallback(napi_env env, napi_value* argv,
1745 const std::string& cbName, AudioRendererNapi *rendererNapi)
1746 {
1747 int64_t markPosition = 0;
1748 napi_get_value_int64(env, argv[PARAM1], &markPosition);
1749
1750 if (markPosition > 0) {
1751 rendererNapi->positionCBNapi_ = std::make_shared<RendererPositionCallbackNapi>(env);
1752 NAPI_ASSERT(env, rendererNapi->positionCBNapi_ != nullptr, "AudioRendererNapi: No memory.");
1753 int32_t ret = rendererNapi->audioRenderer_->SetRendererPositionCallback(markPosition,
1754 rendererNapi->positionCBNapi_);
1755 NAPI_ASSERT(env, ret == SUCCESS, "AudioRendererNapi: SetRendererPositionCallback failed.");
1756
1757 std::shared_ptr<RendererPositionCallbackNapi> cb =
1758 std::static_pointer_cast<RendererPositionCallbackNapi>(rendererNapi->positionCBNapi_);
1759 cb->SaveCallbackReference(cbName, argv[PARAM2]);
1760 } else {
1761 MEDIA_ERR_LOG("AudioRendererNapi: Mark Position value not supported!!");
1762 }
1763
1764 napi_value result = nullptr;
1765 napi_get_undefined(env, &result);
1766 return result;
1767 }
1768
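// Interrupt and state-change events share the AudioRendererCallbackNapi instance held in
// callbackNapi_ (expected to be created elsewhere; the assert below fails if it is missing).
// Registering only saves the JS callback reference under the event name.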
1769 napi_value AudioRendererNapi::RegisterRendererCallback(napi_env env, napi_value* argv,
1770 const std::string& cbName, AudioRendererNapi *rendererNapi)
1771 {
1772 NAPI_ASSERT(env, rendererNapi->callbackNapi_ != nullptr, "AudioRendererNapi: callbackNapi_ is nullptr");
1773
1774 std::shared_ptr<AudioRendererCallbackNapi> cb =
1775 std::static_pointer_cast<AudioRendererCallbackNapi>(rendererNapi->callbackNapi_);
1776 cb->SaveCallbackReference(cbName, argv[PARAM1]);
1777
1778 napi_value result = nullptr;
1779 napi_get_undefined(env, &result);
1780 return result;
1781 }
1782
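// Dispatches on() subscriptions by event name to the matching registration helper;
// unknown event names fail the assertion below.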
1783 napi_value AudioRendererNapi::RegisterCallback(napi_env env, napi_value jsThis,
1784 napi_value* argv, const std::string& cbName)
1785 {
1786 AudioRendererNapi *rendererNapi = nullptr;
1787 napi_status status = napi_unwrap(env, jsThis, reinterpret_cast<void **>(&rendererNapi));
1788 NAPI_ASSERT(env, status == napi_ok && rendererNapi != nullptr, "Failed to retrieve audio renderer napi instance.");
1789 NAPI_ASSERT(env, rendererNapi->audioRenderer_ != nullptr, "Audio renderer instance is null.");
1790
1791 napi_value result = nullptr;
1792 napi_get_undefined(env, &result);
1793
1794 if (!cbName.compare(INTERRUPT_CALLBACK_NAME) || !cbName.compare(STATE_CHANGE_CALLBACK_NAME)) {
1795 result = RegisterRendererCallback(env, argv, cbName, rendererNapi);
1796 } else if (!cbName.compare(MARK_REACH_CALLBACK_NAME)) {
1797 result = RegisterPositionCallback(env, argv, cbName, rendererNapi);
1798 } else if (!cbName.compare(PERIOD_REACH_CALLBACK_NAME)) {
1799 result = RegisterPeriodPositionCallback(env, argv, cbName, rendererNapi);
1800 } else {
1801 bool unknownCallback = true;
1802 NAPI_ASSERT(env, !unknownCallback, "No such on callback supported");
1803 }
1804
1805 return result;
1806 }
1807
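// on(type, callback) / on(type, param, callback). Illustrative JS usage (assumed, inferred from
// the argument handling below and the event names dispatched in RegisterCallback):
//   renderer.on('markReach', 1000, () => { ... });    // three-argument form: numeric param + callback
//   renderer.on('periodReach', 1000, () => { ... });
//   renderer.on('stateChange', (state) => { ... });   // two-argument form: callback only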
1808 napi_value AudioRendererNapi::On(napi_env env, napi_callback_info info)
1809 {
1810 const size_t requireArgc = 2;
1811 size_t argc = 3;
1812
1813 napi_value argv[requireArgc + 1] = {nullptr, nullptr, nullptr};
1814 napi_value jsThis = nullptr;
1815 napi_status status = napi_get_cb_info(env, info, &argc, argv, &jsThis, nullptr);
1816 NAPI_ASSERT(env, status == napi_ok && argc >= requireArgc, "AudioRendererNapi: On: requires min 2 parameters");
1817
1818 napi_valuetype eventType = napi_undefined;
1819 napi_typeof(env, argv[0], &eventType);
1820 NAPI_ASSERT(env, eventType == napi_string, "AudioRendererNapi:On: type mismatch for event name, parameter 1");
1821
1822 std::string callbackName = AudioCommonNapi::GetStringArgument(env, argv[0]);
1823 MEDIA_DEBUG_LOG("AudioRendererNapi: On callbackName: %{public}s", callbackName.c_str());
1824
1825 napi_valuetype handler = napi_undefined;
1826 if (argc == requireArgc) {
1827 napi_typeof(env, argv[1], &handler);
1828 NAPI_ASSERT(env, handler == napi_function, "type mismatch for parameter 2");
1829 } else {
1830 napi_valuetype paramArg1 = napi_undefined;
1831         napi_typeof(env, argv[1], &paramArg1);
1832         napi_valuetype expectedValType = napi_number; // Default; adjust based on 'callbackName' if other types are required.
1833 if (paramArg1 != expectedValType) {
1834 MEDIA_ERR_LOG("Type mismatch for param 2!!");
1835 napi_value result = nullptr;
1836 napi_get_undefined(env, &result);
1837 return result;
1838 }
1839
1840 const int32_t arg2 = 2;
1841 napi_typeof(env, argv[arg2], &handler);
1842 NAPI_ASSERT(env, handler == napi_function, "type mismatch for parameter 3");
1843 }
1844
1845 return RegisterCallback(env, jsThis, argv, callbackName);
1846 }
1847
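// off() currently supports only 'markReach' and 'periodReach': the native position callback is
// unset and the NAPI wrapper released; other event names fail the assertion below.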
1848 napi_value AudioRendererNapi::UnregisterCallback(napi_env env, napi_value jsThis, const std::string& cbName)
1849 {
1850 AudioRendererNapi *rendererNapi = nullptr;
1851 napi_status status = napi_unwrap(env, jsThis, reinterpret_cast<void **>(&rendererNapi));
1852 NAPI_ASSERT(env, status == napi_ok && rendererNapi != nullptr, "Failed to retrieve audio renderer napi instance.");
1853 NAPI_ASSERT(env, rendererNapi->audioRenderer_ != nullptr, "Audio renderer instance is null.");
1854
1855 if (!cbName.compare(MARK_REACH_CALLBACK_NAME)) {
1856 rendererNapi->audioRenderer_->UnsetRendererPositionCallback();
1857 rendererNapi->positionCBNapi_ = nullptr;
1858 } else if (!cbName.compare(PERIOD_REACH_CALLBACK_NAME)) {
1859 rendererNapi->audioRenderer_->UnsetRendererPeriodPositionCallback();
1860 rendererNapi->periodPositionCBNapi_ = nullptr;
1861 } else {
1862 bool unknownCallback = true;
1863 NAPI_ASSERT(env, !unknownCallback, "No such off callback supported");
1864 }
1865
1866 napi_value result = nullptr;
1867 napi_get_undefined(env, &result);
1868 return result;
1869 }
1870
1871 napi_value AudioRendererNapi::Off(napi_env env, napi_callback_info info)
1872 {
1873 const size_t requireArgc = 1;
1874 size_t argc = 1;
1875
1876 napi_value argv[requireArgc] = {nullptr};
1877 napi_value jsThis = nullptr;
1878 napi_status status = napi_get_cb_info(env, info, &argc, argv, &jsThis, nullptr);
1879     NAPI_ASSERT(env, status == napi_ok && argc >= requireArgc, "AudioRendererNapi: Off: requires min 1 parameter");
1880
1881 napi_valuetype eventType = napi_undefined;
1882 napi_typeof(env, argv[0], &eventType);
1883 NAPI_ASSERT(env, eventType == napi_string, "AudioRendererNapi:Off: type mismatch for event name, parameter 1");
1884
1885 std::string callbackName = AudioCommonNapi::GetStringArgument(env, argv[0]);
1886 MEDIA_DEBUG_LOG("AudioRendererNapi: Off callbackName: %{public}s", callbackName.c_str());
1887
1888 return UnregisterCallback(env, jsThis, callbackName);
1889 }
1890
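// Parses the JS options object into AudioRendererOptions. Illustrative shape (assumed from the
// property names read below):
//   { rendererInfo: { content, usage, rendererFlags },
//     streamInfo:   { samplingRate, channels, sampleFormat, encodingType } }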
1891 bool AudioRendererNapi::ParseRendererOptions(napi_env env, napi_value root, AudioRendererOptions *opts)
1892 {
1893 napi_value res = nullptr;
1894
1895 if (napi_get_named_property(env, root, "rendererInfo", &res) == napi_ok) {
1896 ParseRendererInfo(env, res, &(opts->rendererInfo));
1897 }
1898
1899 if (napi_get_named_property(env, root, "streamInfo", &res) == napi_ok) {
1900 ParseStreamInfo(env, res, &(opts->streamInfo));
1901 }
1902
1903 return true;
1904 }
1905
1906 bool AudioRendererNapi::ParseRendererInfo(napi_env env, napi_value root, AudioRendererInfo *rendererInfo)
1907 {
1908 napi_value tempValue = nullptr;
1909 int32_t intValue = {0};
1910
1911 if (napi_get_named_property(env, root, "content", &tempValue) == napi_ok) {
1912 napi_get_value_int32(env, tempValue, &intValue);
1913 rendererInfo->contentType = static_cast<ContentType>(intValue);
1914 }
1915
1916 if (napi_get_named_property(env, root, "usage", &tempValue) == napi_ok) {
1917 napi_get_value_int32(env, tempValue, &intValue);
1918 rendererInfo->streamUsage = static_cast<StreamUsage>(intValue);
1919 }
1920
1921 if (napi_get_named_property(env, root, "rendererFlags", &tempValue) == napi_ok) {
1922 napi_get_value_int32(env, tempValue, &(rendererInfo->rendererFlags));
1923 }
1924
1925 return true;
1926 }
1927
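// sampleFormat arrives as the JS-side enum value and is converted to the native
// AudioSampleFormat via GetNativeAudioSampleFormat(); the other fields are cast directly.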
1928 bool AudioRendererNapi::ParseStreamInfo(napi_env env, napi_value root, AudioStreamInfo* streamInfo)
1929 {
1930 napi_value tempValue = nullptr;
1931 int32_t intValue = {0};
1932
1933 if (napi_get_named_property(env, root, "samplingRate", &tempValue) == napi_ok) {
1934 napi_get_value_int32(env, tempValue, &intValue);
1935 streamInfo->samplingRate = static_cast<AudioSamplingRate>(intValue);
1936 }
1937
1938 if (napi_get_named_property(env, root, "channels", &tempValue) == napi_ok) {
1939 napi_get_value_int32(env, tempValue, &intValue);
1940 streamInfo->channels = static_cast<AudioChannel>(intValue);
1941 }
1942
1943 if (napi_get_named_property(env, root, "sampleFormat", &tempValue) == napi_ok) {
1944 napi_get_value_int32(env, tempValue, &intValue);
1945 streamInfo->format = GetNativeAudioSampleFormat(intValue);
1946 }
1947
1948 if (napi_get_named_property(env, root, "encodingType", &tempValue) == napi_ok) {
1949 napi_get_value_int32(env, tempValue, &intValue);
1950 streamInfo->encoding = static_cast<AudioEncodingType>(intValue);
1951 }
1952
1953 return true;
1954 }
1955
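// Creates a JS AudioRenderer instance: the options are parked in the static sRendererOptions_
// slot so the napi constructor can read them, and the slot gives up ownership via release()
// once construction has finished.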
1956 napi_value AudioRendererNapi::CreateAudioRendererWrapper(napi_env env, unique_ptr<AudioRendererOptions> &renderOptions)
1957 {
1958 napi_status status;
1959 napi_value result = nullptr;
1960 napi_value constructor;
1961
1962 if (renderOptions != nullptr) {
1963 status = napi_get_reference_value(env, g_rendererConstructor, &constructor);
1964 if (status == napi_ok) {
1965 sRendererOptions_ = move(renderOptions);
1966 status = napi_new_instance(env, constructor, 0, nullptr, &result);
1967 sRendererOptions_.release();
1968 if (status == napi_ok) {
1969 return result;
1970 }
1971 }
1972 HiLog::Error(LABEL, "Failed in CreateAudioRendererWrapper, %{public}d", status);
1973 }
1974
1975 napi_get_undefined(env, &result);
1976
1977 return result;
1978 }
1979 } // namespace AudioStandard
1980 } // namespace OHOS
1981