#include "env.h"

#include "async_wrap.h"
#include "debug_utils-inl.h"
#include "memory_tracker-inl.h"
#include "node_buffer.h"
#include "node_context_data.h"
#include "node_errors.h"
#include "node_file.h"
#include "node_internals.h"
#include "node_options-inl.h"
#include "node_process.h"
#include "node_v8_platform-inl.h"
#include "node_worker.h"
#include "req_wrap-inl.h"
#include "tracing/agent.h"
#include "tracing/traced_value.h"
#include "util-inl.h"
#include "v8-profiler.h"

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <memory>

namespace node {

using errors::TryCatchScope;
using v8::Boolean;
using v8::Context;
using v8::EmbedderGraph;
using v8::FinalizationGroup;
using v8::Function;
using v8::FunctionTemplate;
using v8::HandleScope;
using v8::Integer;
using v8::Isolate;
using v8::Local;
using v8::MaybeLocal;
using v8::NewStringType;
using v8::Number;
using v8::Object;
using v8::Private;
using v8::Script;
using v8::SnapshotCreator;
using v8::StackTrace;
using v8::String;
using v8::Symbol;
using v8::TracingController;
using v8::TryCatch;
using v8::Undefined;
using v8::Value;
using worker::Worker;

int const Environment::kNodeContextTag = 0x6e6f64;
void* const Environment::kNodeContextTagPtr = const_cast<void*>(
    static_cast<const void*>(&Environment::kNodeContextTag));

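// Add the per-isolate properties (private symbols, symbols, strings and the
// AsyncWrap provider strings) to the snapshot and return the data indices
// that V8 assigned to them, in the order in which they were added.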
std::vector<size_t> IsolateData::Serialize(SnapshotCreator* creator) {
  Isolate* isolate = creator->GetIsolate();
  std::vector<size_t> indexes;
  HandleScope handle_scope(isolate);
  // XXX(joyeecheung): technically speaking, the indexes here should be
  // consecutive and we could just return a range instead of an array,
  // but that's not part of the V8 API contract so we use an array
  // just to be safe.

#define VP(PropertyName, StringValue) V(Private, PropertyName)
#define VY(PropertyName, StringValue) V(Symbol, PropertyName)
#define VS(PropertyName, StringValue) V(String, PropertyName)
#define V(TypeName, PropertyName)                                             \
  indexes.push_back(creator->AddData(PropertyName##_.Get(isolate)));
  PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP)
  PER_ISOLATE_SYMBOL_PROPERTIES(VY)
  PER_ISOLATE_STRING_PROPERTIES(VS)
#undef V
#undef VY
#undef VS
#undef VP
  for (size_t i = 0; i < AsyncWrap::PROVIDERS_LENGTH; i++)
    indexes.push_back(creator->AddData(async_wrap_provider(i)));

  return indexes;
}

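// Counterpart of Serialize(): restores the per-isolate properties from the
// snapshot, consuming the data indices in the same order in which Serialize()
// produced them.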
void IsolateData::DeserializeProperties(const std::vector<size_t>* indexes) {
  size_t i = 0;
  HandleScope handle_scope(isolate_);

#define VP(PropertyName, StringValue) V(Private, PropertyName)
#define VY(PropertyName, StringValue) V(Symbol, PropertyName)
#define VS(PropertyName, StringValue) V(String, PropertyName)
#define V(TypeName, PropertyName)                                             \
  do {                                                                        \
    MaybeLocal<TypeName> field =                                              \
        isolate_->GetDataFromSnapshotOnce<TypeName>((*indexes)[i++]);         \
    if (field.IsEmpty()) {                                                    \
      fprintf(stderr, "Failed to deserialize " #PropertyName "\n");           \
    }                                                                         \
    PropertyName##_.Set(isolate_, field.ToLocalChecked());                    \
  } while (0);
  PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP)
  PER_ISOLATE_SYMBOL_PROPERTIES(VY)
  PER_ISOLATE_STRING_PROPERTIES(VS)
#undef V
#undef VY
#undef VS
#undef VP

  for (size_t j = 0; j < AsyncWrap::PROVIDERS_LENGTH; j++) {
    MaybeLocal<String> field =
        isolate_->GetDataFromSnapshotOnce<String>((*indexes)[i++]);
    if (field.IsEmpty()) {
      fprintf(stderr, "Failed to deserialize AsyncWrap provider %zu\n", j);
    }
    async_wrap_providers_[j].Set(isolate_, field.ToLocalChecked());
  }
}

void IsolateData::CreateProperties() {
  // Create string and private symbol properties as internalized one byte
  // strings after the platform is properly initialized.
  //
  // Internalized because it makes property lookups a little faster and
  // because the string is created in the old space straight away. It's going
  // to end up in the old space sooner or later anyway but now it doesn't go
  // through v8::Eternal's new space handling first.
  //
  // One byte because our strings are ASCII and we can safely skip V8's UTF-8
  // decoding step.

  HandleScope handle_scope(isolate_);

#define V(PropertyName, StringValue)                                          \
  PropertyName##_.Set(                                                        \
      isolate_,                                                               \
      Private::New(isolate_,                                                  \
                   String::NewFromOneByte(                                    \
                       isolate_,                                              \
                       reinterpret_cast<const uint8_t*>(StringValue),         \
                       NewStringType::kInternalized,                          \
                       sizeof(StringValue) - 1)                               \
                       .ToLocalChecked()));
  PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(V)
#undef V
#define V(PropertyName, StringValue)                                          \
  PropertyName##_.Set(                                                        \
      isolate_,                                                               \
      Symbol::New(isolate_,                                                   \
                  String::NewFromOneByte(                                     \
                      isolate_,                                               \
                      reinterpret_cast<const uint8_t*>(StringValue),          \
                      NewStringType::kInternalized,                           \
                      sizeof(StringValue) - 1)                                \
                      .ToLocalChecked()));
  PER_ISOLATE_SYMBOL_PROPERTIES(V)
#undef V
#define V(PropertyName, StringValue)                                          \
  PropertyName##_.Set(                                                        \
      isolate_,                                                               \
      String::NewFromOneByte(isolate_,                                        \
                             reinterpret_cast<const uint8_t*>(StringValue),   \
                             NewStringType::kInternalized,                    \
                             sizeof(StringValue) - 1)                         \
          .ToLocalChecked());
  PER_ISOLATE_STRING_PROPERTIES(V)
#undef V

  // Create all the provider strings that will be passed to JS. Place them in
  // an array so the array index matches the PROVIDER id offset. This way the
  // strings can be retrieved quickly.
#define V(Provider)                                                           \
  async_wrap_providers_[AsyncWrap::PROVIDER_ ## Provider].Set(                \
      isolate_,                                                               \
      String::NewFromOneByte(                                                 \
          isolate_,                                                           \
          reinterpret_cast<const uint8_t*>(#Provider),                        \
          NewStringType::kInternalized,                                       \
          sizeof(#Provider) - 1).ToLocalChecked());
  NODE_ASYNC_PROVIDER_TYPES(V)
#undef V
}

IsolateData::IsolateData(Isolate* isolate,
                         uv_loop_t* event_loop,
                         MultiIsolatePlatform* platform,
                         ArrayBufferAllocator* node_allocator,
                         const std::vector<size_t>* indexes)
    : isolate_(isolate),
      event_loop_(event_loop),
      allocator_(isolate->GetArrayBufferAllocator()),
      node_allocator_(node_allocator == nullptr ? nullptr
                                                : node_allocator->GetImpl()),
      uses_node_allocator_(allocator_ == node_allocator_),
      platform_(platform) {
  CHECK_NOT_NULL(allocator_);

  options_.reset(
      new PerIsolateOptions(*(per_process::cli_options->per_isolate)));

  if (indexes == nullptr) {
    CreateProperties();
  } else {
    DeserializeProperties(indexes);
  }
}

void IsolateData::MemoryInfo(MemoryTracker* tracker) const {
#define V(PropertyName, StringValue)                                          \
  tracker->TrackField(#PropertyName, PropertyName());
  PER_ISOLATE_SYMBOL_PROPERTIES(V)
#undef V

#define V(PropertyName, StringValue)                                          \
  tracker->TrackField(#PropertyName, PropertyName());
  PER_ISOLATE_STRING_PROPERTIES(V)
#undef V

  tracker->TrackField("async_wrap_providers", async_wrap_providers_);

  if (node_allocator_ != nullptr) {
    tracker->TrackFieldWithSize(
        "node_allocator", sizeof(*node_allocator_), "NodeArrayBufferAllocator");
  } else {
    tracker->TrackFieldWithSize(
        "allocator", sizeof(*allocator_), "v8::ArrayBuffer::Allocator");
  }
  tracker->TrackFieldWithSize(
      "platform", sizeof(*platform_), "MultiIsolatePlatform");
  // TODO(joyeecheung): implement MemoryRetainer in the option classes.
}

void InitThreadLocalOnce() {
  CHECK_EQ(0, uv_key_create(&Environment::thread_local_env));
}

void TrackingTraceStateObserver::UpdateTraceCategoryState() {
  if (!env_->owns_process_state() || !env_->can_call_into_js()) {
    // Ideally, we’d have a consistent story that treats all threads/Environment
    // instances equally here. However, tracing is essentially global, and this
    // callback is called from whichever thread calls `StartTracing()` or
    // `StopTracing()`. The only way to do this in a threadsafe fashion
    // seems to be only tracking this from the main thread, and only allowing
    // these state modifications from the main thread.
    return;
  }

  bool async_hooks_enabled = (*(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
                                 TRACING_CATEGORY_NODE1(async_hooks)))) != 0;

  Isolate* isolate = env_->isolate();
  HandleScope handle_scope(isolate);
  Local<Function> cb = env_->trace_category_state_function();
  if (cb.IsEmpty())
    return;
  TryCatchScope try_catch(env_);
  try_catch.SetVerbose(true);
  Local<Value> args[] = {Boolean::New(isolate, async_hooks_enabled)};
  USE(cb->Call(env_->context(), Undefined(isolate), arraysize(args), args));
}

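// Creates the objects that the rest of the bootstrap process relies on: the
// callback data object (whose internal field points back at this Environment),
// the `primordials` object set up by the per-context script, and the `process`
// object.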
void Environment::CreateProperties() {
  HandleScope handle_scope(isolate_);
  Local<Context> ctx = context();
  Local<FunctionTemplate> templ = FunctionTemplate::New(isolate());
  templ->InstanceTemplate()->SetInternalFieldCount(1);
  templ->Inherit(BaseObject::GetConstructorTemplate(this));
  Local<Object> obj = templ->GetFunction(ctx)
                          .ToLocalChecked()
                          ->NewInstance(ctx)
                          .ToLocalChecked();
  obj->SetAlignedPointerInInternalField(0, this);
  set_as_callback_data(obj);
  set_as_callback_data_template(templ);

  // Store the primordials set up by the per-context script in the environment.
  Local<Object> per_context_bindings =
      GetPerContextExports(ctx).ToLocalChecked();
  Local<Value> primordials =
      per_context_bindings->Get(ctx, primordials_string()).ToLocalChecked();
  CHECK(primordials->IsObject());
  set_primordials(primordials.As<Object>());

  Local<Object> process_object =
      node::CreateProcessObject(this).FromMaybe(Local<Object>());
  set_process_object(process_object);
}

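// Returns the absolute path of the running executable, falling back to
// argv[0] if uv_exepath() fails.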
std::string GetExecPath(const std::vector<std::string>& argv) {
  char exec_path_buf[2 * PATH_MAX];
  size_t exec_path_len = sizeof(exec_path_buf);
  std::string exec_path;
  if (uv_exepath(exec_path_buf, &exec_path_len) == 0) {
    exec_path = std::string(exec_path_buf, exec_path_len);
  } else {
    exec_path = argv[0];
  }

  // On OpenBSD process.execPath will be relative unless we
  // get the full path before process.execPath is used.
#if defined(__OpenBSD__)
  uv_fs_t req;
  req.ptr = nullptr;
  if (0 ==
      uv_fs_realpath(nullptr, &req, exec_path.c_str(), nullptr)) {
    CHECK_NOT_NULL(req.ptr);
    exec_path = std::string(static_cast<char*>(req.ptr));
  }
  uv_fs_req_cleanup(&req);
#endif

  return exec_path;
}

Environment::Environment(IsolateData* isolate_data,
                         Local<Context> context,
                         const std::vector<std::string>& args,
                         const std::vector<std::string>& exec_args,
                         EnvironmentFlags::Flags flags,
                         ThreadId thread_id)
    : isolate_(context->GetIsolate()),
      isolate_data_(isolate_data),
      immediate_info_(context->GetIsolate()),
      tick_info_(context->GetIsolate()),
      timer_base_(uv_now(isolate_data->event_loop())),
      exec_argv_(exec_args),
      argv_(args),
      exec_path_(GetExecPath(args)),
      should_abort_on_uncaught_toggle_(isolate_, 1),
      stream_base_state_(isolate_, StreamBase::kNumStreamBaseStateFields),
      flags_(flags),
      thread_id_(thread_id.id == static_cast<uint64_t>(-1) ?
          AllocateEnvironmentThreadId().id : thread_id.id),
      fs_stats_field_array_(isolate_, kFsStatsBufferLength),
      fs_stats_field_bigint_array_(isolate_, kFsStatsBufferLength),
      context_(context->GetIsolate(), context) {
  // We'll be creating new objects so make sure we've entered the context.
  HandleScope handle_scope(isolate());
  Context::Scope context_scope(context);

  // Set some flags if only kDefaultFlags was passed. This can make API version
  // transitions easier for embedders.
  if (flags_ & EnvironmentFlags::kDefaultFlags) {
    flags_ = flags_ |
        EnvironmentFlags::kOwnsProcessState |
        EnvironmentFlags::kOwnsInspector;
  }

  set_env_vars(per_process::system_environment);
  enabled_debug_list_.Parse(this);

  // We create new copies of the per-Environment option sets, so that it is
  // easier to modify them after Environment creation. The defaults are
  // part of the per-Isolate option set, for which in turn the defaults are
  // part of the per-process option set.
  options_.reset(new EnvironmentOptions(*isolate_data->options()->per_env));
  inspector_host_port_.reset(
      new ExclusiveAccess<HostPort>(options_->debug_options().host_port));

  if (!(flags_ & EnvironmentFlags::kOwnsProcessState)) {
    set_abort_on_uncaught_exception(false);
  }

#if HAVE_INSPECTOR
  // We can only create the inspector agent after having cloned the options.
  inspector_agent_ = std::make_unique<inspector::Agent>(this);
#endif

  AssignToContext(context, ContextInfo(""));

  static uv_once_t init_once = UV_ONCE_INIT;
  uv_once(&init_once, InitThreadLocalOnce);
  uv_key_set(&thread_local_env, this);

  if (tracing::AgentWriterHandle* writer = GetTracingAgentWriter()) {
    trace_state_observer_ = std::make_unique<TrackingTraceStateObserver>(this);
    if (TracingController* tracing_controller = writer->GetTracingController())
      tracing_controller->AddTraceStateObserver(trace_state_observer_.get());
  }

  destroy_async_id_list_.reserve(512);

  performance_state_ =
      std::make_unique<performance::PerformanceState>(isolate());
  performance_state_->Mark(
      performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT);
  performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_NODE_START,
                           per_process::node_start_time);
  performance_state_->Mark(
      performance::NODE_PERFORMANCE_MILESTONE_V8_START,
      performance::performance_v8_start);

  if (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
          TRACING_CATEGORY_NODE1(environment)) != 0) {
    auto traced_value = tracing::TracedValue::Create();
    traced_value->BeginArray("args");
    for (const std::string& arg : args) traced_value->AppendString(arg);
    traced_value->EndArray();
    traced_value->BeginArray("exec_args");
    for (const std::string& arg : exec_args) traced_value->AppendString(arg);
    traced_value->EndArray();
    TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(TRACING_CATEGORY_NODE1(environment),
                                      "Environment",
                                      this,
                                      "args",
                                      std::move(traced_value));
  }

  // By default, always abort when --abort-on-uncaught-exception was passed.
  should_abort_on_uncaught_toggle_[0] = 1;

  if (options_->no_force_async_hooks_checks) {
    async_hooks_.no_force_checks();
  }

  // TODO(joyeecheung): deserialize when the snapshot covers the environment
  // properties.
  CreateProperties();
}

Environment::~Environment() {
  if (Environment** interrupt_data = interrupt_data_.load()) {
    // There are pending RequestInterrupt() callbacks. Tell them not to run,
    // then force V8 to run interrupts by compiling and running an empty script
    // so as not to leak memory.
    *interrupt_data = nullptr;

    Isolate::AllowJavascriptExecutionScope allow_js_here(isolate());
    HandleScope handle_scope(isolate());
    TryCatch try_catch(isolate());
    Context::Scope context_scope(context());

#ifdef DEBUG
    bool consistency_check = false;
    isolate()->RequestInterrupt([](Isolate*, void* data) {
      *static_cast<bool*>(data) = true;
    }, &consistency_check);
#endif

    Local<Script> script;
    if (Script::Compile(context(), String::Empty(isolate())).ToLocal(&script))
      USE(script->Run(context()));

    DCHECK(consistency_check);
  }

  isolate()->GetHeapProfiler()->RemoveBuildEmbedderGraphCallback(
      BuildEmbedderGraph, this);

  // Make sure there are no re-used libuv wrapper objects.
  // CleanupHandles() should have removed all of them.
  CHECK(file_handle_read_wrap_freelist_.empty());

  HandleScope handle_scope(isolate());

#if HAVE_INSPECTOR
  // Destroy inspector agent before erasing the context. The inspector
  // destructor depends on the context still being accessible.
  inspector_agent_.reset();
#endif

  context()->SetAlignedPointerInEmbedderData(
      ContextEmbedderIndex::kEnvironment, nullptr);

  if (trace_state_observer_) {
    tracing::AgentWriterHandle* writer = GetTracingAgentWriter();
    CHECK_NOT_NULL(writer);
    if (TracingController* tracing_controller = writer->GetTracingController())
      tracing_controller->RemoveTraceStateObserver(trace_state_observer_.get());
  }

  delete[] heap_statistics_buffer_;
  delete[] heap_space_statistics_buffer_;
  delete[] http_parser_buffer_;
  delete[] heap_code_statistics_buffer_;

  TRACE_EVENT_NESTABLE_ASYNC_END0(
      TRACING_CATEGORY_NODE1(environment), "Environment", this);

  // Do not unload addons on the main thread. Some addons need to retain memory
  // beyond the Environment's lifetime, and unloading them early would break
  // them; with Worker threads, we have the opportunity to be stricter.
  // Also, since the main thread usually stops just before the process exits,
  // this is far less relevant here.
  if (!is_main_thread()) {
    // Dereference all addons that were loaded into this environment.
    for (binding::DLib& addon : loaded_addons_) {
      addon.Close();
    }
  }

  CHECK_EQ(base_object_count(), 0);
}

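// Sets up the libuv handles owned by this Environment: the timer handle used
// by the JS timers implementation, the check/idle handles backing
// setImmediate(), the prepare/check pair used by the profiler idle notifier,
// and the async handle that drains the threadsafe task queues.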
void Environment::InitializeLibuv(bool start_profiler_idle_notifier) {
  HandleScope handle_scope(isolate());
  Context::Scope context_scope(context());

  CHECK_EQ(0, uv_timer_init(event_loop(), timer_handle()));
  uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle()));

  uv_check_init(event_loop(), immediate_check_handle());
  uv_unref(reinterpret_cast<uv_handle_t*>(immediate_check_handle()));

  uv_idle_init(event_loop(), immediate_idle_handle());

  uv_check_start(immediate_check_handle(), CheckImmediate);

  // Inform V8's CPU profiler when we're idle. The profiler is sampling-based
  // but not all samples are created equal; mark the wall clock time spent in
  // epoll_wait() and friends so profiling tools can filter it out. The samples
  // still end up in v8.log but with state=IDLE rather than state=EXTERNAL.
  // TODO(bnoordhuis) Depends on a libuv implementation detail that we should
  // probably fortify in the API contract, namely that the last started prepare
  // or check watcher runs first. It's not 100% foolproof; if an add-on starts
  // a prepare or check watcher after us, any samples attributed to its
  // callback will be recorded with state=IDLE.
  uv_prepare_init(event_loop(), &idle_prepare_handle_);
  uv_check_init(event_loop(), &idle_check_handle_);
  uv_async_init(
      event_loop(),
      &task_queues_async_,
      [](uv_async_t* async) {
        Environment* env = ContainerOf(
            &Environment::task_queues_async_, async);
        env->CleanupFinalizationGroups();
        env->RunAndClearNativeImmediates();
      });
  uv_unref(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_));
  uv_unref(reinterpret_cast<uv_handle_t*>(&idle_check_handle_));
  uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_));

  {
    Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
    task_queues_async_initialized_ = true;
    if (native_immediates_threadsafe_.size() > 0 ||
        native_immediates_interrupts_.size() > 0) {
      uv_async_send(&task_queues_async_);
    }
  }

  // Register clean-up callbacks to be called to clean up the handles
  // when the environment is freed. Note that they are not cleaned up in
  // the one-environment-per-process setup, but will be called in
  // FreeEnvironment().
  RegisterHandleCleanups();

  if (start_profiler_idle_notifier) {
    StartProfilerIdleNotifier();
  }
}

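// Requests that this Environment stop as soon as possible: no further calls
// into JS are allowed, running JS is terminated, sub-workers are stopped, and
// a threadsafe immediate is scheduled to stop the event loop.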
void Environment::Stop() {
  set_can_call_into_js(false);
  set_stopping(true);
  stop_sub_worker_contexts();
  isolate_->TerminateExecution();
  SetImmediateThreadsafe([](Environment* env) { uv_stop(env->event_loop()); });
}

void Environment::RegisterHandleCleanups() {
  HandleCleanupCb close_and_finish = [](Environment* env, uv_handle_t* handle,
                                        void* arg) {
    handle->data = env;

    env->CloseHandle(handle, [](uv_handle_t* handle) {
#ifdef DEBUG
      memset(handle, 0xab, uv_handle_size(handle->type));
#endif
    });
  };

  auto register_handle = [&](uv_handle_t* handle) {
    RegisterHandleCleanup(handle, close_and_finish, nullptr);
  };
  register_handle(reinterpret_cast<uv_handle_t*>(timer_handle()));
  register_handle(reinterpret_cast<uv_handle_t*>(immediate_check_handle()));
  register_handle(reinterpret_cast<uv_handle_t*>(immediate_idle_handle()));
  register_handle(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_));
  register_handle(reinterpret_cast<uv_handle_t*>(&idle_check_handle_));
  register_handle(reinterpret_cast<uv_handle_t*>(&task_queues_async_));
}

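// Cancels outstanding requests, closes all libuv handles associated with this
// Environment, and runs the event loop until those handles have fully closed.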
void Environment::CleanupHandles() {
  {
    Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
    task_queues_async_initialized_ = false;
  }

  Isolate::DisallowJavascriptExecutionScope disallow_js(isolate(),
      Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);

  RunAndClearNativeImmediates(true /* skip unrefed SetImmediate()s */);

  for (ReqWrapBase* request : req_wrap_queue_)
    request->Cancel();

  for (HandleWrap* handle : handle_wrap_queue_)
    handle->Close();

  for (HandleCleanup& hc : handle_cleanup_queue_)
    hc.cb_(this, hc.handle_, hc.arg_);
  handle_cleanup_queue_.clear();

  while (handle_cleanup_waiting_ != 0 ||
         request_waiting_ != 0 ||
         !handle_wrap_queue_.IsEmpty()) {
    uv_run(event_loop(), UV_RUN_ONCE);
  }

  file_handle_read_wrap_freelist_.clear();
}

void Environment::StartProfilerIdleNotifier() {
  if (profiler_idle_notifier_started_)
    return;

  profiler_idle_notifier_started_ = true;

  uv_prepare_start(&idle_prepare_handle_, [](uv_prepare_t* handle) {
    Environment* env = ContainerOf(&Environment::idle_prepare_handle_, handle);
    env->isolate()->SetIdle(true);
  });

  uv_check_start(&idle_check_handle_, [](uv_check_t* handle) {
    Environment* env = ContainerOf(&Environment::idle_check_handle_, handle);
    env->isolate()->SetIdle(false);
  });
}

void Environment::StopProfilerIdleNotifier() {
  profiler_idle_notifier_started_ = false;
  uv_prepare_stop(&idle_prepare_handle_);
  uv_check_stop(&idle_check_handle_);
}

void Environment::PrintSyncTrace() const {
  if (!trace_sync_io_) return;

  HandleScope handle_scope(isolate());

  fprintf(
      stderr, "(node:%d) WARNING: Detected use of sync API\n", uv_os_getpid());
  PrintStackTrace(isolate(),
                  StackTrace::CurrentStackTrace(
                      isolate(), stack_trace_limit(), StackTrace::kDetailed));
}

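// Runs the registered cleanup hooks in reverse insertion order (most recently
// added first), closing handles in between passes, and finally closes any
// remaining "unmanaged" file descriptors.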
void Environment::RunCleanup() {
  started_cleanup_ = true;
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "RunCleanup", this);
  CleanupHandles();

  while (!cleanup_hooks_.empty() ||
         native_immediates_.size() > 0 ||
         native_immediates_threadsafe_.size() > 0 ||
         native_immediates_interrupts_.size() > 0) {
    // Copy into a vector, since we can't sort an unordered_set in-place.
    std::vector<CleanupHookCallback> callbacks(
        cleanup_hooks_.begin(), cleanup_hooks_.end());
    // We can't erase the copied elements from `cleanup_hooks_` yet, because we
    // need to be able to check whether they were un-scheduled by another hook.

    std::sort(callbacks.begin(), callbacks.end(),
              [](const CleanupHookCallback& a, const CleanupHookCallback& b) {
      // Sort in descending order so that the most recently inserted callbacks
      // are run first.
      return a.insertion_order_counter_ > b.insertion_order_counter_;
    });

    for (const CleanupHookCallback& cb : callbacks) {
      if (cleanup_hooks_.count(cb) == 0) {
        // This hook was removed from the `cleanup_hooks_` set during another
        // hook that was run earlier. Nothing to do here.
        continue;
      }

      cb.fn_(cb.arg_);
      cleanup_hooks_.erase(cb);
    }
    CleanupHandles();
  }

  for (const int fd : unmanaged_fds_) {
    uv_fs_t close_req;
    uv_fs_close(nullptr, &close_req, fd, nullptr);
    uv_fs_req_cleanup(&close_req);
  }
}

void Environment::RunAtExitCallbacks() {
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "AtExit", this);
  for (ExitCallback at_exit : at_exit_functions_) {
    at_exit.cb_(at_exit.arg_);
  }
  at_exit_functions_.clear();
}

void Environment::AtExit(void (*cb)(void* arg), void* arg) {
  at_exit_functions_.push_front(ExitCallback{cb, arg});
}

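// Drains the queue of interrupt callbacks scheduled via RequestInterrupt().
// The queue is moved out while holding the mutex so that callbacks can
// schedule further interrupts without deadlocking.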
void Environment::RunAndClearInterrupts() {
  while (native_immediates_interrupts_.size() > 0) {
    NativeImmediateQueue queue;
    {
      Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
      queue.ConcatMove(std::move(native_immediates_interrupts_));
    }
    DebugSealHandleScope seal_handle_scope(isolate());

    while (auto head = queue.Shift())
      head->Call(this);
  }
}

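// Runs the native (C++) SetImmediate() callbacks. Interrupts are handled
// first; refed callbacks are counted so that the immediate handles can be
// unrefed once no refed immediates remain, and exceptions thrown by callbacks
// are routed through TriggerUncaughtException().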
void Environment::RunAndClearNativeImmediates(bool only_refed) {
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "RunAndClearNativeImmediates", this);
  HandleScope handle_scope(isolate_);
  InternalCallbackScope cb_scope(this, Object::New(isolate_), { 0, 0 });

  size_t ref_count = 0;

  // Handle interrupts first. These functions are not allowed to throw
  // exceptions, so we do not need to handle that.
  RunAndClearInterrupts();

  auto drain_list = [&](NativeImmediateQueue* queue) {
    TryCatchScope try_catch(this);
    DebugSealHandleScope seal_handle_scope(isolate());
    while (auto head = queue->Shift()) {
      bool is_refed = head->flags() & CallbackFlags::kRefed;
      if (is_refed)
        ref_count++;

      if (is_refed || !only_refed)
        head->Call(this);

      head.reset();  // Destroy now so that this is also observed by try_catch.

      if (UNLIKELY(try_catch.HasCaught())) {
        if (!try_catch.HasTerminated() && can_call_into_js())
          errors::TriggerUncaughtException(isolate(), try_catch);

        return true;
      }
    }
    return false;
  };
  while (drain_list(&native_immediates_)) {}

  immediate_info()->ref_count_dec(ref_count);

  if (immediate_info()->ref_count() == 0)
    ToggleImmediateRef(false);

  // It is safe to check .size() first, because there is a causal relationship
  // between pushes to the threadsafe immediate list and this function being
  // called. For the common case, it's worth checking the size first before
  // establishing a mutex lock.
  // This is intentionally placed after the `ref_count` handling, because when
  // refed threadsafe immediates are created, they are not counted towards the
  // count in immediate_info() either.
  NativeImmediateQueue threadsafe_immediates;
  if (native_immediates_threadsafe_.size() > 0) {
    Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
    threadsafe_immediates.ConcatMove(std::move(native_immediates_threadsafe_));
  }
  while (drain_list(&threadsafe_immediates)) {}
}

void Environment::RequestInterruptFromV8() {
  // The Isolate may outlive the Environment, so some logic to handle the
  // situation in which the Environment is destroyed before the handler runs
  // is required.

  // We allocate a new pointer to a pointer to this Environment instance, and
  // try to set it as interrupt_data_. If interrupt_data_ was already set, then
  // callbacks are already scheduled to run and we can delete our own pointer
  // and just return. If it was nullptr previously, the Environment** is
  // stored; ~Environment sets the Environment* contained in it to nullptr,
  // so that the callback can check whether ~Environment has already run and
  // it is thus not safe to access the Environment instance itself.
  Environment** interrupt_data = new Environment*(this);
  Environment** dummy = nullptr;
  if (!interrupt_data_.compare_exchange_strong(dummy, interrupt_data)) {
    delete interrupt_data;
    return;  // Already scheduled.
  }

  isolate()->RequestInterrupt([](Isolate* isolate, void* data) {
    std::unique_ptr<Environment*> env_ptr { static_cast<Environment**>(data) };
    Environment* env = *env_ptr;
    if (env == nullptr) {
      // The Environment has already been destroyed. That should be okay; any
      // callback added before the Environment shuts down would have been
      // handled during cleanup.
      return;
    }
    env->interrupt_data_.store(nullptr);
    env->RunAndClearInterrupts();
  }, interrupt_data);
}

void Environment::ScheduleTimer(int64_t duration_ms) {
  if (started_cleanup_) return;
  uv_timer_start(timer_handle(), RunTimers, duration_ms, 0);
}

void Environment::ToggleTimerRef(bool ref) {
  if (started_cleanup_) return;

  if (ref) {
    uv_ref(reinterpret_cast<uv_handle_t*>(timer_handle()));
  } else {
    uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle()));
  }
}

void Environment::RunTimers(uv_timer_t* handle) {
  Environment* env = Environment::from_timer_handle(handle);
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "RunTimers", env);

  if (!env->can_call_into_js())
    return;

  HandleScope handle_scope(env->isolate());
  Context::Scope context_scope(env->context());

  Local<Object> process = env->process_object();
  InternalCallbackScope scope(env, process, {0, 0});

  Local<Function> cb = env->timers_callback_function();
  MaybeLocal<Value> ret;
  Local<Value> arg = env->GetNow();
  // This code will loop until all currently due timers have been processed.
  // It is impossible for us to end up in an infinite loop due to how the
  // JS-side is structured.
  do {
    TryCatchScope try_catch(env);
    try_catch.SetVerbose(true);
    ret = cb->Call(env->context(), process, 1, &arg);
  } while (ret.IsEmpty() && env->can_call_into_js());

  // NOTE(apapirovski): If it ever becomes possible that `call_into_js` above
  // is reset back to `true` after being previously set to `false` then this
  // code becomes invalid and needs to be rewritten. Otherwise catastrophic
  // timers corruption will occur and all timers behaviour will become
  // entirely unpredictable.
  if (ret.IsEmpty())
    return;

  // To allow for less JS-C++ boundary crossing, the value returned from JS
  // serves a few purposes:
  // 1. If it's 0, no more timers exist and the handle should be unrefed
  // 2. If it's > 0, the value represents the next timer's expiry and there
  //    is at least one timer remaining that is refed.
  // 3. If it's < 0, the absolute value represents the next timer's expiry
  //    and there are no timers that are refed.
  int64_t expiry_ms =
      ret.ToLocalChecked()->IntegerValue(env->context()).FromJust();

  uv_handle_t* h = reinterpret_cast<uv_handle_t*>(handle);

  if (expiry_ms != 0) {
    int64_t duration_ms =
        llabs(expiry_ms) - (uv_now(env->event_loop()) - env->timer_base());

    env->ScheduleTimer(duration_ms > 0 ? duration_ms : 1);

    if (expiry_ms > 0)
      uv_ref(h);
    else
      uv_unref(h);
  } else {
    uv_unref(h);
  }
}


void Environment::CheckImmediate(uv_check_t* handle) {
  Environment* env = Environment::from_immediate_check_handle(handle);
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "CheckImmediate", env);

  HandleScope scope(env->isolate());
  Context::Scope context_scope(env->context());

  env->RunAndClearNativeImmediates();

  if (env->immediate_info()->count() == 0 || !env->can_call_into_js())
    return;

  do {
    MakeCallback(env->isolate(),
                 env->process_object(),
                 env->immediate_callback_function(),
                 0,
                 nullptr,
                 {0, 0}).ToLocalChecked();
  } while (env->immediate_info()->has_outstanding() && env->can_call_into_js());

  if (env->immediate_info()->ref_count() == 0)
    env->ToggleImmediateRef(false);
}

void Environment::ToggleImmediateRef(bool ref) {
  if (started_cleanup_) return;

  if (ref) {
    // Idle handle is needed only to stop the event loop from blocking in poll.
    uv_idle_start(immediate_idle_handle(), [](uv_idle_t*){ });
  } else {
    uv_idle_stop(immediate_idle_handle());
  }
}


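// Returns the current event loop time relative to timer_base(), as a
// v8::Integer when it fits into 32 bits and as a v8::Number otherwise.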
Local<Value> Environment::GetNow() {
  uv_update_time(event_loop());
  uint64_t now = uv_now(event_loop());
  CHECK_GE(now, timer_base());
  now -= timer_base();
  if (now <= 0xffffffff)
    return Integer::NewFromUnsigned(isolate(), static_cast<uint32_t>(now));
  else
    return Number::New(isolate(), static_cast<double>(now));
}

void CollectExceptionInfo(Environment* env,
                          Local<Object> obj,
                          int errorno,
                          const char* err_string,
                          const char* syscall,
                          const char* message,
                          const char* path,
                          const char* dest) {
  obj->Set(env->context(),
           env->errno_string(),
           Integer::New(env->isolate(), errorno)).Check();

  obj->Set(env->context(), env->code_string(),
           OneByteString(env->isolate(), err_string)).Check();

  if (message != nullptr) {
    obj->Set(env->context(), env->message_string(),
             OneByteString(env->isolate(), message)).Check();
  }

  Local<Value> path_buffer;
  if (path != nullptr) {
    path_buffer =
        Buffer::Copy(env->isolate(), path, strlen(path)).ToLocalChecked();
    obj->Set(env->context(), env->path_string(), path_buffer).Check();
  }

  Local<Value> dest_buffer;
  if (dest != nullptr) {
    dest_buffer =
        Buffer::Copy(env->isolate(), dest, strlen(dest)).ToLocalChecked();
    obj->Set(env->context(), env->dest_string(), dest_buffer).Check();
  }

  if (syscall != nullptr) {
    obj->Set(env->context(), env->syscall_string(),
             OneByteString(env->isolate(), syscall)).Check();
  }
}

void Environment::CollectUVExceptionInfo(Local<Value> object,
                                         int errorno,
                                         const char* syscall,
                                         const char* message,
                                         const char* path,
                                         const char* dest) {
  if (!object->IsObject() || errorno == 0)
    return;

  Local<Object> obj = object.As<Object>();
  const char* err_string = uv_err_name(errorno);

  if (message == nullptr || message[0] == '\0') {
    message = uv_strerror(errorno);
  }

  node::CollectExceptionInfo(this, obj, errorno, err_string,
                             syscall, message, path, dest);
}

void ImmediateInfo::MemoryInfo(MemoryTracker* tracker) const {
  tracker->TrackField("fields", fields_);
}

void TickInfo::MemoryInfo(MemoryTracker* tracker) const {
  tracker->TrackField("fields", fields_);
}

void AsyncHooks::MemoryInfo(MemoryTracker* tracker) const {
  tracker->TrackField("async_ids_stack", async_ids_stack_);
  tracker->TrackField("fields", fields_);
  tracker->TrackField("async_id_fields", async_id_fields_);
}

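// Grows the async id stack and re-publishes the backing JS array on the
// async hooks binding object, since growing may have replaced the underlying
// buffer.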
void AsyncHooks::grow_async_ids_stack() {
  async_ids_stack_.reserve(async_ids_stack_.Length() * 3);

  env()->async_hooks_binding()->Set(
      env()->context(),
      env()->async_ids_stack_string(),
      async_ids_stack_.GetJSArray()).Check();
}

uv_key_t Environment::thread_local_env = {};

void Environment::Exit(int exit_code) {
  if (options()->trace_exit) {
    HandleScope handle_scope(isolate());
    Isolate::DisallowJavascriptExecutionScope disallow_js(
        isolate(), Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);

    if (is_main_thread()) {
      fprintf(stderr, "(node:%d) ", uv_os_getpid());
    } else {
      fprintf(stderr, "(node:%d, thread:%" PRIu64 ") ",
              uv_os_getpid(), thread_id());
    }

    fprintf(
        stderr, "WARNING: Exited the environment with code %d\n", exit_code);
    PrintStackTrace(isolate(),
                    StackTrace::CurrentStackTrace(
                        isolate(), stack_trace_limit(), StackTrace::kDetailed));
  }
  process_exit_handler_(this, exit_code);
}

void Environment::stop_sub_worker_contexts() {
  while (!sub_worker_contexts_.empty()) {
    Worker* w = *sub_worker_contexts_.begin();
    remove_sub_worker_context(w);
    w->Exit(1);
    w->JoinThread();
  }
}

Environment* Environment::worker_parent_env() const {
  if (worker_context() == nullptr) return nullptr;
  return worker_context()->env();
}

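// Tracks file descriptors that are handed out to user code as raw integers
// ("unmanaged" fds) so that RunCleanup() can close any that are still open
// when the Environment shuts down. Warns on double-adds and on removals of
// descriptors that were never added.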
void Environment::AddUnmanagedFd(int fd) {
  if (!tracks_unmanaged_fds()) return;
  auto result = unmanaged_fds_.insert(fd);
  if (!result.second) {
    ProcessEmitWarning(
        this, "File descriptor %d opened in unmanaged mode twice", fd);
  }
}

void Environment::RemoveUnmanagedFd(int fd) {
  if (!tracks_unmanaged_fds()) return;
  size_t removed_count = unmanaged_fds_.erase(fd);
  if (removed_count == 0) {
    ProcessEmitWarning(
        this, "File descriptor %d closed but not opened in unmanaged mode", fd);
  }
}

void Environment::BuildEmbedderGraph(Isolate* isolate,
                                     EmbedderGraph* graph,
                                     void* data) {
  MemoryTracker tracker(isolate, graph);
  Environment* env = static_cast<Environment*>(data);
  tracker.Track(env);
  env->ForEachBaseObject([&](BaseObject* obj) {
    if (obj->IsDoneInitializing())
      tracker.Track(obj);
  });
}

inline size_t Environment::SelfSize() const {
  size_t size = sizeof(*this);
  // Remove non-pointer fields that will be tracked in MemoryInfo().
  // TODO(joyeecheung): refactor the MemoryTracker interface so
  // this can be done for common types within the Track* calls automatically
  // if a certain scope is entered.
  size -= sizeof(async_hooks_);
  size -= sizeof(tick_info_);
  size -= sizeof(immediate_info_);
  return size;
}

void Environment::MemoryInfo(MemoryTracker* tracker) const {
  // Iterable STL containers have their own sizes subtracted from the parent
  // by default.
  tracker->TrackField("isolate_data", isolate_data_);
  tracker->TrackField("native_modules_with_cache", native_modules_with_cache);
  tracker->TrackField("native_modules_without_cache",
                      native_modules_without_cache);
  tracker->TrackField("destroy_async_id_list", destroy_async_id_list_);
  tracker->TrackField("exec_argv", exec_argv_);
  tracker->TrackField("should_abort_on_uncaught_toggle",
                      should_abort_on_uncaught_toggle_);
  tracker->TrackField("stream_base_state", stream_base_state_);
  tracker->TrackField("fs_stats_field_array", fs_stats_field_array_);
  tracker->TrackField("fs_stats_field_bigint_array",
                      fs_stats_field_bigint_array_);
  tracker->TrackFieldWithSize(
      "cleanup_hooks", cleanup_hooks_.size() * sizeof(CleanupHookCallback));
  tracker->TrackField("async_hooks", async_hooks_);
  tracker->TrackField("immediate_info", immediate_info_);
  tracker->TrackField("tick_info", tick_info_);

#define V(PropertyName, TypeName)                                             \
  tracker->TrackField(#PropertyName, PropertyName());
  ENVIRONMENT_STRONG_PERSISTENT_VALUES(V)
#undef V

  // FIXME(joyeecheung): track other fields in Environment.
  // Currently MemoryTracker is unable to track these
  // correctly:
  // - Internal types that do not implement MemoryRetainer yet
  // - STL containers with MemoryRetainer* inside
  // - STL containers with numeric types inside that should not have their
  //   nodes elided e.g. numeric keys in maps.
  // We also need to make sure that when we add a non-pointer field as its own
  // node, we shift its sizeof() size out of the Environment node.
}

char* Environment::Reallocate(char* data, size_t old_size, size_t size) {
  if (old_size == size) return data;
  // If we know that the allocator is our ArrayBufferAllocator, we can let
  // it reallocate directly.
  if (isolate_data()->uses_node_allocator()) {
    return static_cast<char*>(
        isolate_data()->node_allocator()->Reallocate(data, old_size, size));
  }
  // Generic allocators do not provide a reallocation method; we need to
  // allocate a new chunk of memory and copy the data over.
  char* new_data = AllocateUnchecked(size);
  if (new_data == nullptr) return nullptr;
  memcpy(new_data, data, std::min(size, old_size));
  if (size > old_size)
    memset(new_data + old_size, 0, size - old_size);
  Free(data, old_size);
  return new_data;
}

void Environment::AddArrayBufferAllocatorToKeepAliveUntilIsolateDispose(
    std::shared_ptr<v8::ArrayBuffer::Allocator> allocator) {
  if (keep_alive_allocators_ == nullptr) {
    MultiIsolatePlatform* platform = isolate_data()->platform();
    CHECK_NOT_NULL(platform);

    keep_alive_allocators_ = new ArrayBufferAllocatorList();
    platform->AddIsolateFinishedCallback(isolate(), [](void* data) {
      delete static_cast<ArrayBufferAllocatorList*>(data);
    }, static_cast<void*>(keep_alive_allocators_));
  }

  keep_alive_allocators_->insert(allocator);
}

void Environment::RunWeakRefCleanup() {
  isolate()->ClearKeptObjects();
}

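// Runs the cleanup callbacks of FinalizationGroup instances that V8 has
// handed over to the embedder. If cleanup of a group fails (e.g. because a
// callback threw), any pending exception is reported and processing of the
// remaining groups is re-scheduled via the task queue async handle.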
void Environment::CleanupFinalizationGroups() {
  HandleScope handle_scope(isolate());
  Context::Scope context_scope(context());
  TryCatchScope try_catch(this);

  while (!cleanup_finalization_groups_.empty() && can_call_into_js()) {
    Local<FinalizationGroup> fg =
        cleanup_finalization_groups_.front().Get(isolate());
    cleanup_finalization_groups_.pop_front();
    if (!FinalizationGroup::Cleanup(fg).FromMaybe(false)) {
      if (try_catch.HasCaught() && !try_catch.HasTerminated())
        errors::TriggerUncaughtException(isolate(), try_catch);
      // Re-schedule the execution of the remainder of the queue.
      CHECK(task_queues_async_initialized_);
      uv_async_send(&task_queues_async_);
      return;
    }
  }
}

// Not really any better place than env.cc at this moment.
void BaseObject::DeleteMe(void* data) {
  BaseObject* self = static_cast<BaseObject*>(data);
  if (self->has_pointer_data() &&
      self->pointer_data()->strong_ptr_count > 0) {
    return self->Detach();
  }
  delete self;
}

bool BaseObject::IsDoneInitializing() const { return true; }

Local<Object> BaseObject::WrappedObject() const {
  return object();
}

bool BaseObject::IsRootNode() const {
  return !persistent_handle_.IsWeak();
}

Local<FunctionTemplate> BaseObject::GetConstructorTemplate(Environment* env) {
  Local<FunctionTemplate> tmpl = env->base_object_ctor_template();
  if (tmpl.IsEmpty()) {
    tmpl = env->NewFunctionTemplate(nullptr);
    tmpl->SetClassName(FIXED_ONE_BYTE_STRING(env->isolate(), "BaseObject"));
    env->set_base_object_ctor_template(tmpl);
  }
  return tmpl;
}

}  // namespace node