1 #include "env.h"
2 #include "allocated_buffer-inl.h"
3 #include "async_wrap.h"
4 #include "base_object-inl.h"
5 #include "debug_utils-inl.h"
6 #include "diagnosticfilename-inl.h"
7 #include "memory_tracker-inl.h"
8 #include "node_buffer.h"
9 #include "node_context_data.h"
10 #include "node_errors.h"
11 #include "node_internals.h"
12 #include "node_options-inl.h"
13 #include "node_process-inl.h"
14 #include "node_v8_platform-inl.h"
15 #include "node_worker.h"
16 #include "req_wrap-inl.h"
17 #include "stream_base.h"
18 #include "tracing/agent.h"
19 #include "tracing/traced_value.h"
20 #include "util-inl.h"
21 #include "v8-profiler.h"
22
23 #include <algorithm>
24 #include <atomic>
25 #include <cstdio>
26 #include <limits>
27 #include <memory>
28
29 namespace node {
30
31 using errors::TryCatchScope;
32 using v8::Boolean;
33 using v8::Context;
34 using v8::EmbedderGraph;
35 using v8::Function;
36 using v8::FunctionTemplate;
37 using v8::HandleScope;
38 using v8::Integer;
39 using v8::Isolate;
40 using v8::Local;
41 using v8::MaybeLocal;
42 using v8::NewStringType;
43 using v8::Number;
44 using v8::Object;
45 using v8::Private;
46 using v8::Script;
47 using v8::SnapshotCreator;
48 using v8::StackTrace;
49 using v8::String;
50 using v8::Symbol;
51 using v8::TracingController;
52 using v8::TryCatch;
53 using v8::Undefined;
54 using v8::Value;
55 using worker::Worker;
56
// Tag used to identify Node.js-owned v8::Contexts: the *address* of
// kNodeContextTag is stored in the context's embedder data and compared by
// pointer identity, so the value itself (ASCII "nod") is arbitrary.
int const Environment::kNodeContextTag = 0x6e6f64;
void* const Environment::kNodeContextTagPtr = const_cast<void*>(
    static_cast<const void*>(&Environment::kNodeContextTag));
60
// Adds every per-isolate eternal handle (private symbols, symbols, strings,
// and the AsyncWrap provider name strings) to the snapshot, returning the
// snapshot data indexes in the exact order that DeserializeProperties()
// consumes them.
std::vector<size_t> IsolateData::Serialize(SnapshotCreator* creator) {
  Isolate* isolate = creator->GetIsolate();
  std::vector<size_t> indexes;
  HandleScope handle_scope(isolate);
  // XXX(joyeecheung): technically speaking, the indexes here should be
  // consecutive and we could just return a range instead of an array,
  // but that's not part of the V8 API contract so we use an array
  // just to be safe.

#define VP(PropertyName, StringValue) V(Private, PropertyName)
#define VY(PropertyName, StringValue) V(Symbol, PropertyName)
#define VS(PropertyName, StringValue) V(String, PropertyName)
#define V(TypeName, PropertyName)                                             \
  indexes.push_back(creator->AddData(PropertyName##_.Get(isolate)));
  PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP)
  PER_ISOLATE_SYMBOL_PROPERTIES(VY)
  PER_ISOLATE_STRING_PROPERTIES(VS)
#undef V
#undef VY
#undef VS
#undef VP
  // The AsyncWrap provider strings are appended last; the matching loop in
  // DeserializeProperties() reads them back in the same order.
  for (size_t i = 0; i < AsyncWrap::PROVIDERS_LENGTH; i++)
    indexes.push_back(creator->AddData(async_wrap_provider(i)));

  return indexes;
}
87
// Restores the per-isolate eternal handles from a snapshot created by
// Serialize(). `indexes` must hold the snapshot data indexes in exactly the
// order Serialize() produced them; `i` walks through them one by one.
//
// NOTE(review): when GetDataFromSnapshotOnce() fails, this only logs to
// stderr and then stores the (empty) handle anyway; a later access to that
// field would crash. Consider making deserialization failure fatal here.
void IsolateData::DeserializeProperties(const std::vector<size_t>* indexes) {
  size_t i = 0;
  HandleScope handle_scope(isolate_);

#define VP(PropertyName, StringValue) V(Private, PropertyName)
#define VY(PropertyName, StringValue) V(Symbol, PropertyName)
#define VS(PropertyName, StringValue) V(String, PropertyName)
#define V(TypeName, PropertyName)                                             \
  do {                                                                        \
    MaybeLocal<TypeName> maybe_field =                                        \
        isolate_->GetDataFromSnapshotOnce<TypeName>((*indexes)[i++]);         \
    Local<TypeName> field;                                                    \
    if (!maybe_field.ToLocal(&field)) {                                       \
      fprintf(stderr, "Failed to deserialize " #PropertyName "\n");           \
    }                                                                         \
    PropertyName##_.Set(isolate_, field);                                     \
  } while (0);
  PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP)
  PER_ISOLATE_SYMBOL_PROPERTIES(VY)
  PER_ISOLATE_STRING_PROPERTIES(VS)
#undef V
#undef VY
#undef VS
#undef VP

  // Provider strings were appended after the property fields in Serialize().
  for (size_t j = 0; j < AsyncWrap::PROVIDERS_LENGTH; j++) {
    MaybeLocal<String> maybe_field =
        isolate_->GetDataFromSnapshotOnce<String>((*indexes)[i++]);
    Local<String> field;
    if (!maybe_field.ToLocal(&field)) {
      fprintf(stderr, "Failed to deserialize AsyncWrap provider %zu\n", j);
    }
    async_wrap_providers_[j].Set(isolate_, field);
  }
}
123
// Eagerly creates the per-isolate private symbols, symbols, strings and
// AsyncWrap provider name strings (used when no snapshot is available;
// otherwise DeserializeProperties() restores them instead).
void IsolateData::CreateProperties() {
  // Create string and private symbol properties as internalized one byte
  // strings after the platform is properly initialized.
  //
  // Internalized because it makes property lookups a little faster and
  // because the string is created in the old space straight away. It's going
  // to end up in the old space sooner or later anyway but now it doesn't go
  // through v8::Eternal's new space handling first.
  //
  // One byte because our strings are ASCII and we can safely skip V8's UTF-8
  // decoding step.

  HandleScope handle_scope(isolate_);

  // Private symbols, each wrapping an internalized name string.
#define V(PropertyName, StringValue)                                          \
  PropertyName##_.Set(                                                        \
      isolate_,                                                               \
      Private::New(isolate_,                                                  \
                   String::NewFromOneByte(                                    \
                       isolate_,                                              \
                       reinterpret_cast<const uint8_t*>(StringValue),         \
                       NewStringType::kInternalized,                          \
                       sizeof(StringValue) - 1)                               \
                       .ToLocalChecked()));
  PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(V)
#undef V
  // Regular (non-private) symbols with an internalized description string.
#define V(PropertyName, StringValue)                                          \
  PropertyName##_.Set(                                                        \
      isolate_,                                                               \
      Symbol::New(isolate_,                                                   \
                  String::NewFromOneByte(                                     \
                      isolate_,                                               \
                      reinterpret_cast<const uint8_t*>(StringValue),          \
                      NewStringType::kInternalized,                           \
                      sizeof(StringValue) - 1)                                \
                      .ToLocalChecked()));
  PER_ISOLATE_SYMBOL_PROPERTIES(V)
#undef V
  // Plain internalized strings. sizeof(StringValue) - 1 drops the trailing
  // NUL of the string literal.
#define V(PropertyName, StringValue)                                          \
  PropertyName##_.Set(                                                        \
      isolate_,                                                               \
      String::NewFromOneByte(isolate_,                                        \
                             reinterpret_cast<const uint8_t*>(StringValue),   \
                             NewStringType::kInternalized,                    \
                             sizeof(StringValue) - 1)                         \
          .ToLocalChecked());
  PER_ISOLATE_STRING_PROPERTIES(V)
#undef V

  // Create all the provider strings that will be passed to JS. Place them in
  // an array so the array index matches the PROVIDER id offset. This way the
  // strings can be retrieved quickly.
#define V(Provider)                                                           \
  async_wrap_providers_[AsyncWrap::PROVIDER_ ## Provider].Set(                \
      isolate_,                                                               \
      String::NewFromOneByte(                                                 \
          isolate_,                                                           \
          reinterpret_cast<const uint8_t*>(#Provider),                        \
          NewStringType::kInternalized,                                       \
          sizeof(#Provider) - 1).ToLocalChecked());
  NODE_ASYNC_PROVIDER_TYPES(V)
#undef V
}
187
IsolateData(Isolate * isolate,uv_loop_t * event_loop,MultiIsolatePlatform * platform,ArrayBufferAllocator * node_allocator,const std::vector<size_t> * indexes)188 IsolateData::IsolateData(Isolate* isolate,
189 uv_loop_t* event_loop,
190 MultiIsolatePlatform* platform,
191 ArrayBufferAllocator* node_allocator,
192 const std::vector<size_t>* indexes)
193 : isolate_(isolate),
194 event_loop_(event_loop),
195 node_allocator_(node_allocator == nullptr ? nullptr
196 : node_allocator->GetImpl()),
197 platform_(platform) {
198 options_.reset(
199 new PerIsolateOptions(*(per_process::cli_options->per_isolate)));
200
201 if (indexes == nullptr) {
202 CreateProperties();
203 } else {
204 DeserializeProperties(indexes);
205 }
206 }
207
// Reports this IsolateData's retained fields to the memory tracker for heap
// snapshots. Note that private symbol properties are not tracked here, only
// symbols and strings.
void IsolateData::MemoryInfo(MemoryTracker* tracker) const {
#define V(PropertyName, StringValue)                                          \
  tracker->TrackField(#PropertyName, PropertyName());
  PER_ISOLATE_SYMBOL_PROPERTIES(V)

  PER_ISOLATE_STRING_PROPERTIES(V)
#undef V

  tracker->TrackField("async_wrap_providers", async_wrap_providers_);

  // node_allocator_ may be null (see constructor); platform_ is assumed
  // present and tracked unconditionally.
  if (node_allocator_ != nullptr) {
    tracker->TrackFieldWithSize(
        "node_allocator", sizeof(*node_allocator_), "NodeArrayBufferAllocator");
  }
  tracker->TrackFieldWithSize(
      "platform", sizeof(*platform_), "MultiIsolatePlatform");
  // TODO(joyeecheung): implement MemoryRetainer in the option classes.
}
226
InitThreadLocalOnce()227 void InitThreadLocalOnce() {
228 CHECK_EQ(0, uv_key_create(&Environment::thread_local_env));
229 }
230
// Trace-state-change hook: pushes the current enabled/disabled state of the
// node.async_hooks tracing category into JS via the registered
// trace_category_state_function callback.
void TrackingTraceStateObserver::UpdateTraceCategoryState() {
  if (!env_->owns_process_state() || !env_->can_call_into_js()) {
    // Ideally, we’d have a consistent story that treats all threads/Environment
    // instances equally here. However, tracing is essentially global, and this
    // callback is called from whichever thread calls `StartTracing()` or
    // `StopTracing()`. The only way to do this in a threadsafe fashion
    // seems to be only tracking this from the main thread, and only allowing
    // these state modifications from the main thread.
    return;
  }

  // Nonzero category byte means the async_hooks tracing category is enabled.
  bool async_hooks_enabled = (*(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
                                 TRACING_CATEGORY_NODE1(async_hooks)))) != 0;

  Isolate* isolate = env_->isolate();
  HandleScope handle_scope(isolate);
  Local<Function> cb = env_->trace_category_state_function();
  // The JS callback may not have been registered yet during early startup.
  if (cb.IsEmpty())
    return;
  TryCatchScope try_catch(env_);
  try_catch.SetVerbose(true);
  Local<Value> args[] = {Boolean::New(isolate, async_hooks_enabled)};
  USE(cb->Call(env_->context(), Undefined(isolate), arraysize(args), args));
}
255
// Sets up per-Environment JS objects: the BindingData constructor template,
// the `primordials` object produced by the per-context script, a few cached
// primordial prototypes, and the `process` object.
void Environment::CreateProperties() {
  HandleScope handle_scope(isolate_);
  Local<Context> ctx = context();

  {
    Context::Scope context_scope(ctx);
    Local<FunctionTemplate> templ = FunctionTemplate::New(isolate());
    templ->InstanceTemplate()->SetInternalFieldCount(
        BaseObject::kInternalFieldCount);
    templ->Inherit(BaseObject::GetConstructorTemplate(this));

    set_binding_data_ctor_template(templ);
  }

  // Store primordials setup by the per-context script in the environment.
  // ToLocalChecked(): missing primordials are unrecoverable, so crash early.
  Local<Object> per_context_bindings =
      GetPerContextExports(ctx).ToLocalChecked();
  Local<Value> primordials =
      per_context_bindings->Get(ctx, primordials_string()).ToLocalChecked();
  CHECK(primordials->IsObject());
  set_primordials(primordials.As<Object>());

  Local<String> prototype_string =
      FIXED_ONE_BYTE_STRING(isolate(), "prototype");

  // Cache `<PrimordialsPropertyName>.prototype` on the Environment so hot
  // paths don't have to re-do the two property lookups.
#define V(EnvPropertyName, PrimordialsPropertyName)                           \
  {                                                                           \
    Local<Value> ctor =                                                       \
        primordials.As<Object>()                                              \
            ->Get(ctx,                                                        \
                  FIXED_ONE_BYTE_STRING(isolate(), PrimordialsPropertyName))  \
            .ToLocalChecked();                                                \
    CHECK(ctor->IsObject());                                                  \
    Local<Value> prototype =                                                  \
        ctor.As<Object>()->Get(ctx, prototype_string).ToLocalChecked();       \
    CHECK(prototype->IsObject());                                             \
    set_##EnvPropertyName(prototype.As<Object>());                            \
  }

  V(primordials_safe_map_prototype_object, "SafeMap");
  V(primordials_safe_set_prototype_object, "SafeSet");
  V(primordials_safe_weak_map_prototype_object, "SafeWeakMap");
  V(primordials_safe_weak_set_prototype_object, "SafeWeakSet");
#undef V

  // NOTE(review): FromMaybe(Local<Object>()) means process_object may be
  // stored as an empty handle if CreateProcessObject() failed — presumably
  // that only happens during teardown/termination; confirm callers tolerate.
  Local<Object> process_object =
      node::CreateProcessObject(this).FromMaybe(Local<Object>());
  set_process_object(process_object);
}
305
GetExecPath(const std::vector<std::string> & argv)306 std::string GetExecPath(const std::vector<std::string>& argv) {
307 char exec_path_buf[2 * PATH_MAX];
308 size_t exec_path_len = sizeof(exec_path_buf);
309 std::string exec_path;
310 if (uv_exepath(exec_path_buf, &exec_path_len) == 0) {
311 exec_path = std::string(exec_path_buf, exec_path_len);
312 } else {
313 exec_path = argv[0];
314 }
315
316 // On OpenBSD process.execPath will be relative unless we
317 // get the full path before process.execPath is used.
318 #if defined(__OpenBSD__)
319 uv_fs_t req;
320 req.ptr = nullptr;
321 if (0 ==
322 uv_fs_realpath(nullptr, &req, exec_path.c_str(), nullptr)) {
323 CHECK_NOT_NULL(req.ptr);
324 exec_path = std::string(static_cast<char*>(req.ptr));
325 }
326 uv_fs_req_cleanup(&req);
327 #endif
328
329 return exec_path;
330 }
331
// Constructs an Environment bound to `context`. A thread_id of -1 (the
// sentinel) means "allocate a fresh Environment thread id". Initialization
// order below matters: options must be cloned before the inspector agent is
// created, and CreateProperties() runs last so base_object_count() reflects
// all internally created BaseObjects.
Environment::Environment(IsolateData* isolate_data,
                         Local<Context> context,
                         const std::vector<std::string>& args,
                         const std::vector<std::string>& exec_args,
                         EnvironmentFlags::Flags flags,
                         ThreadId thread_id)
    : isolate_(context->GetIsolate()),
      isolate_data_(isolate_data),
      immediate_info_(context->GetIsolate()),
      tick_info_(context->GetIsolate()),
      timer_base_(uv_now(isolate_data->event_loop())),
      exec_argv_(exec_args),
      argv_(args),
      exec_path_(GetExecPath(args)),
      should_abort_on_uncaught_toggle_(isolate_, 1),
      stream_base_state_(isolate_, StreamBase::kNumStreamBaseStateFields),
      flags_(flags),
      thread_id_(thread_id.id == static_cast<uint64_t>(-1) ?
          AllocateEnvironmentThreadId().id : thread_id.id),
      context_(context->GetIsolate(), context) {
  // We'll be creating new objects so make sure we've entered the context.
  HandleScope handle_scope(isolate());
  Context::Scope context_scope(context);

  // Set some flags if only kDefaultFlags was passed. This can make API version
  // transitions easier for embedders.
  if (flags_ & EnvironmentFlags::kDefaultFlags) {
    flags_ = flags_ |
        EnvironmentFlags::kOwnsProcessState |
        EnvironmentFlags::kOwnsInspector;
  }

  set_env_vars(per_process::system_environment);
  enabled_debug_list_.Parse(this);

  // We create new copies of the per-Environment option sets, so that it is
  // easier to modify them after Environment creation. The defaults are
  // part of the per-Isolate option set, for which in turn the defaults are
  // part of the per-process option set.
  options_ = std::make_shared<EnvironmentOptions>(
      *isolate_data->options()->per_env);
  inspector_host_port_ = std::make_shared<ExclusiveAccess<HostPort>>(
      options_->debug_options().host_port);

  // Environments that do not own the process state must not flip the
  // process-wide abort-on-uncaught setting.
  if (!(flags_ & EnvironmentFlags::kOwnsProcessState)) {
    set_abort_on_uncaught_exception(false);
  }

#if HAVE_INSPECTOR
  // We can only create the inspector agent after having cloned the options.
  inspector_agent_ = std::make_unique<inspector::Agent>(this);
#endif

  AssignToContext(context, ContextInfo(""));

  // Lazily create the process-wide thread-local key, then point it at this
  // Environment for the current thread.
  static uv_once_t init_once = UV_ONCE_INIT;
  uv_once(&init_once, InitThreadLocalOnce);
  uv_key_set(&thread_local_env, this);

  if (tracing::AgentWriterHandle* writer = GetTracingAgentWriter()) {
    trace_state_observer_ = std::make_unique<TrackingTraceStateObserver>(this);
    if (TracingController* tracing_controller = writer->GetTracingController())
      tracing_controller->AddTraceStateObserver(trace_state_observer_.get());
  }

  // Pre-size the destroy-hook id list to avoid rehash/realloc churn.
  destroy_async_id_list_.reserve(512);

  performance_state_ =
      std::make_unique<performance::PerformanceState>(isolate());
  performance_state_->Mark(
      performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT);
  performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_NODE_START,
                           per_process::node_start_time);
  performance_state_->Mark(
      performance::NODE_PERFORMANCE_MILESTONE_V8_START,
      performance::performance_v8_start);

  // If the node.environment tracing category is active, emit a nestable
  // async begin event carrying argv/exec_argv; the matching end event is
  // emitted in ~Environment().
  if (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
          TRACING_CATEGORY_NODE1(environment)) != 0) {
    auto traced_value = tracing::TracedValue::Create();
    traced_value->BeginArray("args");
    for (const std::string& arg : args) traced_value->AppendString(arg);
    traced_value->EndArray();
    traced_value->BeginArray("exec_args");
    for (const std::string& arg : exec_args) traced_value->AppendString(arg);
    traced_value->EndArray();
    TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(TRACING_CATEGORY_NODE1(environment),
                                      "Environment",
                                      this,
                                      "args",
                                      std::move(traced_value));
  }

  // By default, always abort when --abort-on-uncaught-exception was passed.
  should_abort_on_uncaught_toggle_[0] = 1;

  if (!options_->force_async_hooks_checks) {
    async_hooks_.no_force_checks();
  }

  // TODO(joyeecheung): deserialize when the snapshot covers the environment
  // properties.
  CreateProperties();

  // This adjusts the return value of base_object_count() so that tests that
  // check the count do not have to account for internally created BaseObjects.
  initial_base_object_count_ = base_object_count();
}
440
// Tears down the Environment. FreeEnvironment() must already have run
// (is_stopping() is CHECKed); this destructor mainly unhooks global
// observers/callbacks and flushes pending V8 interrupt requests.
Environment::~Environment() {
  if (Environment** interrupt_data = interrupt_data_.load()) {
    // There are pending RequestInterrupt() callbacks. Tell them not to run,
    // then force V8 to run interrupts by compiling and running an empty script
    // so as not to leak memory.
    *interrupt_data = nullptr;

    Isolate::AllowJavascriptExecutionScope allow_js_here(isolate());
    HandleScope handle_scope(isolate());
    TryCatch try_catch(isolate());
    Context::Scope context_scope(context());

#ifdef DEBUG
    // Verify (debug builds only) that running the empty script really did
    // drain the interrupt queue.
    bool consistency_check = false;
    isolate()->RequestInterrupt([](Isolate*, void* data) {
      *static_cast<bool*>(data) = true;
    }, &consistency_check);
#endif

    Local<Script> script;
    if (Script::Compile(context(), String::Empty(isolate())).ToLocal(&script))
      USE(script->Run(context()));

    DCHECK(consistency_check);
  }

  // FreeEnvironment() should have set this.
  CHECK(is_stopping());

  // Only unregister the near-heap-limit callback if it is still installed;
  // once enough snapshots were taken the callback removes itself.
  if (options_->heap_snapshot_near_heap_limit > heap_limit_snapshot_taken_) {
    isolate_->RemoveNearHeapLimitCallback(Environment::NearHeapLimitCallback,
                                          0);
  }

  isolate()->GetHeapProfiler()->RemoveBuildEmbedderGraphCallback(
      BuildEmbedderGraph, this);

  HandleScope handle_scope(isolate());

#if HAVE_INSPECTOR
  // Destroy inspector agent before erasing the context. The inspector
  // destructor depends on the context still being accessible.
  inspector_agent_.reset();
#endif

  // Detach this Environment from its context so stale embedder-data lookups
  // return nullptr instead of a dangling pointer.
  context()->SetAlignedPointerInEmbedderData(
      ContextEmbedderIndex::kEnvironment, nullptr);

  if (trace_state_observer_) {
    tracing::AgentWriterHandle* writer = GetTracingAgentWriter();
    CHECK_NOT_NULL(writer);
    if (TracingController* tracing_controller = writer->GetTracingController())
      tracing_controller->RemoveTraceStateObserver(trace_state_observer_.get());
  }

  // Matches the nestable async begin event emitted in the constructor.
  TRACE_EVENT_NESTABLE_ASYNC_END0(
      TRACING_CATEGORY_NODE1(environment), "Environment", this);

  // Do not unload addons on the main thread. Some addons need to retain memory
  // beyond the Environment's lifetime, and unloading them early would break
  // them; with Worker threads, we have the opportunity to be stricter.
  // Also, since the main thread usually stops just before the process exits,
  // this is far less relevant here.
  if (!is_main_thread()) {
    // Dereference all addons that were loaded into this environment.
    for (binding::DLib& addon : loaded_addons_) {
      addon.Close();
    }
  }

  CHECK_EQ(base_object_count_, 0);
}
513
InitializeLibuv()514 void Environment::InitializeLibuv() {
515 HandleScope handle_scope(isolate());
516 Context::Scope context_scope(context());
517
518 CHECK_EQ(0, uv_timer_init(event_loop(), timer_handle()));
519 uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle()));
520
521 uv_check_init(event_loop(), immediate_check_handle());
522 uv_unref(reinterpret_cast<uv_handle_t*>(immediate_check_handle()));
523
524 uv_idle_init(event_loop(), immediate_idle_handle());
525
526 uv_check_start(immediate_check_handle(), CheckImmediate);
527
528 // Inform V8's CPU profiler when we're idle. The profiler is sampling-based
529 // but not all samples are created equal; mark the wall clock time spent in
530 // epoll_wait() and friends so profiling tools can filter it out. The samples
531 // still end up in v8.log but with state=IDLE rather than state=EXTERNAL.
532 uv_prepare_init(event_loop(), &idle_prepare_handle_);
533 uv_check_init(event_loop(), &idle_check_handle_);
534
535 uv_async_init(
536 event_loop(),
537 &task_queues_async_,
538 [](uv_async_t* async) {
539 Environment* env = ContainerOf(
540 &Environment::task_queues_async_, async);
541 HandleScope handle_scope(env->isolate());
542 Context::Scope context_scope(env->context());
543 env->RunAndClearNativeImmediates();
544 });
545 uv_unref(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_));
546 uv_unref(reinterpret_cast<uv_handle_t*>(&idle_check_handle_));
547 uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_));
548
549 {
550 Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
551 task_queues_async_initialized_ = true;
552 if (native_immediates_threadsafe_.size() > 0 ||
553 native_immediates_interrupts_.size() > 0) {
554 uv_async_send(&task_queues_async_);
555 }
556 }
557
558 // Register clean-up cb to be called to clean up the handles
559 // when the environment is freed, note that they are not cleaned in
560 // the one environment per process setup, but will be called in
561 // FreeEnvironment.
562 RegisterHandleCleanups();
563
564 StartProfilerIdleNotifier();
565 }
566
ExitEnv()567 void Environment::ExitEnv() {
568 set_can_call_into_js(false);
569 set_stopping(true);
570 isolate_->TerminateExecution();
571 SetImmediateThreadsafe([](Environment* env) { uv_stop(env->event_loop()); });
572 }
573
RegisterHandleCleanups()574 void Environment::RegisterHandleCleanups() {
575 HandleCleanupCb close_and_finish = [](Environment* env, uv_handle_t* handle,
576 void* arg) {
577 handle->data = env;
578
579 env->CloseHandle(handle, [](uv_handle_t* handle) {
580 #ifdef DEBUG
581 memset(handle, 0xab, uv_handle_size(handle->type));
582 #endif
583 });
584 };
585
586 auto register_handle = [&](uv_handle_t* handle) {
587 RegisterHandleCleanup(handle, close_and_finish, nullptr);
588 };
589 register_handle(reinterpret_cast<uv_handle_t*>(timer_handle()));
590 register_handle(reinterpret_cast<uv_handle_t*>(immediate_check_handle()));
591 register_handle(reinterpret_cast<uv_handle_t*>(immediate_idle_handle()));
592 register_handle(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_));
593 register_handle(reinterpret_cast<uv_handle_t*>(&idle_check_handle_));
594 register_handle(reinterpret_cast<uv_handle_t*>(&task_queues_async_));
595 }
596
// Cancels pending requests, closes all HandleWraps and registered cleanup
// handles, then drives the event loop until every close callback has fired.
// JS execution is forbidden for the duration.
void Environment::CleanupHandles() {
  {
    // From here on, threadsafe immediates can no longer assume the async
    // handle is usable.
    Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
    task_queues_async_initialized_ = false;
  }

  Isolate::DisallowJavascriptExecutionScope disallow_js(isolate(),
      Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);

  RunAndClearNativeImmediates(true /* skip unrefed SetImmediate()s */);

  for (ReqWrapBase* request : req_wrap_queue_)
    request->Cancel();

  for (HandleWrap* handle : handle_wrap_queue_)
    handle->Close();

  for (HandleCleanup& hc : handle_cleanup_queue_)
    hc.cb_(this, hc.handle_, hc.arg_);
  handle_cleanup_queue_.clear();

  // Spin the loop until all cancellations and handle closes have completed;
  // each uv_run(UV_RUN_ONCE) processes at least one pending callback.
  while (handle_cleanup_waiting_ != 0 ||
         request_waiting_ != 0 ||
         !handle_wrap_queue_.IsEmpty()) {
    uv_run(event_loop(), UV_RUN_ONCE);
  }
}
624
StartProfilerIdleNotifier()625 void Environment::StartProfilerIdleNotifier() {
626 uv_prepare_start(&idle_prepare_handle_, [](uv_prepare_t* handle) {
627 Environment* env = ContainerOf(&Environment::idle_prepare_handle_, handle);
628 env->isolate()->SetIdle(true);
629 });
630 uv_check_start(&idle_check_handle_, [](uv_check_t* handle) {
631 Environment* env = ContainerOf(&Environment::idle_check_handle_, handle);
632 env->isolate()->SetIdle(false);
633 });
634 }
635
PrintSyncTrace() const636 void Environment::PrintSyncTrace() const {
637 if (!trace_sync_io_) return;
638
639 HandleScope handle_scope(isolate());
640
641 fprintf(
642 stderr, "(node:%d) WARNING: Detected use of sync API\n", uv_os_getpid());
643 PrintStackTrace(isolate(),
644 StackTrace::CurrentStackTrace(
645 isolate(), stack_trace_limit(), StackTrace::kDetailed));
646 }
647
// Runs all registered cleanup hooks (most recently added first) until no
// hooks or pending native immediates remain, then closes any file
// descriptors that were handed to JS but never managed by a wrapper object.
void Environment::RunCleanup() {
  started_cleanup_ = true;
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "RunCleanup", this);
  bindings_.clear();
  initial_base_object_count_ = 0;
  CleanupHandles();

  // Loop because running a hook may register new hooks or schedule new
  // native immediates.
  while (!cleanup_hooks_.empty() ||
         native_immediates_.size() > 0 ||
         native_immediates_threadsafe_.size() > 0 ||
         native_immediates_interrupts_.size() > 0) {
    // Copy into a vector, since we can't sort an unordered_set in-place.
    std::vector<CleanupHookCallback> callbacks(
        cleanup_hooks_.begin(), cleanup_hooks_.end());
    // We can't erase the copied elements from `cleanup_hooks_` yet, because we
    // need to be able to check whether they were un-scheduled by another hook.

    std::sort(callbacks.begin(), callbacks.end(),
              [](const CleanupHookCallback& a, const CleanupHookCallback& b) {
      // Sort in descending order so that the most recently inserted callbacks
      // are run first.
      return a.insertion_order_counter_ > b.insertion_order_counter_;
    });

    for (const CleanupHookCallback& cb : callbacks) {
      if (cleanup_hooks_.count(cb) == 0) {
        // This hook was removed from the `cleanup_hooks_` set during another
        // hook that was run earlier. Nothing to do here.
        continue;
      }

      cb.fn_(cb.arg_);
      cleanup_hooks_.erase(cb);
    }
    // Hooks may have created new handles/requests; tear those down too.
    CleanupHandles();
  }

  // Best-effort close of unmanaged fds; errors are deliberately ignored
  // (synchronous uv_fs_close with a null callback).
  for (const int fd : unmanaged_fds_) {
    uv_fs_t close_req;
    uv_fs_close(nullptr, &close_req, fd, nullptr);
    uv_fs_req_cleanup(&close_req);
  }
}
692
RunAtExitCallbacks()693 void Environment::RunAtExitCallbacks() {
694 TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
695 "AtExit", this);
696 for (ExitCallback at_exit : at_exit_functions_) {
697 at_exit.cb_(at_exit.arg_);
698 }
699 at_exit_functions_.clear();
700 }
701
AtExit(void (* cb)(void * arg),void * arg)702 void Environment::AtExit(void (*cb)(void* arg), void* arg) {
703 at_exit_functions_.push_front(ExitCallback{cb, arg});
704 }
705
RunAndClearInterrupts()706 void Environment::RunAndClearInterrupts() {
707 while (native_immediates_interrupts_.size() > 0) {
708 NativeImmediateQueue queue;
709 {
710 Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
711 queue.ConcatMove(std::move(native_immediates_interrupts_));
712 }
713 DebugSealHandleScope seal_handle_scope(isolate());
714
715 while (auto head = queue.Shift())
716 head->Call(this);
717 }
718 }
719
// Runs queued native SetImmediate() callbacks: first pending interrupts,
// then the regular queue, then the threadsafe queue. If `only_refed` is
// true, unrefed callbacks are dropped without being called (used during
// cleanup). An uncaught JS exception aborts the current drain pass; the
// outer while-loop resumes draining afterwards.
void Environment::RunAndClearNativeImmediates(bool only_refed) {
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "RunAndClearNativeImmediates", this);
  HandleScope handle_scope(isolate_);
  InternalCallbackScope cb_scope(this, Object::New(isolate_), { 0, 0 });

  // Number of refed callbacks we processed; subtracted from the immediate
  // ref count below.
  size_t ref_count = 0;

  // Handle interrupts first. These functions are not allowed to throw
  // exceptions, so we do not need to handle that.
  RunAndClearInterrupts();

  // Returns true if draining stopped early due to a caught exception, in
  // which case the caller's loop should run it again on the same queue.
  auto drain_list = [&](NativeImmediateQueue* queue) {
    TryCatchScope try_catch(this);
    DebugSealHandleScope seal_handle_scope(isolate());
    while (auto head = queue->Shift()) {
      bool is_refed = head->flags() & CallbackFlags::kRefed;
      if (is_refed)
        ref_count++;

      if (is_refed || !only_refed)
        head->Call(this);

      head.reset();  // Destroy now so that this is also observed by try_catch.

      if (UNLIKELY(try_catch.HasCaught())) {
        if (!try_catch.HasTerminated() && can_call_into_js())
          errors::TriggerUncaughtException(isolate(), try_catch);

        return true;
      }
    }
    return false;
  };
  while (drain_list(&native_immediates_)) {}

  immediate_info()->ref_count_dec(ref_count);

  if (immediate_info()->ref_count() == 0)
    ToggleImmediateRef(false);

  // It is safe to check .size() first, because there is a causal relationship
  // between pushes to the threadsafe immediate list and this function being
  // called. For the common case, it's worth checking the size first before
  // establishing a mutex lock.
  // This is intentionally placed after the `ref_count` handling, because when
  // refed threadsafe immediates are created, they are not counted towards the
  // count in immediate_info() either.
  NativeImmediateQueue threadsafe_immediates;
  if (native_immediates_threadsafe_.size() > 0) {
    Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_);
    threadsafe_immediates.ConcatMove(std::move(native_immediates_threadsafe_));
  }
  while (drain_list(&threadsafe_immediates)) {}
}
775
// Asks V8 to run RunAndClearInterrupts() via its interrupt mechanism,
// coalescing concurrent requests through the interrupt_data_ atomic so at
// most one interrupt is outstanding at a time.
void Environment::RequestInterruptFromV8() {
  // The Isolate may outlive the Environment, so some logic to handle the
  // situation in which the Environment is destroyed before the handler runs
  // is required.

  // We allocate a new pointer to a pointer to this Environment instance, and
  // try to set it as interrupt_data_. If interrupt_data_ was already set, then
  // callbacks are already scheduled to run and we can delete our own pointer
  // and just return. If it was nullptr previously, the Environment** is stored;
  // ~Environment sets the Environment* contained in it to nullptr, so that
  // the callback can check whether ~Environment has already run and it is thus
  // not safe to access the Environment instance itself.
  Environment** interrupt_data = new Environment*(this);
  Environment** dummy = nullptr;
  if (!interrupt_data_.compare_exchange_strong(dummy, interrupt_data)) {
    delete interrupt_data;
    return;  // Already scheduled.
  }

  isolate()->RequestInterrupt([](Isolate* isolate, void* data) {
    // unique_ptr frees the heap-allocated Environment** in every path.
    std::unique_ptr<Environment*> env_ptr { static_cast<Environment**>(data) };
    Environment* env = *env_ptr;
    if (env == nullptr) {
      // The Environment has already been destroyed. That should be okay; any
      // callback added before the Environment shuts down would have been
      // handled during cleanup.
      return;
    }
    // Clear the slot first so callbacks that schedule new interrupts get a
    // fresh request, then drain the queue.
    env->interrupt_data_.store(nullptr);
    env->RunAndClearInterrupts();
  }, interrupt_data);
}
808
ScheduleTimer(int64_t duration_ms)809 void Environment::ScheduleTimer(int64_t duration_ms) {
810 if (started_cleanup_) return;
811 uv_timer_start(timer_handle(), RunTimers, duration_ms, 0);
812 }
813
ToggleTimerRef(bool ref)814 void Environment::ToggleTimerRef(bool ref) {
815 if (started_cleanup_) return;
816
817 if (ref) {
818 uv_ref(reinterpret_cast<uv_handle_t*>(timer_handle()));
819 } else {
820 uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle()));
821 }
822 }
823
// uv_timer_t callback for the shared JS timer handle: calls into the JS
// timer processor (timers_callback_function) and re-arms/refs the libuv
// handle according to the returned next-expiry value.
void Environment::RunTimers(uv_timer_t* handle) {
  Environment* env = Environment::from_timer_handle(handle);
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "RunTimers", env);

  if (!env->can_call_into_js())
    return;

  HandleScope handle_scope(env->isolate());
  Context::Scope context_scope(env->context());

  Local<Object> process = env->process_object();
  InternalCallbackScope scope(env, process, {0, 0});

  Local<Function> cb = env->timers_callback_function();
  MaybeLocal<Value> ret;
  // The JS callback receives the loop's current time relative to timer_base_.
  Local<Value> arg = env->GetNow();
  // This code will loop until all currently due timers will process. It is
  // impossible for us to end up in an infinite loop due to how the JS-side
  // is structured.
  do {
    TryCatchScope try_catch(env);
    try_catch.SetVerbose(true);
    ret = cb->Call(env->context(), process, 1, &arg);
  } while (ret.IsEmpty() && env->can_call_into_js());

  // NOTE(apapirovski): If it ever becomes possible that `call_into_js` above
  // is reset back to `true` after being previously set to `false` then this
  // code becomes invalid and needs to be rewritten. Otherwise catastrophic
  // timers corruption will occur and all timers behaviour will become
  // entirely unpredictable.
  if (ret.IsEmpty())
    return;

  // To allow for less JS-C++ boundary crossing, the value returned from JS
  // serves a few purposes:
  // 1. If it's 0, no more timers exist and the handle should be unrefed
  // 2. If it's > 0, the value represents the next timer's expiry and there
  //    is at least one timer remaining that is refed.
  // 3. If it's < 0, the absolute value represents the next timer's expiry
  //    and there are no timers that are refed.
  int64_t expiry_ms =
      ret.ToLocalChecked()->IntegerValue(env->context()).FromJust();

  uv_handle_t* h = reinterpret_cast<uv_handle_t*>(handle);

  if (expiry_ms != 0) {
    // Convert the absolute expiry back into a relative duration, clamped to
    // at least 1ms so the timer always re-fires.
    int64_t duration_ms =
        llabs(expiry_ms) - (uv_now(env->event_loop()) - env->timer_base());

    env->ScheduleTimer(duration_ms > 0 ? duration_ms : 1);

    if (expiry_ms > 0)
      uv_ref(h);
    else
      uv_unref(h);
  } else {
    uv_unref(h);
  }
}
884
885
// libuv check-phase callback: first drains the queue of native (C++)
// immediates, then repeatedly calls into JS (immediate_callback_function)
// while JS immediates remain outstanding. Finally drops the idle-handle ref
// once no ref'ed immediates are left so the loop can block in poll again.
void Environment::CheckImmediate(uv_check_t* handle) {
  Environment* env = Environment::from_immediate_check_handle(handle);
  TraceEventScope trace_scope(TRACING_CATEGORY_NODE1(environment),
                              "CheckImmediate", env);

  HandleScope scope(env->isolate());
  Context::Scope context_scope(env->context());

  // Native immediates run even when JS can no longer be called into.
  env->RunAndClearNativeImmediates();

  if (env->immediate_info()->count() == 0 || !env->can_call_into_js())
    return;

  do {
    MakeCallback(env->isolate(),
                 env->process_object(),
                 env->immediate_callback_function(),
                 0,
                 nullptr,
                 {0, 0}).ToLocalChecked();
  } while (env->immediate_info()->has_outstanding() && env->can_call_into_js());

  if (env->immediate_info()->ref_count() == 0)
    env->ToggleImmediateRef(false);
}
911
ToggleImmediateRef(bool ref)912 void Environment::ToggleImmediateRef(bool ref) {
913 if (started_cleanup_) return;
914
915 if (ref) {
916 // Idle handle is needed only to stop the event loop from blocking in poll.
917 uv_idle_start(immediate_idle_handle(), [](uv_idle_t*){ });
918 } else {
919 uv_idle_stop(immediate_idle_handle());
920 }
921 }
922
923
GetNow()924 Local<Value> Environment::GetNow() {
925 uv_update_time(event_loop());
926 uint64_t now = uv_now(event_loop());
927 CHECK_GE(now, timer_base());
928 now -= timer_base();
929 if (now <= 0xffffffff)
930 return Integer::NewFromUnsigned(isolate(), static_cast<uint32_t>(now));
931 else
932 return Number::New(isolate(), static_cast<double>(now));
933 }
934
CollectExceptionInfo(Environment * env,Local<Object> obj,int errorno,const char * err_string,const char * syscall,const char * message,const char * path,const char * dest)935 void CollectExceptionInfo(Environment* env,
936 Local<Object> obj,
937 int errorno,
938 const char* err_string,
939 const char* syscall,
940 const char* message,
941 const char* path,
942 const char* dest) {
943 obj->Set(env->context(),
944 env->errno_string(),
945 Integer::New(env->isolate(), errorno)).Check();
946
947 obj->Set(env->context(), env->code_string(),
948 OneByteString(env->isolate(), err_string)).Check();
949
950 if (message != nullptr) {
951 obj->Set(env->context(), env->message_string(),
952 OneByteString(env->isolate(), message)).Check();
953 }
954
955 Local<Value> path_buffer;
956 if (path != nullptr) {
957 path_buffer =
958 Buffer::Copy(env->isolate(), path, strlen(path)).ToLocalChecked();
959 obj->Set(env->context(), env->path_string(), path_buffer).Check();
960 }
961
962 Local<Value> dest_buffer;
963 if (dest != nullptr) {
964 dest_buffer =
965 Buffer::Copy(env->isolate(), dest, strlen(dest)).ToLocalChecked();
966 obj->Set(env->context(), env->dest_string(), dest_buffer).Check();
967 }
968
969 if (syscall != nullptr) {
970 obj->Set(env->context(), env->syscall_string(),
971 OneByteString(env->isolate(), syscall)).Check();
972 }
973 }
974
CollectUVExceptionInfo(Local<Value> object,int errorno,const char * syscall,const char * message,const char * path,const char * dest)975 void Environment::CollectUVExceptionInfo(Local<Value> object,
976 int errorno,
977 const char* syscall,
978 const char* message,
979 const char* path,
980 const char* dest) {
981 if (!object->IsObject() || errorno == 0)
982 return;
983
984 Local<Object> obj = object.As<Object>();
985 const char* err_string = uv_err_name(errorno);
986
987 if (message == nullptr || message[0] == '\0') {
988 message = uv_strerror(errorno);
989 }
990
991 node::CollectExceptionInfo(this, obj, errorno, err_string,
992 syscall, message, path, dest);
993 }
994
// Reports the memory retained by this ImmediateInfo for heap snapshots.
void ImmediateInfo::MemoryInfo(MemoryTracker* tracker) const {
  tracker->TrackField("fields", fields_);
}
998
// Reports the memory retained by this TickInfo for heap snapshots.
void TickInfo::MemoryInfo(MemoryTracker* tracker) const {
  tracker->TrackField("fields", fields_);
}
1002
// Reports the memory retained by the async_hooks bookkeeping structures
// for heap snapshots.
void AsyncHooks::MemoryInfo(MemoryTracker* tracker) const {
  tracker->TrackField("async_ids_stack", async_ids_stack_);
  tracker->TrackField("fields", fields_);
  tracker->TrackField("async_id_fields", async_id_fields_);
  tracker->TrackField("js_promise_hooks", js_promise_hooks_);
}
1009
// Grows the async id stack (3x the current length) and re-publishes the
// backing JS array on the async_hooks binding object, so the JS side picks
// up the (presumably reallocated) backing store — TODO confirm the aliased
// buffer is replaced rather than resized in place.
void AsyncHooks::grow_async_ids_stack() {
  async_ids_stack_.reserve(async_ids_stack_.Length() * 3);

  env()->async_hooks_binding()->Set(
      env()->context(),
      env()->async_ids_stack_string(),
      async_ids_stack_.GetJSArray()).Check();
}
1018
// Storage for the per-thread Environment key (zero-initialized here;
// presumably set up elsewhere via the uv_key_* APIs — confirm at call sites).
uv_key_t Environment::thread_local_env = {};
1020
// Exits this Environment with `exit_code`, delegating the actual exit to
// process_exit_handler_. With --trace-exit, first prints a warning plus the
// current stack trace to stderr; JS execution is disallowed while doing so.
void Environment::Exit(int exit_code) {
  if (options()->trace_exit) {
    HandleScope handle_scope(isolate());
    Isolate::DisallowJavascriptExecutionScope disallow_js(
        isolate(), Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);

    // Include the thread id in the prefix when not on the main thread so
    // interleaved worker output is attributable.
    if (is_main_thread()) {
      fprintf(stderr, "(node:%d) ", uv_os_getpid());
    } else {
      fprintf(stderr, "(node:%d, thread:%" PRIu64 ") ",
              uv_os_getpid(), thread_id());
    }

    fprintf(
        stderr, "WARNING: Exited the environment with code %d\n", exit_code);
    PrintStackTrace(isolate(),
                    StackTrace::CurrentStackTrace(
                        isolate(), stack_trace_limit(), StackTrace::kDetailed));
  }
  process_exit_handler_(this, exit_code);
}
1042
// Exits and joins every Worker spawned by this Environment. Must run on
// this Environment's isolate thread (see DCHECK). Each worker is removed
// from the set before being exited, so the set shrinks as we iterate.
void Environment::stop_sub_worker_contexts() {
  DCHECK_EQ(Isolate::GetCurrent(), isolate());

  while (!sub_worker_contexts_.empty()) {
    Worker* w = *sub_worker_contexts_.begin();
    remove_sub_worker_context(w);
    w->Exit(1);
    w->JoinThread();
  }
}
1053
worker_parent_env() const1054 Environment* Environment::worker_parent_env() const {
1055 if (worker_context() == nullptr) return nullptr;
1056 return worker_context()->env();
1057 }
1058
AddUnmanagedFd(int fd)1059 void Environment::AddUnmanagedFd(int fd) {
1060 if (!tracks_unmanaged_fds()) return;
1061 auto result = unmanaged_fds_.insert(fd);
1062 if (!result.second) {
1063 ProcessEmitWarning(
1064 this, "File descriptor %d opened in unmanaged mode twice", fd);
1065 }
1066 }
1067
RemoveUnmanagedFd(int fd)1068 void Environment::RemoveUnmanagedFd(int fd) {
1069 if (!tracks_unmanaged_fds()) return;
1070 size_t removed_count = unmanaged_fds_.erase(fd);
1071 if (removed_count == 0) {
1072 ProcessEmitWarning(
1073 this, "File descriptor %d closed but not opened in unmanaged mode", fd);
1074 }
1075 }
1076
void Environment::VerifyNoStrongBaseObjects() {
  // When a process exits cleanly, i.e. because the event loop ends up without
  // things to wait for, the Node.js objects that are left on the heap should
  // be:
  //
  //   1. weak, i.e. ready for garbage collection once no longer referenced, or
  //   2. detached, i.e. scheduled for destruction once no longer referenced, or
  //   3. an unrefed libuv handle, i.e. does not keep the event loop alive, or
  //   4. an inactive libuv handle (essentially the same here)
  //
  // There are a few exceptions to this rule, but generally, if there are
  // C++-backed Node.js objects on the heap that do not fall into the above
  // categories, we may be looking at a potential memory leak. Most likely,
  // the cause is a missing MakeWeak() call on the corresponding object.
  //
  // In order to avoid this kind of problem, we check the list of BaseObjects
  // for these criteria. Currently, we only do so when explicitly instructed to
  // or when in debug mode (where --verify-base-objects is always-on).

  if (!options()->verify_base_objects) return;

  // Abort on the first BaseObject that does not satisfy the criteria above;
  // its MemoryInfoName is printed to identify the offending type.
  ForEachBaseObject([](BaseObject* obj) {
    if (obj->IsNotIndicativeOfMemoryLeakAtExit()) return;
    fprintf(stderr, "Found bad BaseObject during clean exit: %s\n",
            obj->MemoryInfoName().c_str());
    fflush(stderr);
    ABORT();
  });
}
1106
GuessMemoryAvailableToTheProcess()1107 uint64_t GuessMemoryAvailableToTheProcess() {
1108 uint64_t free_in_system = uv_get_free_memory();
1109 size_t allowed = uv_get_constrained_memory();
1110 if (allowed == 0) {
1111 return free_in_system;
1112 }
1113 size_t rss;
1114 int err = uv_resident_set_memory(&rss);
1115 if (err) {
1116 return free_in_system;
1117 }
1118 if (allowed < rss) {
1119 // Something is probably wrong. Fallback to the free memory.
1120 return free_in_system;
1121 }
1122 // There may still be room for swap, but we will just leave it here.
1123 return allowed - rss;
1124 }
1125
// v8 embedder-graph callback: registers the Environment and all of its
// fully-initialized BaseObjects with the MemoryTracker so that heap
// snapshots include embedder-owned memory.
void Environment::BuildEmbedderGraph(Isolate* isolate,
                                     EmbedderGraph* graph,
                                     void* data) {
  MemoryTracker tracker(isolate, graph);
  Environment* env = static_cast<Environment*>(data);
  tracker.Track(env);
  env->ForEachBaseObject([&](BaseObject* obj) {
    // Objects still being constructed may be in an inconsistent state;
    // only track those that report being done initializing.
    if (obj->IsDoneInitializing())
      tracker.Track(obj);
  });
}
1137
// Near-heap-limit callback installed for --heapsnapshot-near-heap-limit.
// When V8 approaches the current heap limit, writes a heap snapshot to the
// diagnostic directory (unless that looks too risky or we are re-entered
// while already snapshotting) and returns the heap limit V8 should continue
// with. Returning `initial_heap_limit` relies on
// AutomaticallyRestoreInitialHeapLimit() to restore it once usage drops.
size_t Environment::NearHeapLimitCallback(void* data,
                                          size_t current_heap_limit,
                                          size_t initial_heap_limit) {
  Environment* env = static_cast<Environment*>(data);

  Debug(env,
        DebugCategory::DIAGNOSTICS,
        "Invoked NearHeapLimitCallback, processing=%d, "
        "current_limit=%" PRIu64 ", "
        "initial_limit=%" PRIu64 "\n",
        env->is_processing_heap_limit_callback_,
        static_cast<uint64_t>(current_heap_limit),
        static_cast<uint64_t>(initial_heap_limit));

  // Sum up young- vs old-generation usage to estimate how much extra memory
  // a snapshot (which may promote young objects) could require.
  size_t max_young_gen_size = env->isolate_data()->max_young_gen_size;
  size_t young_gen_size = 0;
  size_t old_gen_size = 0;

  v8::HeapSpaceStatistics stats;
  size_t num_heap_spaces = env->isolate()->NumberOfHeapSpaces();
  for (size_t i = 0; i < num_heap_spaces; ++i) {
    env->isolate()->GetHeapSpaceStatistics(&stats, i);
    if (strcmp(stats.space_name(), "new_space") == 0 ||
        strcmp(stats.space_name(), "new_large_object_space") == 0) {
      young_gen_size += stats.space_used_size();
    } else {
      old_gen_size += stats.space_used_size();
    }
  }

  Debug(env,
        DebugCategory::DIAGNOSTICS,
        "max_young_gen_size=%" PRIu64 ", "
        "young_gen_size=%" PRIu64 ", "
        "old_gen_size=%" PRIu64 ", "
        "total_size=%" PRIu64 "\n",
        static_cast<uint64_t>(max_young_gen_size),
        static_cast<uint64_t>(young_gen_size),
        static_cast<uint64_t>(old_gen_size),
        static_cast<uint64_t>(young_gen_size + old_gen_size));

  uint64_t available = GuessMemoryAvailableToTheProcess();
  // TODO(joyeecheung): get a better estimate about the native memory
  // usage into the overhead, e.g. based on the count of objects.
  uint64_t estimated_overhead = max_young_gen_size;
  Debug(env,
        DebugCategory::DIAGNOSTICS,
        "Estimated available memory=%" PRIu64 ", "
        "estimated overhead=%" PRIu64 "\n",
        static_cast<uint64_t>(available),
        static_cast<uint64_t>(estimated_overhead));

  // This might be hit when the snapshot is being taken in another
  // NearHeapLimitCallback invocation.
  // When taking the snapshot, objects in the young generation may be
  // promoted to the old generation, result in increased heap usage,
  // but it should be no more than the young generation size.
  // Ideally, this should be as small as possible - the heap limit
  // can only be restored when the heap usage falls down below the
  // new limit, so in a heap with unbounded growth the isolate
  // may eventually crash with this new limit - effectively raising
  // the heap limit to the new one.
  if (env->is_processing_heap_limit_callback_) {
    size_t new_limit = initial_heap_limit + max_young_gen_size;
    Debug(env,
          DebugCategory::DIAGNOSTICS,
          "Not generating snapshots in nested callback. "
          "new_limit=%" PRIu64 "\n",
          static_cast<uint64_t>(new_limit));
    return new_limit;
  }

  // Estimate whether the snapshot is going to use up all the memory
  // available to the process. If so, just give up to prevent the system
  // from killing the process for a system OOM.
  if (estimated_overhead > available) {
    Debug(env,
          DebugCategory::DIAGNOSTICS,
          "Not generating snapshots because it's too risky.\n");
    env->isolate()->RemoveNearHeapLimitCallback(NearHeapLimitCallback,
                                                initial_heap_limit);
    return current_heap_limit;
  }

  // Take the snapshot synchronously.
  env->is_processing_heap_limit_callback_ = true;

  std::string dir = env->options()->diagnostic_dir;
  if (dir.empty()) {
    dir = env->GetCwd();
  }
  DiagnosticFilename name(env, "Heap", "heapsnapshot");
  std::string filename = dir + kPathSeparator + (*name);

  Debug(env, DebugCategory::DIAGNOSTICS, "Start generating %s...\n", *name);

  // Remove the callback first in case it's triggered when generating
  // the snapshot.
  env->isolate()->RemoveNearHeapLimitCallback(NearHeapLimitCallback,
                                              initial_heap_limit);

  heap::WriteSnapshot(env->isolate(), filename.c_str());
  env->heap_limit_snapshot_taken_ += 1;

  // Don't take more snapshots than the number specified by
  // --heapsnapshot-near-heap-limit.
  if (env->heap_limit_snapshot_taken_ <
      env->options_->heap_snapshot_near_heap_limit) {
    env->isolate()->AddNearHeapLimitCallback(NearHeapLimitCallback, env);
  }

  FPrintF(stderr, "Wrote snapshot to %s\n", filename.c_str());
  // Tell V8 to reset the heap limit once the heap usage falls down to
  // 95% of the initial limit.
  env->isolate()->AutomaticallyRestoreInitialHeapLimit(0.95);

  env->is_processing_heap_limit_callback_ = false;
  return initial_heap_limit;
}
1257
// Size of the Environment object itself for memory tracking, excluding
// embedded members that report themselves as separate MemoryInfo() nodes
// (so their bytes are not counted twice).
inline size_t Environment::SelfSize() const {
  size_t size = sizeof(*this);
  // Remove non pointer fields that will be tracked in MemoryInfo()
  // TODO(joyeecheung): refactor the MemoryTracker interface so
  // this can be done for common types within the Track* calls automatically
  // if a certain scope is entered.
  size -= sizeof(async_hooks_);
  size -= sizeof(tick_info_);
  size -= sizeof(immediate_info_);
  return size;
}
1269
// Reports the memory retained by this Environment for heap snapshots.
// Fields listed here appear as child nodes of the Environment node; see
// SelfSize() for members whose bytes are shifted out of the parent.
void Environment::MemoryInfo(MemoryTracker* tracker) const {
  // Iteratable STLs have their own sizes subtracted from the parent
  // by default.
  tracker->TrackField("isolate_data", isolate_data_);
  tracker->TrackField("native_modules_with_cache", native_modules_with_cache);
  tracker->TrackField("native_modules_without_cache",
                      native_modules_without_cache);
  tracker->TrackField("destroy_async_id_list", destroy_async_id_list_);
  tracker->TrackField("exec_argv", exec_argv_);
  tracker->TrackField("should_abort_on_uncaught_toggle",
                      should_abort_on_uncaught_toggle_);
  tracker->TrackField("stream_base_state", stream_base_state_);
  tracker->TrackFieldWithSize(
      "cleanup_hooks", cleanup_hooks_.size() * sizeof(CleanupHookCallback));
  tracker->TrackField("async_hooks", async_hooks_);
  tracker->TrackField("immediate_info", immediate_info_);
  tracker->TrackField("tick_info", tick_info_);

  // Track every strong persistent JS value held by the Environment, one
  // node per property, named after the property accessor.
#define V(PropertyName, TypeName)                                             \
  tracker->TrackField(#PropertyName, PropertyName());
  ENVIRONMENT_STRONG_PERSISTENT_VALUES(V)
#undef V

  // FIXME(joyeecheung): track other fields in Environment.
  // Currently MemoryTracker is unable to track these
  // correctly:
  // - Internal types that do not implement MemoryRetainer yet
  // - STL containers with MemoryRetainer* inside
  // - STL containers with numeric types inside that should not have their
  //   nodes elided e.g. numeric keys in maps.
  // We also need to make sure that when we add a non-pointer field as its own
  // node, we shift its sizeof() size out of the Environment node.
}
1303
// Clears the isolate's list of "kept" objects (WeakRef targets that V8
// keeps alive within a synchronous run of execution).
void Environment::RunWeakRefCleanup() {
  isolate()->ClearKeptObjects();
}
1307
1308 // Not really any better place than env.cc at this moment.
DeleteMe(void * data)1309 void BaseObject::DeleteMe(void* data) {
1310 BaseObject* self = static_cast<BaseObject*>(data);
1311 if (self->has_pointer_data() &&
1312 self->pointer_data()->strong_ptr_count > 0) {
1313 return self->Detach();
1314 }
1315 delete self;
1316 }
1317
// Default: a BaseObject counts as fully initialized; subclasses with
// multi-step initialization may override this.
bool BaseObject::IsDoneInitializing() const { return true; }
1319
// Returns the JS object this BaseObject wraps.
Local<Object> BaseObject::WrappedObject() const {
  return object();
}
1323
// A BaseObject holding a strong (non-weak) persistent handle keeps its JS
// object alive and therefore acts as a root for memory tracking.
bool BaseObject::IsRootNode() const {
  return !persistent_handle_.IsWeak();
}
1327
GetConstructorTemplate(Environment * env)1328 Local<FunctionTemplate> BaseObject::GetConstructorTemplate(Environment* env) {
1329 Local<FunctionTemplate> tmpl = env->base_object_ctor_template();
1330 if (tmpl.IsEmpty()) {
1331 tmpl = env->NewFunctionTemplate(nullptr);
1332 tmpl->SetClassName(FIXED_ONE_BYTE_STRING(env->isolate(), "BaseObject"));
1333 env->set_base_object_ctor_template(tmpl);
1334 }
1335 return tmpl;
1336 }
1337
// Used by Environment::VerifyNoStrongBaseObjects(): by default an object
// is unsuspicious at clean exit if it is weak or detached. Subclasses that
// are legitimately alive at exit (e.g. unrefed handles) may override this.
bool BaseObject::IsNotIndicativeOfMemoryLeakAtExit() const {
  return IsWeakOrDetached();
}
1341
1342 } // namespace node
1343