// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/threading/platform_thread.h"

#include <errno.h>
#include <sched.h>
#include <stddef.h>
#include <cstdint>
#include <atomic>

#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/metrics/field_trial_params.h"
#include "base/notreached.h"
#include "base/process/internal_linux.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread_internal_posix.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_type_delegate.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_AIX)
#include <pthread.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#endif

namespace base {

#if BUILDFLAG(IS_CHROMEOS)
BASE_FEATURE(kSchedUtilHints,
             "SchedUtilHints",
             base::FEATURE_ENABLED_BY_DEFAULT);
#endif

namespace {

#if !BUILDFLAG(IS_NACL)
ThreadTypeDelegate* g_thread_type_delegate = nullptr;
#endif

#if BUILDFLAG(IS_CHROMEOS)
std::atomic<bool> g_use_sched_util(true);
std::atomic<bool> g_scheduler_hints_adjusted(false);

// When a device doesn't specify uclamp values via chrome switches,
// the default boost for urgent tasks is hardcoded here as 20%.
// Higher values can lead to higher power consumption, so this value was
// chosen conservatively: it did not show a noticeable power increase in
// several perf/power tests.
const int kSchedulerBoostDef = 20;
const int kSchedulerLimitDef = 100;
const bool kSchedulerUseLatencyTuneDef = true;

int g_scheduler_boost_adj;
int g_scheduler_limit_adj;
bool g_scheduler_use_latency_tune_adj;

#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_AIX)

// Defined by linux uclamp ABI of sched_setattr().
const uint32_t kSchedulerUclampMin = 0;
const uint32_t kSchedulerUclampMax = 1024;
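// For example, the default 20% boost is mapped by the rounding arithmetic in
// SetThreadLatencySensitivity() to (20 * 1024 + 50) / 100 = 205 of the 1024
// available utilization units.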

// sched_attr is used to set scheduler attributes for Linux. It is not a POSIX
// struct and glibc does not expose it.
struct sched_attr {
  uint32_t size;

  uint32_t sched_policy;
  uint64_t sched_flags;

  /* SCHED_NORMAL, SCHED_BATCH */
  int32_t sched_nice;

  /* SCHED_FIFO, SCHED_RR */
  uint32_t sched_priority;

  /* SCHED_DEADLINE */
  uint64_t sched_runtime;
  uint64_t sched_deadline;
  uint64_t sched_period;

  /* Utilization hints */
  uint32_t sched_util_min;
  uint32_t sched_util_max;
};
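// |size| is used by the kernel for ABI versioning: the callers below always
// fill it with sizeof(sched_attr), bail out if sched_getattr() reports a
// different size, and tolerate E2BIG from sched_setattr() on kernels that only
// know a smaller struct.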

#if !defined(__NR_sched_setattr)
#if defined(__x86_64__)
#define __NR_sched_setattr 314
#define __NR_sched_getattr 315
#elif defined(__i386__)
#define __NR_sched_setattr 351
#define __NR_sched_getattr 352
#elif defined(__arm__)
#define __NR_sched_setattr 380
#define __NR_sched_getattr 381
#elif defined(__aarch64__)
#define __NR_sched_setattr 274
#define __NR_sched_getattr 275
#else
#error "We don't have an __NR_sched_setattr for this architecture."
#endif
#endif

#if !defined(SCHED_FLAG_UTIL_CLAMP_MIN)
#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20
#endif

#if !defined(SCHED_FLAG_UTIL_CLAMP_MAX)
#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40
#endif

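// Thin wrappers around the sched_getattr()/sched_setattr() syscalls, which
// glibc does not expose. Illustrative use (a sketch mirroring what
// SetThreadLatencySensitivity() below does for real threads):
//   struct sched_attr attr;
//   if (sched_getattr(0 /* current thread */, &attr, sizeof(attr), 0) == 0) {
//     attr.sched_flags |= SCHED_FLAG_UTIL_CLAMP_MIN;
//     attr.sched_util_min = 205;  // ~20% of the 1024-unit uclamp range.
//     attr.size = sizeof(attr);
//     sched_setattr(0, &attr, 0);
//   }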
long sched_getattr(pid_t pid,
                   const struct sched_attr* attr,
                   unsigned int size,
                   unsigned int flags) {
  return syscall(__NR_sched_getattr, pid, attr, size, flags);
}

long sched_setattr(pid_t pid,
                   const struct sched_attr* attr,
                   unsigned int flags) {
  return syscall(__NR_sched_setattr, pid, attr, flags);
}
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_AIX)
#endif  // BUILDFLAG(IS_CHROMEOS)

#if !BUILDFLAG(IS_NACL)
const FilePath::CharType kCgroupDirectory[] =
    FILE_PATH_LITERAL("/sys/fs/cgroup");

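// For example, with the constants above a kDisplayCritical thread in the
// cpuset hierarchy ends up under /sys/fs/cgroup/cpuset/chrome/urgent, while
// kBackground maps to /sys/fs/cgroup/cpuset/chrome/non-urgent (and similarly
// for the schedtune hierarchy).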
FilePath ThreadTypeToCgroupDirectory(const FilePath& cgroup_filepath,
                                     ThreadType thread_type) {
  switch (thread_type) {
    case ThreadType::kBackground:
    case ThreadType::kUtility:
    case ThreadType::kResourceEfficient:
      return cgroup_filepath.Append(FILE_PATH_LITERAL("non-urgent"));
    case ThreadType::kDefault:
      return cgroup_filepath;
    case ThreadType::kCompositing:
#if BUILDFLAG(IS_CHROMEOS)
      // On ChromeOS, kCompositing is also considered urgent.
      return cgroup_filepath.Append(FILE_PATH_LITERAL("urgent"));
#else
      // TODO(1329208): Experiment with bringing IS_LINUX in line with
      // IS_CHROMEOS.
      return cgroup_filepath;
#endif
    case ThreadType::kDisplayCritical:
    case ThreadType::kRealtimeAudio:
      return cgroup_filepath.Append(FILE_PATH_LITERAL("urgent"));
  }
  NOTREACHED();
  return FilePath();
}

void SetThreadCgroup(PlatformThreadId thread_id,
                     const FilePath& cgroup_directory) {
  FilePath tasks_filepath = cgroup_directory.Append(FILE_PATH_LITERAL("tasks"));
  std::string tid = NumberToString(thread_id);
  // TODO(crbug.com/1333521): Remove cast.
  const int size = static_cast<int>(tid.size());
  int bytes_written = WriteFile(tasks_filepath, tid.data(), size);
  if (bytes_written != size) {
    DVLOG(1) << "Failed to add " << tid << " to " << tasks_filepath.value();
  }
}

void SetThreadCgroupForThreadType(PlatformThreadId thread_id,
                                  const FilePath& cgroup_filepath,
                                  ThreadType thread_type) {
  // Append "chrome" suffix.
  FilePath cgroup_directory = ThreadTypeToCgroupDirectory(
      cgroup_filepath.Append(FILE_PATH_LITERAL("chrome")), thread_type);

  // Silently ignore request if cgroup directory doesn't exist.
  if (!DirectoryExists(cgroup_directory))
    return;

  SetThreadCgroup(thread_id, cgroup_directory);
}

#if BUILDFLAG(IS_CHROMEOS)
// thread_id should always be the value in the root PID namespace (see
// FindThreadID).
void SetThreadLatencySensitivity(ProcessId process_id,
                                 PlatformThreadId thread_id,
                                 ThreadType thread_type) {
  struct sched_attr attr;
  bool is_urgent = false;
  int boost_percent, limit_percent;
  int latency_sensitive_urgent;

  // Scheduler boost defaults to true unless disabled.
  if (!g_use_sched_util.load())
    return;

  // The FieldTrial API can only be called once features have been parsed.
  if (g_scheduler_hints_adjusted.load()) {
    boost_percent = g_scheduler_boost_adj;
    limit_percent = g_scheduler_limit_adj;
    latency_sensitive_urgent = g_scheduler_use_latency_tune_adj;
  } else {
    boost_percent = kSchedulerBoostDef;
    limit_percent = kSchedulerLimitDef;
    latency_sensitive_urgent = kSchedulerUseLatencyTuneDef;
  }

  // The thread_id passed in here is either 0 (in which case we set the
  // attributes for the current thread), or a tid that is not the namespace
  // tid but the global one. The conversion from NS tid to global tid is done
  // by the callers using FindThreadID().
  std::string thread_dir;
  if (thread_id)
    thread_dir = base::StringPrintf("/proc/%d/task/%d/", process_id, thread_id);
  else
    thread_dir = "/proc/thread-self/";

  // Silently ignore request if thread directory doesn't exist.
  if (!DirectoryExists(FilePath(thread_dir)))
    return;

  FilePath latency_sensitive_file = FilePath(thread_dir + "latency_sensitive");

  if (!PathExists(latency_sensitive_file))
    return;

  // Silently ignore if getattr fails due to sandboxing.
  if (sched_getattr(thread_id, &attr, sizeof(attr), 0) == -1 ||
      attr.size != sizeof(attr))
    return;

  switch (thread_type) {
    case ThreadType::kBackground:
    case ThreadType::kUtility:
    case ThreadType::kResourceEfficient:
    case ThreadType::kDefault:
      break;
    case ThreadType::kCompositing:
    case ThreadType::kDisplayCritical:
      // Compositing and display critical threads need a boost for consistent
      // 60 fps.
      [[fallthrough]];
    case ThreadType::kRealtimeAudio:
      is_urgent = true;
      break;
  }

  if (is_urgent && latency_sensitive_urgent) {
    PLOG_IF(ERROR, !WriteFile(latency_sensitive_file, "1", 1))
        << "Failed to write latency file.\n";
  } else {
    PLOG_IF(ERROR, !WriteFile(latency_sensitive_file, "0", 1))
        << "Failed to write latency file.\n";
  }

  attr.sched_flags |= SCHED_FLAG_UTIL_CLAMP_MIN;
  attr.sched_flags |= SCHED_FLAG_UTIL_CLAMP_MAX;

  if (is_urgent) {
    attr.sched_util_min =
        (saturated_cast<uint32_t>(boost_percent) * kSchedulerUclampMax + 50) /
        100;
    attr.sched_util_max = kSchedulerUclampMax;
  } else {
    attr.sched_util_min = kSchedulerUclampMin;
    attr.sched_util_max =
        (saturated_cast<uint32_t>(limit_percent) * kSchedulerUclampMax + 50) /
        100;
  }

  DCHECK_GE(attr.sched_util_min, kSchedulerUclampMin);
  DCHECK_LE(attr.sched_util_max, kSchedulerUclampMax);

  attr.size = sizeof(struct sched_attr);
  if (sched_setattr(thread_id, &attr, 0) == -1) {
    // We log it as an error because, if the PathExists check above succeeded,
    // we expect this syscall to also work since the kernel is new-ish.
    PLOG_IF(ERROR, errno != E2BIG)
        << "Failed to set sched_util_min, performance may be affected.\n";
  }
}
#endif

void SetThreadCgroupsForThreadType(PlatformThreadId thread_id,
                                   ThreadType thread_type) {
  FilePath cgroup_filepath(kCgroupDirectory);
  SetThreadCgroupForThreadType(
      thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("cpuset")),
      thread_type);
  SetThreadCgroupForThreadType(
      thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("schedtune")),
      thread_type);
}
#endif
}  // namespace

namespace internal {

namespace {
#if !BUILDFLAG(IS_NACL)
const struct sched_param kRealTimePrio = {8};
#endif
}  // namespace

const ThreadPriorityToNiceValuePairForTest
    kThreadPriorityToNiceValueMapForTest[5] = {
        {ThreadPriorityForTest::kRealtimeAudio, -10},
        {ThreadPriorityForTest::kDisplay, -8},
        {ThreadPriorityForTest::kNormal, 0},
        {ThreadPriorityForTest::kUtility, 1},
        {ThreadPriorityForTest::kBackground, 10},
};

const ThreadTypeToNiceValuePair kThreadTypeToNiceValueMap[7] = {
    {ThreadType::kBackground, 10},       {ThreadType::kUtility, 1},
    {ThreadType::kResourceEfficient, 0}, {ThreadType::kDefault, 0},
#if BUILDFLAG(IS_CHROMEOS)
    {ThreadType::kCompositing, -8},
#else
    // TODO(1329208): Experiment with bringing IS_LINUX in line with
    // IS_CHROMEOS.
    {ThreadType::kCompositing, 0},
#endif
    {ThreadType::kDisplayCritical, -8},  {ThreadType::kRealtimeAudio, -10},
};
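// Lower nice values mean higher CPU scheduling priority, so e.g. a
// kRealtimeAudio thread (nice -10) is favored over a kBackground thread
// (nice 10) when both are runnable.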

bool CanSetThreadTypeToRealtimeAudio() {
#if !BUILDFLAG(IS_NACL)
  // A non-zero soft-limit on RLIMIT_RTPRIO is required to be allowed to invoke
  // pthread_setschedparam in SetCurrentThreadTypeForPlatform().
  struct rlimit rlim;
  // getrlimit() returns 0 on success; a failed query is treated as "not
  // allowed".
  return getrlimit(RLIMIT_RTPRIO, &rlim) == 0 && rlim.rlim_cur != 0;
#else
  return false;
#endif
}

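// Returns true only if the thread type change was fully handled here
// (currently the SCHED_RR realtime-audio path); otherwise the shared POSIX
// caller is expected to fall back to nice-value-based priorities.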
bool SetCurrentThreadTypeForPlatform(ThreadType thread_type,
                                     MessagePumpType pump_type_hint) {
#if !BUILDFLAG(IS_NACL)
  const PlatformThreadId tid = PlatformThread::CurrentId();

  if (g_thread_type_delegate &&
      g_thread_type_delegate->HandleThreadTypeChange(tid, thread_type)) {
    return true;
  }

  // For legacy schedtune interface
  SetThreadCgroupsForThreadType(tid, thread_type);

#if BUILDFLAG(IS_CHROMEOS)
  // For upstream uclamp interface. We try both legacy (schedtune, as done
  // earlier) and upstream (uclamp) interfaces, and whichever succeeds wins.
  SetThreadLatencySensitivity(0 /* ignore */, 0 /* thread-self */, thread_type);
#endif

  return thread_type == ThreadType::kRealtimeAudio &&
         pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
#else
  return false;
#endif
}

absl::optional<ThreadPriorityForTest>
GetCurrentThreadPriorityForPlatformForTest() {
#if !BUILDFLAG(IS_NACL)
  int maybe_sched_rr = 0;
  struct sched_param maybe_realtime_prio = {0};
  if (pthread_getschedparam(pthread_self(), &maybe_sched_rr,
                            &maybe_realtime_prio) == 0 &&
      maybe_sched_rr == SCHED_RR &&
      maybe_realtime_prio.sched_priority == kRealTimePrio.sched_priority) {
    return absl::make_optional(ThreadPriorityForTest::kRealtimeAudio);
  }
#endif
  return absl::nullopt;
}

}  // namespace internal

// static
void PlatformThread::SetName(const std::string& name) {
  ThreadIdNameManager::GetInstance()->SetName(name);

#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_AIX)
  // On Linux we can get the thread names to show up in the debugger by setting
  // the process name for the LWP. We don't want to do this for the main
  // thread because that would rename the process, causing tools like killall
  // to stop working.
  if (PlatformThread::CurrentId() == getpid())
    return;

  // http://0pointer.de/blog/projects/name-your-threads.html
  // Set the name for the LWP (which gets truncated to 15 characters).
  // Note that glibc also has a 'pthread_setname_np' API, but it may not be
  // available everywhere and its only benefit over using prctl directly is
  // that it can set the name of threads other than the current thread.
  int err = prctl(PR_SET_NAME, name.c_str());
  // We expect EPERM failures in sandboxed processes, just ignore those.
  if (err < 0 && errno != EPERM)
    DPLOG(ERROR) << "prctl(PR_SET_NAME)";
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_AIX)
}

#if !BUILDFLAG(IS_NACL)
// static
void PlatformThread::SetThreadTypeDelegate(ThreadTypeDelegate* delegate) {
  // A component cannot override a delegate set by another component, thus
  // disallow setting a delegate when one already exists.
  DCHECK(!g_thread_type_delegate || !delegate);

  g_thread_type_delegate = delegate;
}
#endif

#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_AIX)
// static
void PlatformThread::SetThreadType(ProcessId process_id,
                                   PlatformThreadId thread_id,
                                   ThreadType thread_type) {
  // For legacy schedtune interface
  SetThreadCgroupsForThreadType(thread_id, thread_type);

#if BUILDFLAG(IS_CHROMEOS)
  // For upstream uclamp interface. We try both legacy (schedtune, as done
  // earlier) and upstream (uclamp) interfaces, and whichever succeeds wins.
  SetThreadLatencySensitivity(process_id, thread_id, thread_type);
#endif

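  // Note: on Linux, setpriority() with PRIO_PROCESS and a thread id adjusts
  // the nice value of that single thread rather than the whole process.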
  const int nice_setting = internal::ThreadTypeToNiceValue(thread_type);
  if (setpriority(PRIO_PROCESS, static_cast<id_t>(thread_id), nice_setting)) {
    DVPLOG(1) << "Failed to set nice value of thread (" << thread_id << ") to "
              << nice_setting;
  }
}
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_AIX)

#if BUILDFLAG(IS_CHROMEOS)
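// Applies any field-trial or command-line overrides to the scheduler hint
// defaults: the "BoostUrgent" field trial param takes precedence when present;
// otherwise a valid switches::kSchedulerBoostUrgent value is used, falling
// back to kSchedulerBoostDef.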
void PlatformThread::InitFeaturesPostFieldTrial() {
  DCHECK(FeatureList::GetInstance());
  if (!FeatureList::IsEnabled(kSchedUtilHints)) {
    g_use_sched_util.store(false);
    return;
  }

  int boost_def = kSchedulerBoostDef;

  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kSchedulerBoostUrgent)) {
    std::string boost_switch_str =
        CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
            switches::kSchedulerBoostUrgent);

    int boost_switch_val;
    if (!StringToInt(boost_switch_str, &boost_switch_val) ||
        boost_switch_val < 0 || boost_switch_val > 100) {
      DVPLOG(1) << "Invalid input for " << switches::kSchedulerBoostUrgent;
    } else {
      boost_def = boost_switch_val;
    }
  }

  g_scheduler_boost_adj = GetFieldTrialParamByFeatureAsInt(
      kSchedUtilHints, "BoostUrgent", boost_def);
  g_scheduler_limit_adj = GetFieldTrialParamByFeatureAsInt(
      kSchedUtilHints, "LimitNonUrgent", kSchedulerLimitDef);
  g_scheduler_use_latency_tune_adj = GetFieldTrialParamByFeatureAsBool(
      kSchedUtilHints, "LatencyTune", kSchedulerUseLatencyTuneDef);

  g_scheduler_hints_adjusted.store(true);
}
#endif

void InitThreading() {}

void TerminateOnThread() {}

size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
#if !defined(THREAD_SANITIZER) && defined(__GLIBC__)
  // Generally glibc sets ample default stack sizes, so use the default there.
  return 0;
#elif !defined(THREAD_SANITIZER)
  // Other libcs (uclibc, musl, etc) tend to use smaller stacks, often too small
  // for chromium. Make sure we have enough space to work with here. Note that
  // for comparison glibc stacks are generally around 8MB.
  return 2 * (1 << 20);
#else
  // ThreadSanitizer bloats the stack heavily. Evidence has been that the
  // default stack size isn't enough for some browser tests.
  return 2 * (1 << 23);  // 2 times 8192K (the default stack size on Linux).
#endif
}

}  // namespace base
