1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define ATRACE_TAG ATRACE_TAG_DALVIK
18
19 #include "palette/palette.h"
20
21 #include <errno.h>
22 #include <sys/resource.h>
23 #include <sys/time.h>
24 #include <unistd.h>
25
26 #include <filesystem>
27 #include <mutex>
28
29 #include <android-base/file.h>
30 #include <android-base/logging.h>
31 #include <android-base/macros.h>
32 #include <cutils/ashmem.h>
33 #include <cutils/trace.h>
34 #include <processgroup/processgroup.h>
35 #include <processgroup/sched_policy.h>
36 #include <selinux/selinux.h>
37 #include <tombstoned/tombstoned.h>
38 #include <utils/Thread.h>
39
40 #include "palette_system.h"
41
// Conversion map for "nice" values.
//
// We use Android thread priority constants to be consistent with the rest
// of the system. In some cases adjacent entries may overlap.
//
// Indexed by (managed_priority - art::palette::kMinManagedThreadPriority);
// managed priorities run from 1 (MIN_PRIORITY) to 10 (MAX_PRIORITY), so the
// table holds kNumManagedThreadPriorities entries, niciest first.
static const int kNiceValues[art::palette::kNumManagedThreadPriorities] = {
    ANDROID_PRIORITY_LOWEST,  // 1 (MIN_PRIORITY)
    ANDROID_PRIORITY_BACKGROUND + 6,
    ANDROID_PRIORITY_BACKGROUND + 3,
    ANDROID_PRIORITY_BACKGROUND,
    ANDROID_PRIORITY_NORMAL,  // 5 (NORM_PRIORITY)
    ANDROID_PRIORITY_NORMAL - 2,
    ANDROID_PRIORITY_NORMAL - 4,
    ANDROID_PRIORITY_URGENT_DISPLAY + 3,
    ANDROID_PRIORITY_URGENT_DISPLAY + 2,
    ANDROID_PRIORITY_URGENT_DISPLAY  // 10 (MAX_PRIORITY)
};
59
PaletteSchedSetPriority(int32_t tid,int32_t managed_priority)60 palette_status_t PaletteSchedSetPriority(int32_t tid, int32_t managed_priority) {
61 if (managed_priority < art::palette::kMinManagedThreadPriority ||
62 managed_priority > art::palette::kMaxManagedThreadPriority) {
63 return PALETTE_STATUS_INVALID_ARGUMENT;
64 }
65 int new_nice = kNiceValues[managed_priority - art::palette::kMinManagedThreadPriority];
66 int curr_nice = getpriority(PRIO_PROCESS, tid);
67
68 if (curr_nice == new_nice) {
69 return PALETTE_STATUS_OK;
70 }
71
72 if (new_nice >= ANDROID_PRIORITY_BACKGROUND) {
73 SetTaskProfiles(tid, {"SCHED_SP_BACKGROUND"}, true);
74 } else if (curr_nice >= ANDROID_PRIORITY_BACKGROUND) {
75 SchedPolicy policy;
76 // Change to the sched policy group of the process.
77 if (get_sched_policy(getpid(), &policy) != 0) {
78 policy = SP_FOREGROUND;
79 }
80 SetTaskProfiles(tid, {get_sched_policy_profile_name(policy)}, true);
81 }
82
83 if (setpriority(PRIO_PROCESS, tid, new_nice) != 0) {
84 return PALETTE_STATUS_CHECK_ERRNO;
85 }
86 return PALETTE_STATUS_OK;
87 }
88
PaletteSchedGetPriority(int32_t tid,int32_t * managed_priority)89 palette_status_t PaletteSchedGetPriority(int32_t tid, /*out*/ int32_t* managed_priority) {
90 errno = 0;
91 int native_priority = getpriority(PRIO_PROCESS, tid);
92 if (native_priority == -1 && errno != 0) {
93 *managed_priority = art::palette::kNormalManagedThreadPriority;
94 return PALETTE_STATUS_CHECK_ERRNO;
95 }
96
97 for (int p = art::palette::kMinManagedThreadPriority;
98 p <= art::palette::kMaxManagedThreadPriority; p = p + 1) {
99 int index = p - art::palette::kMinManagedThreadPriority;
100 if (native_priority >= kNiceValues[index]) {
101 *managed_priority = p;
102 return PALETTE_STATUS_OK;
103 }
104 }
105 *managed_priority = art::palette::kMaxManagedThreadPriority;
106 return PALETTE_STATUS_OK;
107 }
108
// Writes the managed thread stack traces of this process to tombstoned.
// Best-effort: on any failure the reason has already been logged and
// PALETTE_STATUS_FAILED_CHECK_LOG is returned.
palette_status_t PaletteWriteCrashThreadStacks(/*in*/ const char* stacks, size_t stacks_len) {
  android::base::unique_fd tombstone_fd;
  android::base::unique_fd output_fd;

  if (!tombstoned_connect(getpid(), &tombstone_fd, &output_fd, kDebuggerdJavaBacktrace)) {
    // Failure here could be due to file descriptor resource exhaustion
    // so write the stack trace message to the log in case it helps
    // debug that.
    LOG(INFO) << std::string_view(stacks, stacks_len);
    // tombstoned_connect() logs failure reason.
    return PALETTE_STATUS_FAILED_CHECK_LOG;
  }

  palette_status_t status = PALETTE_STATUS_OK;
  if (!android::base::WriteFully(output_fd, stacks, stacks_len)) {
    PLOG(ERROR) << "Failed to write tombstoned output";
    // Truncate so a partial write is not left behind as if it were complete.
    TEMP_FAILURE_RETRY(ftruncate(output_fd, 0));
    status = PALETTE_STATUS_FAILED_CHECK_LOG;
  }

  if (TEMP_FAILURE_RETRY(fdatasync(output_fd)) == -1 && errno != EINVAL) {
    // Ignore EINVAL so we don't report failure if we just tried to flush a pipe
    // or socket.
    if (status == PALETTE_STATUS_OK) {
      PLOG(ERROR) << "Failed to fsync tombstoned output";
      status = PALETTE_STATUS_FAILED_CHECK_LOG;
    }
    // Sync failed: discard the (possibly unflushed) content and try to sync
    // the now-empty file.
    TEMP_FAILURE_RETRY(ftruncate(output_fd, 0));
    TEMP_FAILURE_RETRY(fdatasync(output_fd));
  }

  // Close explicitly (releasing from unique_fd) so a close error can be
  // observed and reported rather than silently dropped in the destructor.
  if (close(output_fd.release()) == -1 && errno != EINTR) {
    if (status == PALETTE_STATUS_OK) {
      PLOG(ERROR) << "Failed to close tombstoned output";
      status = PALETTE_STATUS_FAILED_CHECK_LOG;
    }
  }

  if (!tombstoned_notify_completion(tombstone_fd)) {
    // tombstoned_notify_completion() logs failure.
    status = PALETTE_STATUS_FAILED_CHECK_LOG;
  }

  return status;
}
154
PaletteTraceEnabled(bool * enabled)155 palette_status_t PaletteTraceEnabled(/*out*/ bool* enabled) {
156 *enabled = (ATRACE_ENABLED() != 0) ? true : false;
157 return PALETTE_STATUS_OK;
158 }
159
// Opens a trace section named `name` via ATRACE_BEGIN; should be balanced by
// a later PaletteTraceEnd(). Always returns PALETTE_STATUS_OK.
palette_status_t PaletteTraceBegin(const char* name) {
  ATRACE_BEGIN(name);
  return PALETTE_STATUS_OK;
}
164
// Closes the most recently opened trace section via ATRACE_END.
// Always returns PALETTE_STATUS_OK.
palette_status_t PaletteTraceEnd() {
  ATRACE_END();
  return PALETTE_STATUS_OK;
}
169
// Records an integer trace counter `name` = `value` via ATRACE_INT.
// Always returns PALETTE_STATUS_OK.
palette_status_t PaletteTraceIntegerValue(const char* name, int32_t value) {
  ATRACE_INT(name, value);
  return PALETTE_STATUS_OK;
}
174
// Flag whether to use legacy ashmem or current (b/139855428).
// Starts false (current behavior); PaletteAshmemCreateRegion flips it to true
// once the legacy /dev/ashmem path is observed to work, so subsequent calls
// skip the libcutils attempt.
static std::atomic_bool g_assume_legacy_ashmemd(false);
177
PaletteAshmemCreateRegion(const char * name,size_t size,int * fd)178 palette_status_t PaletteAshmemCreateRegion(const char* name, size_t size, int* fd) {
179 if (g_assume_legacy_ashmemd.load(std::memory_order_acquire) == false) {
180 // Current platform behaviour which open ashmem fd in process (b/139855428)
181 *fd = ashmem_create_region(name, size);
182 if (*fd >= 0) {
183 return PALETTE_STATUS_OK;
184 }
185 }
186
187 // Try legacy behavior just required for ART build bots which may be running tests on older
188 // platform builds.
189
190 // We implement our own ashmem creation, as the libcutils implementation does
191 // a binder call, and our only use of ashmem in ART is for zygote, which
192 // cannot communicate to binder.
193 *fd = TEMP_FAILURE_RETRY(open("/dev/ashmem", O_RDWR | O_CLOEXEC));
194 if (*fd == -1) {
195 return PALETTE_STATUS_CHECK_ERRNO;
196 }
197
198 if (TEMP_FAILURE_RETRY(ioctl(*fd, ASHMEM_SET_SIZE, size)) < 0) {
199 goto error;
200 }
201
202 if (name != nullptr) {
203 char buf[ASHMEM_NAME_LEN] = {0};
204 strlcpy(buf, name, sizeof(buf));
205 if (TEMP_FAILURE_RETRY(ioctl(*fd, ASHMEM_SET_NAME, buf)) < 0) {
206 goto error;
207 }
208 }
209
210 g_assume_legacy_ashmemd.store(true, std::memory_order_release);
211 return PALETTE_STATUS_OK;
212
213 error:
214 // Save errno before closing.
215 int save_errno = errno;
216 close(*fd);
217 errno = save_errno;
218 return PALETTE_STATUS_CHECK_ERRNO;
219 }
220
PaletteAshmemSetProtRegion(int fd,int prot)221 palette_status_t PaletteAshmemSetProtRegion(int fd, int prot) {
222 if (!g_assume_legacy_ashmemd.load(std::memory_order_acquire)) {
223 if (ashmem_set_prot_region(fd, prot) < 0) {
224 return PALETTE_STATUS_CHECK_ERRNO;
225 }
226 } else if (TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_SET_PROT_MASK, prot)) < 0) {
227 // Legacy behavior just required for ART build bots which may be running tests on older
228 // platform builds.
229 return PALETTE_STATUS_CHECK_ERRNO;
230 }
231 return PALETTE_STATUS_OK;
232 }
233
PaletteCreateOdrefreshStagingDirectory(const char ** staging_dir)234 palette_status_t PaletteCreateOdrefreshStagingDirectory(const char** staging_dir) {
235 static constexpr const char* kStagingDirectory = "/data/misc/apexdata/com.android.art/staging";
236
237 std::error_code ec;
238 if (std::filesystem::exists(kStagingDirectory, ec)) {
239 if (!std::filesystem::remove_all(kStagingDirectory, ec)) {
240 LOG(ERROR) << ec.message()
241 << "Could not remove existing staging directory: " << kStagingDirectory;
242 DCHECK_EQ(ec.value(), errno);
243 return PALETTE_STATUS_CHECK_ERRNO;
244 }
245 }
246
247 if (mkdir(kStagingDirectory, S_IRWXU) != 0) {
248 PLOG(ERROR) << "Could not set permissions on staging directory: " << kStagingDirectory;
249 return PALETTE_STATUS_CHECK_ERRNO;
250 }
251
252 if (setfilecon(kStagingDirectory, "u:object_r:apex_art_staging_data_file:s0") != 0) {
253 PLOG(ERROR) << "Could not set label on staging directory: " << kStagingDirectory;
254 return PALETTE_STATUS_CHECK_ERRNO;
255 }
256
257 *staging_dir = kStagingDirectory;
258 return PALETTE_STATUS_OK;
259 }
260