/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "storaged"

#include <stdint.h>
#include <string.h>
#include <time.h>

#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <android/content/pm/IPackageManagerNative.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/strings.h>
#include <android-base/stringprintf.h>
#include <binder/IServiceManager.h>
#include <log/log_event_list.h>

#include "storaged.h"
#include "storaged_uid_monitor.h"

using namespace android;
using namespace android::base;
using namespace android::content::pm;

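// Set when a UID shows up in the kernel stats for the first time; cleared
// once package names have been refreshed from the package_native service.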
static bool refresh_uid_names;

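// Public entry point: takes um_lock and returns a snapshot of per-UID I/O stats.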
std::unordered_map<uint32_t, struct uid_info> uid_monitor::get_uid_io_stats()
{
    std::unique_ptr<lock_t> lock(new lock_t(&um_lock));
    return get_uid_io_stats_locked();
}

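// Resolves package names for the given UIDs via the package_native binder
// service and writes them into the caller-provided name slots.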
static void get_uid_names(const std::vector<int>& uids, const std::vector<std::string*>& uid_names)
{
    sp<IServiceManager> sm = defaultServiceManager();
    if (sm == NULL) {
        LOG_TO(SYSTEM, ERROR) << "defaultServiceManager failed";
        return;
    }

    sp<IBinder> binder = sm->getService(String16("package_native"));
    if (binder == NULL) {
        LOG_TO(SYSTEM, ERROR) << "getService package_native failed";
        return;
    }

    sp<IPackageManagerNative> package_mgr = interface_cast<IPackageManagerNative>(binder);
    std::vector<std::string> names;
    binder::Status status = package_mgr->getNamesForUids(uids, &names);
    if (!status.isOk()) {
        LOG_TO(SYSTEM, ERROR) << "package_native::getNamesForUids failed: "
                              << status.exceptionMessage();
        return;
    }

    // getNamesForUids() is expected to return one entry per requested UID, in
    // the same order; UIDs with no package name keep their numeric fallback.
    for (uint32_t i = 0; i < uid_names.size(); i++) {
        if (!names[i].empty()) {
            *uid_names[i] = names[i];
        }
    }

    refresh_uid_names = false;
}

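// Reads the kernel's per-UID I/O stats file (UID_IO_STATS_PATH); each line
// carries foreground/background rchar, wchar, read_bytes, write_bytes and
// fsync counts for one UID. Caller must hold um_lock.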
std::unordered_map<uint32_t, struct uid_info> uid_monitor::get_uid_io_stats_locked()
{
    std::unordered_map<uint32_t, struct uid_info> uid_io_stats;
    std::string buffer;
    if (!ReadFileToString(UID_IO_STATS_PATH, &buffer)) {
        PLOG_TO(SYSTEM, ERROR) << UID_IO_STATS_PATH << ": ReadFileToString failed";
        return uid_io_stats;
    }

    std::vector<std::string> io_stats = Split(buffer, "\n");
    struct uid_info u;
    std::vector<int> uids;
    std::vector<std::string*> uid_names;

    for (uint32_t i = 0; i < io_stats.size(); i++) {
        if (io_stats[i].empty()) {
            continue;
        }
        std::vector<std::string> fields = Split(io_stats[i], " ");
        if (fields.size() < 11 ||
            !ParseUint(fields[0],  &u.uid) ||
            !ParseUint(fields[1],  &u.io[FOREGROUND].rchar) ||
            !ParseUint(fields[2],  &u.io[FOREGROUND].wchar) ||
            !ParseUint(fields[3],  &u.io[FOREGROUND].read_bytes) ||
            !ParseUint(fields[4],  &u.io[FOREGROUND].write_bytes) ||
            !ParseUint(fields[5],  &u.io[BACKGROUND].rchar) ||
            !ParseUint(fields[6],  &u.io[BACKGROUND].wchar) ||
            !ParseUint(fields[7],  &u.io[BACKGROUND].read_bytes) ||
            !ParseUint(fields[8],  &u.io[BACKGROUND].write_bytes) ||
            !ParseUint(fields[9],  &u.io[FOREGROUND].fsync) ||
            !ParseUint(fields[10], &u.io[BACKGROUND].fsync)) {
            LOG_TO(SYSTEM, WARNING) << "Invalid I/O stats: \""
                                    << io_stats[i] << "\"";
            continue;
        }

        // Default the name to the numeric UID; it is replaced with the name
        // from the previous snapshot, or refreshed for newly seen UIDs.
        uid_io_stats[u.uid] = u;
        uid_io_stats[u.uid].name = std::to_string(u.uid);
        uids.push_back(u.uid);
        uid_names.push_back(&uid_io_stats[u.uid].name);
        if (last_uid_io_stats.find(u.uid) == last_uid_io_stats.end()) {
            refresh_uid_names = true;
        } else {
            uid_io_stats[u.uid].name = last_uid_io_stats[u.uid].name;
        }
    }

    if (!uids.empty() && refresh_uid_names) {
        get_uid_names(uids, uid_names);
    }

    return uid_io_stats;
}

static const int MAX_UID_RECORDS_SIZE = 1000 * 48; // ~1000 uids for 48 hours

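// Total number of uid_record entries across all stored periods.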
static inline int records_size(
    const std::map<uint64_t, struct uid_records>& curr_records)
{
    int count = 0;
    for (auto const& it : curr_records) {
        count += it.second.entries.size();
    }
    return count;
}

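// All-zero usage entry, used to drop UIDs that did no I/O in the current period.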
static struct uid_io_usage zero_io_usage;

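// Folds curr_io_stats into a new uid_records entry keyed by curr_ts, trims
// records older than five days, and caps the total number of entries at
// MAX_UID_RECORDS_SIZE. Caller must hold um_lock.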
void uid_monitor::add_records_locked(uint64_t curr_ts)
{
    // remove records more than 5 days old
    if (curr_ts > 5 * DAY_TO_SEC) {
        auto it = records.lower_bound(curr_ts - 5 * DAY_TO_SEC);
        records.erase(records.begin(), it);
    }

    struct uid_records new_records;
    for (const auto& p : curr_io_stats) {
        struct uid_record record = {};
        record.name = p.first;
        record.ios = p.second;
        // Skip UIDs that reported no I/O at all in this period.
        if (memcmp(&record.ios, &zero_io_usage, sizeof(struct uid_io_usage))) {
            new_records.entries.push_back(record);
        }
    }

    curr_io_stats.clear();
    new_records.start_ts = start_ts;
    start_ts = curr_ts;

    if (new_records.entries.empty())
        return;

    // make some room for new records by dropping the oldest periods
    int overflow = records_size(records) +
        new_records.entries.size() - MAX_UID_RECORDS_SIZE;
    while (overflow > 0 && !records.empty()) {
        auto del_it = records.begin();
        overflow -= del_it->second.entries.size();
        records.erase(del_it);
    }

    records[curr_ts] = new_records;
}

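// Returns the records from the last `hours` hours (all records if hours is 0)
// whose total I/O exceeds `threshold` bytes. With force_report, takes a fresh
// sample before dumping.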
std::map<uint64_t, struct uid_records> uid_monitor::dump(
    double hours, uint64_t threshold, bool force_report)
{
    if (force_report) {
        report();
    }

    std::unique_ptr<lock_t> lock(new lock_t(&um_lock));

    std::map<uint64_t, struct uid_records> dump_records;
    uint64_t first_ts = 0;

    if (hours != 0) {
        first_ts = time(NULL) - hours * HOUR_TO_SEC;
    }

    for (auto it = records.lower_bound(first_ts); it != records.end(); ++it) {
        const std::vector<struct uid_record>& recs = it->second.entries;
        struct uid_records filtered;

        // Keep only records whose total bytes across read/write,
        // foreground/background and charger on/off exceed the threshold.
        for (const auto& rec : recs) {
            if (rec.ios.bytes[READ][FOREGROUND][CHARGER_ON] +
                rec.ios.bytes[READ][FOREGROUND][CHARGER_OFF] +
                rec.ios.bytes[READ][BACKGROUND][CHARGER_ON] +
                rec.ios.bytes[READ][BACKGROUND][CHARGER_OFF] +
                rec.ios.bytes[WRITE][FOREGROUND][CHARGER_ON] +
                rec.ios.bytes[WRITE][FOREGROUND][CHARGER_OFF] +
                rec.ios.bytes[WRITE][BACKGROUND][CHARGER_ON] +
                rec.ios.bytes[WRITE][BACKGROUND][CHARGER_OFF] > threshold) {
                filtered.entries.push_back(rec);
            }
        }

        if (filtered.entries.empty())
            continue;

        filtered.start_ts = it->second.start_ts;
        dump_records.emplace(it->first, filtered);
    }

    return dump_records;
}

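// Accumulates the delta since the last snapshot into curr_io_stats, bucketed
// by read/write, foreground/background and the current charger state.
// Caller must hold um_lock.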
void uid_monitor::update_curr_io_stats_locked()
{
    std::unordered_map<uint32_t, struct uid_info> uid_io_stats =
        get_uid_io_stats_locked();
    if (uid_io_stats.empty()) {
        return;
    }

    for (const auto& it : uid_io_stats) {
        const struct uid_info& uid = it.second;

        if (curr_io_stats.find(uid.name) == curr_io_stats.end()) {
            curr_io_stats[uid.name] = {};
        }

        struct uid_io_usage& usage = curr_io_stats[uid.name];
        int64_t fg_rd_delta = uid.io[FOREGROUND].read_bytes -
            last_uid_io_stats[uid.uid].io[FOREGROUND].read_bytes;
        int64_t bg_rd_delta = uid.io[BACKGROUND].read_bytes -
            last_uid_io_stats[uid.uid].io[BACKGROUND].read_bytes;
        int64_t fg_wr_delta = uid.io[FOREGROUND].write_bytes -
            last_uid_io_stats[uid.uid].io[FOREGROUND].write_bytes;
        int64_t bg_wr_delta = uid.io[BACKGROUND].write_bytes -
            last_uid_io_stats[uid.uid].io[BACKGROUND].write_bytes;

        // Negative deltas can appear if the kernel counters were reset;
        // clamp them to zero instead of charging bogus usage.
        usage.bytes[READ][FOREGROUND][charger_stat] +=
            (fg_rd_delta < 0) ? 0 : fg_rd_delta;
        usage.bytes[READ][BACKGROUND][charger_stat] +=
            (bg_rd_delta < 0) ? 0 : bg_rd_delta;
        usage.bytes[WRITE][FOREGROUND][charger_stat] +=
            (fg_wr_delta < 0) ? 0 : fg_wr_delta;
        usage.bytes[WRITE][BACKGROUND][charger_stat] +=
            (bg_wr_delta < 0) ? 0 : bg_wr_delta;
    }

    last_uid_io_stats = uid_io_stats;
}

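// Takes a sample now: updates curr_io_stats and stores a new record.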
void uid_monitor::report()
{
    std::unique_ptr<lock_t> lock(new lock_t(&um_lock));

    update_curr_io_stats_locked();
    add_records_locked(time(NULL));
}

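// On a charger state change, flush pending deltas into the old state's
// buckets before switching, so usage is attributed to the correct state.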
void uid_monitor::set_charger_state(charger_stat_t stat)
{
    std::unique_ptr<lock_t> lock(new lock_t(&um_lock));

    if (charger_stat == stat) {
        return;
    }

    update_curr_io_stats_locked();
    charger_stat = stat;
}

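// Seeds the baseline snapshot and start timestamp before monitoring begins.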
void uid_monitor::init(charger_stat_t stat)
{
    charger_stat = stat;
    start_ts = time(NULL);
    last_uid_io_stats = get_uid_io_stats();
}

uid_monitor::uid_monitor()
{
    // um_lock is a binary semaphore used as a mutex by the lock_t guards above.
    sem_init(&um_lock, 0, 1);
}

uid_monitor::~uid_monitor()
{
    sem_destroy(&um_lock);
}