• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "storaged"
18 
19 #include <stdint.h>
20 #include <time.h>
21 
22 #include <string>
23 #include <unordered_map>
24 #include <unordered_set>
25 
26 #include <android/content/pm/IPackageManagerNative.h>
27 #include <android-base/file.h>
28 #include <android-base/logging.h>
29 #include <android-base/macros.h>
30 #include <android-base/parseint.h>
31 #include <android-base/strings.h>
32 #include <android-base/stringprintf.h>
33 #include <binder/IServiceManager.h>
34 #include <log/log_event_list.h>
35 
36 #include "storaged.h"
37 #include "storaged_uid_monitor.h"
38 
39 using namespace android;
40 using namespace android::base;
41 using namespace android::content::pm;
42 using namespace android::os::storaged;
43 using namespace storaged_proto;
44 
namespace {

// Set when a previously-unseen UID shows up in the I/O stats; cleared by
// get_uid_names() after package names have been resolved.
bool refresh_uid_names;

// Kernel interface exporting per-UID foreground/background I/O counters.
const char* UID_IO_STATS_PATH = "/proc/uid_io/stats";

} // namespace
51 
get_uid_io_stats()52 std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats()
53 {
54     Mutex::Autolock _l(uidm_mutex_);
55     return get_uid_io_stats_locked();
56 };
57 
58 /* return true on parse success and false on failure */
parse_uid_io_stats(std::string && s)59 bool uid_info::parse_uid_io_stats(std::string&& s)
60 {
61     std::vector<std::string> fields = Split(s, " ");
62     if (fields.size() < 11 ||
63         !ParseUint(fields[0],  &uid) ||
64         !ParseUint(fields[1],  &io[FOREGROUND].rchar) ||
65         !ParseUint(fields[2],  &io[FOREGROUND].wchar) ||
66         !ParseUint(fields[3],  &io[FOREGROUND].read_bytes) ||
67         !ParseUint(fields[4],  &io[FOREGROUND].write_bytes) ||
68         !ParseUint(fields[5],  &io[BACKGROUND].rchar) ||
69         !ParseUint(fields[6],  &io[BACKGROUND].wchar) ||
70         !ParseUint(fields[7],  &io[BACKGROUND].read_bytes) ||
71         !ParseUint(fields[8],  &io[BACKGROUND].write_bytes) ||
72         !ParseUint(fields[9],  &io[FOREGROUND].fsync) ||
73         !ParseUint(fields[10], &io[BACKGROUND].fsync)) {
74         LOG_TO(SYSTEM, WARNING) << "Invalid uid I/O stats: \""
75                                 << s << "\"";
76         return false;
77     }
78     return true;
79 }
80 
81 /* return true on parse success and false on failure */
parse_task_io_stats(std::string && s)82 bool task_info::parse_task_io_stats(std::string&& s)
83 {
84     std::vector<std::string> fields = Split(s, ",");
85     size_t size = fields.size();
86     if (size < 13 ||
87         !ParseInt(fields[size - 11],  &pid) ||
88         !ParseUint(fields[size - 10],  &io[FOREGROUND].rchar) ||
89         !ParseUint(fields[size - 9],  &io[FOREGROUND].wchar) ||
90         !ParseUint(fields[size - 8],  &io[FOREGROUND].read_bytes) ||
91         !ParseUint(fields[size - 7],  &io[FOREGROUND].write_bytes) ||
92         !ParseUint(fields[size - 6],  &io[BACKGROUND].rchar) ||
93         !ParseUint(fields[size - 5],  &io[BACKGROUND].wchar) ||
94         !ParseUint(fields[size - 4],  &io[BACKGROUND].read_bytes) ||
95         !ParseUint(fields[size - 3], &io[BACKGROUND].write_bytes) ||
96         !ParseUint(fields[size - 2], &io[FOREGROUND].fsync) ||
97         !ParseUint(fields[size - 1], &io[BACKGROUND].fsync)) {
98         LOG_TO(SYSTEM, WARNING) << "Invalid task I/O stats: \""
99                                 << s << "\"";
100         return false;
101     }
102     comm = Join(std::vector<std::string>(
103                 fields.begin() + 1, fields.end() - 11), ',');
104     return true;
105 }
106 
is_zero() const107 bool io_usage::is_zero() const
108 {
109     for (int i = 0; i < IO_TYPES; i++) {
110         for (int j = 0; j < UID_STATS; j++) {
111             for (int k = 0; k < CHARGER_STATS; k++) {
112                 if (bytes[i][j][k])
113                     return false;
114             }
115         }
116     }
117     return true;
118 }
119 
120 namespace {
121 
get_uid_names(const vector<int> & uids,const vector<std::string * > & uid_names)122 void get_uid_names(const vector<int>& uids, const vector<std::string*>& uid_names)
123 {
124     sp<IServiceManager> sm = defaultServiceManager();
125     if (sm == NULL) {
126         LOG_TO(SYSTEM, ERROR) << "defaultServiceManager failed";
127         return;
128     }
129 
130     sp<IBinder> binder = sm->getService(String16("package_native"));
131     if (binder == NULL) {
132         LOG_TO(SYSTEM, ERROR) << "getService package_native failed";
133         return;
134     }
135 
136     sp<IPackageManagerNative> package_mgr = interface_cast<IPackageManagerNative>(binder);
137     std::vector<std::string> names;
138     binder::Status status = package_mgr->getNamesForUids(uids, &names);
139     if (!status.isOk()) {
140         LOG_TO(SYSTEM, ERROR) << "package_native::getNamesForUids failed: "
141                               << status.exceptionMessage();
142         return;
143     }
144 
145     for (uint32_t i = 0; i < uid_names.size(); i++) {
146         if (!names[i].empty()) {
147             *uid_names[i] = names[i];
148         }
149     }
150 
151     refresh_uid_names = false;
152 }
153 
154 } // namespace
155 
// Reads /proc/uid_io/stats and returns a map of uid -> uid_info. Caller must
// hold uidm_mutex_. Each "task"-prefixed line is attached to the most
// recently parsed uid line. Names default to the numeric uid string and are
// replaced with cached names (or a fresh package_native lookup) when known.
std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats_locked()
{
    std::unordered_map<uint32_t, uid_info> uid_io_stats;
    std::string buffer;
    if (!ReadFileToString(UID_IO_STATS_PATH, &buffer)) {
        PLOG_TO(SYSTEM, ERROR) << UID_IO_STATS_PATH << ": ReadFileToString failed";
        return uid_io_stats;
    }

    std::vector<std::string> io_stats = Split(std::move(buffer), "\n");
    uid_info u;
    vector<int> uids;
    vector<std::string*> uid_names;

    for (uint32_t i = 0; i < io_stats.size(); i++) {
        if (io_stats[i].empty()) {
            continue;
        }

        // compare() returns non-zero when the line does NOT start with
        // "task", i.e. this is a per-uid summary line.
        if (io_stats[i].compare(0, 4, "task")) {
            if (!u.parse_uid_io_stats(std::move(io_stats[i])))
                continue;
            uid_io_stats[u.uid] = u;
            uid_io_stats[u.uid].name = std::to_string(u.uid);
            uids.push_back(u.uid);
            // Pointers into an unordered_map stay valid across later
            // insertions (rehashing invalidates iterators, not references),
            // so collecting them here is safe.
            uid_names.push_back(&uid_io_stats[u.uid].name);
            if (last_uid_io_stats_.find(u.uid) == last_uid_io_stats_.end()) {
                // First time we see this uid: resolve names below.
                refresh_uid_names = true;
            } else {
                // Reuse the name resolved on a previous pass.
                uid_io_stats[u.uid].name = last_uid_io_stats_[u.uid].name;
            }
        } else {
            // Task line: belongs to the uid line parsed just above (u.uid).
            task_info t;
            if (!t.parse_task_io_stats(std::move(io_stats[i])))
                continue;
            uid_io_stats[u.uid].tasks[t.pid] = t;
        }
    }

    if (!uids.empty() && refresh_uid_names) {
        get_uid_names(uids, uid_names);
    }

    return uid_io_stats;
}
201 
202 namespace {
203 
history_size(const std::map<uint64_t,struct uid_records> & history)204 inline size_t history_size(
205     const std::map<uint64_t, struct uid_records>& history)
206 {
207     size_t count = 0;
208     for (auto const& it : history) {
209         count += it.second.entries.size();
210     }
211     return count;
212 }
213 
214 } // namespace
215 
// Folds curr_io_stats_ into io_history_ as a single snapshot keyed by
// |curr_ts|. Caller must hold uidm_mutex_. Snapshots older than five days
// are dropped first, and the history is shrunk so the new entries fit under
// MAX_UID_RECORDS_SIZE.
void uid_monitor::add_records_locked(uint64_t curr_ts)
{
    // remove records more than 5 days old
    if (curr_ts > 5 * DAY_TO_SEC) {
        auto it = io_history_.lower_bound(curr_ts - 5 * DAY_TO_SEC);
        io_history_.erase(io_history_.begin(), it);
    }

    struct uid_records new_records;
    for (const auto& p : curr_io_stats_) {
        struct uid_record record = {};
        record.name = p.first;
        // Skip uids (and tasks) with no I/O since the last snapshot.
        if (!p.second.uid_ios.is_zero()) {
            record.ios.user_id = p.second.user_id;
            record.ios.uid_ios = p.second.uid_ios;
            for (const auto& p_task : p.second.task_ios) {
                if (!p_task.second.is_zero())
                    record.ios.task_ios[p_task.first] = p_task.second;
            }
            new_records.entries.push_back(record);
        }
    }

    // Start a fresh accumulation window even if nothing ends up recorded.
    curr_io_stats_.clear();
    new_records.start_ts = start_ts_;
    start_ts_ = curr_ts;

    if (new_records.entries.empty())
      return;

    // make some room for new records
    maybe_shrink_history_for_items(new_records.entries.size());

    io_history_[curr_ts] = new_records;
}
251 
maybe_shrink_history_for_items(size_t nitems)252 void uid_monitor::maybe_shrink_history_for_items(size_t nitems) {
253     ssize_t overflow = history_size(io_history_) + nitems - MAX_UID_RECORDS_SIZE;
254     while (overflow > 0 && io_history_.size() > 0) {
255         auto del_it = io_history_.begin();
256         overflow -= del_it->second.entries.size();
257         io_history_.erase(io_history_.begin());
258     }
259 }
260 
// Returns the history snapshots from the last |hours| (all history when
// hours == 0), keeping only records whose total byte count across every
// read/write, fg/bg and charger bucket exceeds |threshold|. When
// |force_report| is set, a report is taken first so fresh stats are
// included.
std::map<uint64_t, struct uid_records> uid_monitor::dump(
    double hours, uint64_t threshold, bool force_report)
{
    if (force_report) {
        // report() takes uidm_mutex_ itself, so it must run before we lock.
        report(nullptr);
    }

    Mutex::Autolock _l(uidm_mutex_);

    std::map<uint64_t, struct uid_records> dump_records;
    uint64_t first_ts = 0;

    if (hours != 0) {
        first_ts = time(NULL) - hours * HOUR_TO_SEC;
    }

    for (auto it = io_history_.lower_bound(first_ts); it != io_history_.end(); ++it) {
        const std::vector<struct uid_record>& recs = it->second.entries;
        struct uid_records filtered;

        // Keep only records whose total traffic exceeds the threshold.
        for (const auto& rec : recs) {
            const io_usage& uid_usage = rec.ios.uid_ios;
            if (uid_usage.bytes[READ][FOREGROUND][CHARGER_ON] +
                uid_usage.bytes[READ][FOREGROUND][CHARGER_OFF] +
                uid_usage.bytes[READ][BACKGROUND][CHARGER_ON] +
                uid_usage.bytes[READ][BACKGROUND][CHARGER_OFF] +
                uid_usage.bytes[WRITE][FOREGROUND][CHARGER_ON] +
                uid_usage.bytes[WRITE][FOREGROUND][CHARGER_OFF] +
                uid_usage.bytes[WRITE][BACKGROUND][CHARGER_ON] +
                uid_usage.bytes[WRITE][BACKGROUND][CHARGER_OFF] > threshold) {
                filtered.entries.push_back(rec);
            }
        }

        if (filtered.entries.empty())
            continue;

        filtered.start_ts = it->second.start_ts;
        dump_records.insert(
            std::pair<uint64_t, struct uid_records>(it->first, filtered));
    }

    return dump_records;
}
305 
update_curr_io_stats_locked()306 void uid_monitor::update_curr_io_stats_locked()
307 {
308     std::unordered_map<uint32_t, uid_info> uid_io_stats =
309         get_uid_io_stats_locked();
310     if (uid_io_stats.empty()) {
311         return;
312     }
313 
314     for (const auto& it : uid_io_stats) {
315         const uid_info& uid = it.second;
316         if (curr_io_stats_.find(uid.name) == curr_io_stats_.end()) {
317             curr_io_stats_[uid.name] = {};
318         }
319 
320         struct uid_io_usage& usage = curr_io_stats_[uid.name];
321         usage.user_id = multiuser_get_user_id(uid.uid);
322 
323         int64_t fg_rd_delta = uid.io[FOREGROUND].read_bytes -
324             last_uid_io_stats_[uid.uid].io[FOREGROUND].read_bytes;
325         int64_t bg_rd_delta = uid.io[BACKGROUND].read_bytes -
326             last_uid_io_stats_[uid.uid].io[BACKGROUND].read_bytes;
327         int64_t fg_wr_delta = uid.io[FOREGROUND].write_bytes -
328             last_uid_io_stats_[uid.uid].io[FOREGROUND].write_bytes;
329         int64_t bg_wr_delta = uid.io[BACKGROUND].write_bytes -
330             last_uid_io_stats_[uid.uid].io[BACKGROUND].write_bytes;
331 
332         usage.uid_ios.bytes[READ][FOREGROUND][charger_stat_] +=
333             (fg_rd_delta < 0) ? 0 : fg_rd_delta;
334         usage.uid_ios.bytes[READ][BACKGROUND][charger_stat_] +=
335             (bg_rd_delta < 0) ? 0 : bg_rd_delta;
336         usage.uid_ios.bytes[WRITE][FOREGROUND][charger_stat_] +=
337             (fg_wr_delta < 0) ? 0 : fg_wr_delta;
338         usage.uid_ios.bytes[WRITE][BACKGROUND][charger_stat_] +=
339             (bg_wr_delta < 0) ? 0 : bg_wr_delta;
340 
341         for (const auto& task_it : uid.tasks) {
342             const task_info& task = task_it.second;
343             const pid_t pid = task_it.first;
344             const std::string& comm = task_it.second.comm;
345             int64_t task_fg_rd_delta = task.io[FOREGROUND].read_bytes -
346                 last_uid_io_stats_[uid.uid].tasks[pid].io[FOREGROUND].read_bytes;
347             int64_t task_bg_rd_delta = task.io[BACKGROUND].read_bytes -
348                 last_uid_io_stats_[uid.uid].tasks[pid].io[BACKGROUND].read_bytes;
349             int64_t task_fg_wr_delta = task.io[FOREGROUND].write_bytes -
350                 last_uid_io_stats_[uid.uid].tasks[pid].io[FOREGROUND].write_bytes;
351             int64_t task_bg_wr_delta = task.io[BACKGROUND].write_bytes -
352                 last_uid_io_stats_[uid.uid].tasks[pid].io[BACKGROUND].write_bytes;
353 
354             io_usage& task_usage = usage.task_ios[comm];
355             task_usage.bytes[READ][FOREGROUND][charger_stat_] +=
356                 (task_fg_rd_delta < 0) ? 0 : task_fg_rd_delta;
357             task_usage.bytes[READ][BACKGROUND][charger_stat_] +=
358                 (task_bg_rd_delta < 0) ? 0 : task_bg_rd_delta;
359             task_usage.bytes[WRITE][FOREGROUND][charger_stat_] +=
360                 (task_fg_wr_delta < 0) ? 0 : task_fg_wr_delta;
361             task_usage.bytes[WRITE][BACKGROUND][charger_stat_] +=
362                 (task_bg_wr_delta < 0) ? 0 : task_bg_wr_delta;
363         }
364     }
365 
366     last_uid_io_stats_ = uid_io_stats;
367 }
368 
report(unordered_map<int,StoragedProto> * protos)369 void uid_monitor::report(unordered_map<int, StoragedProto>* protos)
370 {
371     if (!enabled()) return;
372 
373     Mutex::Autolock _l(uidm_mutex_);
374 
375     update_curr_io_stats_locked();
376     add_records_locked(time(NULL));
377 
378     if (protos) {
379         update_uid_io_proto(protos);
380     }
381 }
382 
383 namespace {
384 
set_io_usage_proto(IOUsage * usage_proto,const io_usage & usage)385 void set_io_usage_proto(IOUsage* usage_proto, const io_usage& usage)
386 {
387     usage_proto->set_rd_fg_chg_on(usage.bytes[READ][FOREGROUND][CHARGER_ON]);
388     usage_proto->set_rd_fg_chg_off(usage.bytes[READ][FOREGROUND][CHARGER_OFF]);
389     usage_proto->set_rd_bg_chg_on(usage.bytes[READ][BACKGROUND][CHARGER_ON]);
390     usage_proto->set_rd_bg_chg_off(usage.bytes[READ][BACKGROUND][CHARGER_OFF]);
391     usage_proto->set_wr_fg_chg_on(usage.bytes[WRITE][FOREGROUND][CHARGER_ON]);
392     usage_proto->set_wr_fg_chg_off(usage.bytes[WRITE][FOREGROUND][CHARGER_OFF]);
393     usage_proto->set_wr_bg_chg_on(usage.bytes[WRITE][BACKGROUND][CHARGER_ON]);
394     usage_proto->set_wr_bg_chg_off(usage.bytes[WRITE][BACKGROUND][CHARGER_OFF]);
395 }
396 
get_io_usage_proto(io_usage * usage,const IOUsage & io_proto)397 void get_io_usage_proto(io_usage* usage, const IOUsage& io_proto)
398 {
399     usage->bytes[READ][FOREGROUND][CHARGER_ON] = io_proto.rd_fg_chg_on();
400     usage->bytes[READ][FOREGROUND][CHARGER_OFF] = io_proto.rd_fg_chg_off();
401     usage->bytes[READ][BACKGROUND][CHARGER_ON] = io_proto.rd_bg_chg_on();
402     usage->bytes[READ][BACKGROUND][CHARGER_OFF] = io_proto.rd_bg_chg_off();
403     usage->bytes[WRITE][FOREGROUND][CHARGER_ON] = io_proto.wr_fg_chg_on();
404     usage->bytes[WRITE][FOREGROUND][CHARGER_OFF] = io_proto.wr_fg_chg_off();
405     usage->bytes[WRITE][BACKGROUND][CHARGER_ON] = io_proto.wr_bg_chg_on();
406     usage->bytes[WRITE][BACKGROUND][CHARGER_OFF] = io_proto.wr_bg_chg_off();
407 }
408 
409 } // namespace
410 
// Serializes io_history_ into the per-user StoragedProto map: one UidIOItem
// per (user, snapshot) pair, each carrying the snapshot's records for that
// user. |protos| is dereferenced unconditionally — callers must pass
// non-null (report() guards this).
void uid_monitor::update_uid_io_proto(unordered_map<int, StoragedProto>* protos)
{
    for (const auto& item : io_history_) {
        const uint64_t& end_ts = item.first;
        const struct uid_records& recs = item.second;
        // Lazily-created proto item per user id for this snapshot.
        unordered_map<userid_t, UidIOItem*> user_items;

        for (const auto& entry : recs.entries) {
            userid_t user_id = entry.ios.user_id;
            UidIOItem* item_proto = user_items[user_id];
            if (item_proto == nullptr) {
                item_proto = (*protos)[user_id].mutable_uid_io_usage()
                             ->add_uid_io_items();
                user_items[user_id] = item_proto;
            }
            // Same value for every entry of this snapshot; harmless repeat.
            item_proto->set_end_ts(end_ts);

            UidIORecords* recs_proto = item_proto->mutable_records();
            recs_proto->set_start_ts(recs.start_ts);

            UidRecord* rec_proto = recs_proto->add_entries();
            rec_proto->set_uid_name(entry.name);
            rec_proto->set_user_id(user_id);

            IOUsage* uid_io_proto = rec_proto->mutable_uid_io();
            const io_usage& uio_ios = entry.ios.uid_ios;
            set_io_usage_proto(uid_io_proto, uio_ios);

            // Per-task breakdown nested under the uid record.
            for (const auto& task_io : entry.ios.task_ios) {
                const std::string& task_name = task_io.first;
                const io_usage& task_ios = task_io.second;

                TaskIOUsage* task_io_proto = rec_proto->add_task_io();
                task_io_proto->set_task_name(task_name);
                set_io_usage_proto(task_io_proto->mutable_ios(), task_ios);
            }
        }
    }
}
450 
clear_user_history(userid_t user_id)451 void uid_monitor::clear_user_history(userid_t user_id)
452 {
453     Mutex::Autolock _l(uidm_mutex_);
454 
455     for (auto& item : io_history_) {
456         vector<uid_record>* entries = &item.second.entries;
457         entries->erase(
458             remove_if(entries->begin(), entries->end(),
459                 [user_id](const uid_record& rec) {
460                     return rec.ios.user_id == user_id;}),
461             entries->end());
462     }
463 
464     for (auto it = io_history_.begin(); it != io_history_.end(); ) {
465         if (it->second.entries.empty()) {
466             it = io_history_.erase(it);
467         } else {
468             it++;
469         }
470     }
471 }
472 
// Restores io_history_ from a persisted UidIOUsage proto for |user_id|.
// No-op when the monitor is disabled. Idempotent per (user, uid, snapshot):
// re-loading the same proto (e.g. after a system_server crash) does not
// duplicate entries.
void uid_monitor::load_uid_io_proto(userid_t user_id, const UidIOUsage& uid_io_proto)
{
    if (!enabled()) return;

    Mutex::Autolock _l(uidm_mutex_);

    for (const auto& item_proto : uid_io_proto.uid_io_items()) {
        const UidIORecords& records_proto = item_proto.records();
        // operator[] creates an empty snapshot if this end_ts is new.
        struct uid_records* recs = &io_history_[item_proto.end_ts()];

        // It's possible that the same uid_io_proto file gets loaded more than
        // once, for example, if system_server crashes. In this case we avoid
        // adding duplicate entries, so we build a quick way to check for
        // duplicates.
        std::unordered_set<std::string> existing_uids;
        for (const auto& rec : recs->entries) {
            if (rec.ios.user_id == user_id) {
                existing_uids.emplace(rec.name);
            }
        }

        recs->start_ts = records_proto.start_ts();
        for (const auto& rec_proto : records_proto.entries()) {
            if (existing_uids.find(rec_proto.uid_name()) != existing_uids.end()) {
                continue;
            }

            struct uid_record record;
            record.name = rec_proto.uid_name();
            record.ios.user_id = rec_proto.user_id();
            get_io_usage_proto(&record.ios.uid_ios, rec_proto.uid_io());

            // Rebuild the per-task breakdown keyed by task name.
            for (const auto& task_io_proto : rec_proto.task_io()) {
                get_io_usage_proto(
                    &record.ios.task_ios[task_io_proto.task_name()],
                    task_io_proto.ios());
            }
            recs->entries.push_back(record);
        }

        // We already added items, so this will just cull down to the maximum
        // length. We do not remove anything if there is only one entry.
        if (io_history_.size() > 1) {
            maybe_shrink_history_for_items(0);
        }
    }
}
520 
set_charger_state(charger_stat_t stat)521 void uid_monitor::set_charger_state(charger_stat_t stat)
522 {
523     Mutex::Autolock _l(uidm_mutex_);
524 
525     if (charger_stat_ == stat) {
526         return;
527     }
528 
529     update_curr_io_stats_locked();
530     charger_stat_ = stat;
531 }
532 
// One-time setup: records the initial charger state and seeds the baseline
// uid I/O counters so the first update computes sane deltas.
void uid_monitor::init(charger_stat_t stat)
{
    charger_stat_ = stat;

    start_ts_ = time(NULL);
    last_uid_io_stats_ = get_uid_io_stats();
}
540 
// The monitor is enabled only when the kernel exposes per-UID I/O
// accounting, i.e. /proc/uid_io/stats is readable.
uid_monitor::uid_monitor()
    : enabled_(!access(UID_IO_STATS_PATH, R_OK)) {
}
544