/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SerializedLogBuffer.h"

#include <sys/prctl.h>

#include <limits>

#include <android-base/logging.h>
#include <android-base/scopeguard.h>

#include "LogSize.h"
#include "LogStatistics.h"
#include "SerializedFlushToState.h"

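// Appends one entry to the newest chunk of |log_buffer|, starting a new chunk (sized as
// max_size / SerializedLogBuffer::kChunkSizeDivisor) when the buffer is empty or the current
// chunk cannot fit the entry, and returns a pointer to the entry that was written.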
static SerializedLogEntry* LogToLogBuffer(std::list<SerializedLogChunk>& log_buffer,
                                          size_t max_size, uint64_t sequence, log_time realtime,
                                          uid_t uid, pid_t pid, pid_t tid, const char* msg,
                                          uint16_t len) {
    if (log_buffer.empty()) {
        log_buffer.push_back(SerializedLogChunk(max_size / SerializedLogBuffer::kChunkSizeDivisor));
    }

    auto total_len = sizeof(SerializedLogEntry) + len;
    if (!log_buffer.back().CanLog(total_len)) {
        log_buffer.back().FinishWriting();
        log_buffer.push_back(SerializedLogChunk(max_size / SerializedLogBuffer::kChunkSizeDivisor));
    }

    return log_buffer.back().Log(sequence, realtime, uid, pid, tid, msg, len);
}

// Clear all logs from a particular UID by iterating through all existing chunks for a log_id,
// writing every log that doesn't match the UID into a new log buffer, and then swapping the log
// buffers.  As an optimization, chunks are moved as-is until the first log message from the UID is
// found, so that back-to-back clears of the same UID do not require reflowing the entire buffer.
void ClearLogsByUid(std::list<SerializedLogChunk>& log_buffer, uid_t uid, size_t max_size,
                    log_id_t log_id, LogStatistics* stats) REQUIRES(logd_lock) {
    bool contains_uid_logs = false;
    std::list<SerializedLogChunk> new_logs;
    auto it = log_buffer.begin();
    while (it != log_buffer.end()) {
        auto chunk = it++;
        chunk->NotifyReadersOfPrune(log_id);
        chunk->IncReaderRefCount();

        if (!contains_uid_logs) {
            for (const auto& entry : *chunk) {
                if (entry.uid() == uid) {
                    contains_uid_logs = true;
                    break;
                }
            }
            if (!contains_uid_logs) {
                chunk->DecReaderRefCount();
                new_logs.splice(new_logs.end(), log_buffer, chunk);
                continue;
            }
            // We found a UID log, so push a writable chunk to prepare for the next loop.
            new_logs.push_back(
                    SerializedLogChunk(max_size / SerializedLogBuffer::kChunkSizeDivisor));
        }

        for (const auto& entry : *chunk) {
            if (entry.uid() == uid) {
                if (stats != nullptr) {
                    stats->Subtract(entry.ToLogStatisticsElement(log_id));
                }
            } else {
                LogToLogBuffer(new_logs, max_size, entry.sequence(), entry.realtime(), entry.uid(),
                               entry.pid(), entry.tid(), entry.msg(), entry.msg_len());
            }
        }
        chunk->DecReaderRefCount();
        log_buffer.erase(chunk);
    }
    std::swap(new_logs, log_buffer);
}

SerializedLogBuffer::SerializedLogBuffer(LogReaderList* reader_list, LogTags* tags,
                                         LogStatistics* stats)
    : reader_list_(reader_list), tags_(tags), stats_(stats) {
    Init();
}

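// Sizes each log buffer from its configured property value, falling back to kLogBufferMinSize
// when SetSize() rejects the configured value, then wakes any sleeping reader threads so they
// dump their current content.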
void SerializedLogBuffer::Init() {
    log_id_for_each(i) {
        if (!SetSize(i, GetBufferSizeFromProperties(i))) {
            SetSize(i, kLogBufferMinSize);
        }
    }

    // Release any sleeping reader threads to dump their current content.
    auto lock = std::lock_guard{logd_lock};
    for (const auto& reader_thread : reader_list_->reader_threads()) {
        reader_thread->TriggerReader();
    }
}

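// Decides whether a message should be stored at all.  Security logs are always kept.  For binary
// buffers the tag is resolved via tags_->tagToName(); for text buffers the first byte is the
// priority and the tag string follows.  The final decision is delegated to
// __android_log_is_loggable_len().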
bool SerializedLogBuffer::ShouldLog(log_id_t log_id, const char* msg, uint16_t len) {
    if (log_id == LOG_ID_SECURITY) {
        return true;
    }

    int prio = ANDROID_LOG_INFO;
    const char* tag = nullptr;
    size_t tag_len = 0;
    if (IsBinary(log_id)) {
        int32_t tag_int = MsgToTag(msg, len);
        tag = tags_->tagToName(tag_int);
        if (tag) {
            tag_len = strlen(tag);
        }
    } else {
        prio = *msg;
        tag = msg + 1;
        tag_len = strnlen(tag, len - 1);
    }
    return __android_log_is_loggable_len(prio, tag, tag_len, ANDROID_LOG_VERBOSE);
}

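// Writes one log entry.  The message is truncated to LOGGER_ENTRY_MAX_PAYLOAD, dropped (with its
// size still counted in stats_) if ShouldLog() rejects it, and otherwise appended under logd_lock,
// added to stats_, followed by a prune check and a notification to readers.  Returns the
// (possibly truncated) number of bytes logged, or a negative errno value on rejection.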
int SerializedLogBuffer::Log(log_id_t log_id, log_time realtime, uid_t uid, pid_t pid, pid_t tid,
                             const char* msg, uint16_t len) {
    if (log_id >= LOG_ID_MAX || len == 0) {
        return -EINVAL;
    }

    if (len > LOGGER_ENTRY_MAX_PAYLOAD) {
        len = LOGGER_ENTRY_MAX_PAYLOAD;
    }

    if (!ShouldLog(log_id, msg, len)) {
        stats_->AddTotal(log_id, len);
        return -EACCES;
    }

    auto sequence = sequence_.fetch_add(1, std::memory_order_relaxed);

    auto lock = std::lock_guard{logd_lock};
    auto entry = LogToLogBuffer(logs_[log_id], max_size_[log_id], sequence, realtime, uid, pid, tid,
                                msg, len);
    stats_->Add(entry->ToLogStatisticsElement(log_id));

    MaybePrune(log_id);

    reader_list_->NotifyNewLog(1 << log_id);
    return len;
}

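// Prunes the buffer for |log_id| when its used size exceeds max_size_[log_id], then records the
// resulting size as overhead in stats_.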
void SerializedLogBuffer::MaybePrune(log_id_t log_id) {
    size_t total_size = GetSizeUsed(log_id);
    size_t after_size = total_size;
    if (total_size > max_size_[log_id]) {
        Prune(log_id, total_size - max_size_[log_id]);
        after_size = GetSizeUsed(log_id);
        LOG(VERBOSE) << "Pruned Logs from log_id: " << log_id << ", previous size: " << total_size
                     << " after size: " << after_size;
    }

    stats_->set_overhead(log_id, after_size);
}

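// Subtracts every entry in |chunk| from stats_.  The Inc/DecReaderRefCount() calls bracket the
// iteration so the chunk's data stays readable while we walk it (presumably keeping a compressed
// chunk decompressed for the duration).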
void SerializedLogBuffer::RemoveChunkFromStats(log_id_t log_id, SerializedLogChunk& chunk) {
    chunk.IncReaderRefCount();
    for (const auto& entry : chunk) {
        stats_->Subtract(entry.ToLogStatisticsElement(log_id));
    }
    chunk.DecReaderRefCount();
}

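// Frees at least |bytes_to_free| bytes (or the entire buffer, whichever comes first) by erasing
// whole chunks from the front of the buffer, waking or warning readers as described below before
// each chunk is dropped.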
void SerializedLogBuffer::Prune(log_id_t log_id, size_t bytes_to_free) {
    auto& log_buffer = logs_[log_id];
    auto it = log_buffer.begin();
    while (it != log_buffer.end()) {
        for (const auto& reader_thread : reader_list_->reader_threads()) {
            if (!reader_thread->IsWatching(log_id)) {
                continue;
            }

            if (reader_thread->deadline().time_since_epoch().count() != 0) {
                // Always wake up wrapped readers when pruning.  'Wrapped' readers are an
                // optimization that allows the reader to wait until logs starting at a specified
                // timestamp are about to be pruned.  This is error-prone however, since if that
                // timestamp is about to be pruned, the reader is not likely to read the messages
                // fast enough to avoid backing up logd.  Instead, we can achieve a nearly as
                // efficient, but not error-prone, batching effect by waking the reader whenever
                // any chunk is about to be pruned.
                reader_thread->TriggerReader();
            }

            // Some readers may still be reading from this log chunk; log a warning that they are
            // about to lose logs.
            // TODO: We should forcefully disconnect the reader instead, such that the reader itself
            // has an indication that it has lost logs.
            if (reader_thread->start() <= it->highest_sequence_number()) {
                LOG(WARNING) << "Skipping entries from slow reader, " << reader_thread->name()
                             << ", from LogBuffer::Prune()";
            }
        }

        // Increment ahead of time since we're going to erase this iterator from the list.
        auto it_to_prune = it++;

        // Readers may have a reference to the chunk to track their last read log_position.
        // Notify them to delete the reference.
        it_to_prune->NotifyReadersOfPrune(log_id);

        size_t buffer_size = it_to_prune->PruneSize();
        RemoveChunkFromStats(log_id, *it_to_prune);
        log_buffer.erase(it_to_prune);
        if (buffer_size >= bytes_to_free) {
            return;
        }
        bytes_to_free -= buffer_size;
    }
}

void SerializedLogBuffer::UidClear(log_id_t log_id, uid_t uid) {
    // Wake all readers; see comment in Prune().
    for (const auto& reader_thread : reader_list_->reader_threads()) {
        if (!reader_thread->IsWatching(log_id)) {
            continue;
        }

        if (reader_thread->deadline().time_since_epoch().count() != 0) {
            reader_thread->TriggerReader();
        }
    }
    ClearLogsByUid(logs_[log_id], uid, max_size_[log_id], log_id, stats_);
}

std::unique_ptr<FlushToState> SerializedLogBuffer::CreateFlushToState(uint64_t start,
                                                                      LogMask log_mask) {
    return std::make_unique<SerializedFlushToState>(start, log_mask, logs_);
}

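// Streams every unread entry tracked by |abstract_state| to |writer|, subject to UID permission
// checks and the optional |filter|.  Each entry is copied to the stack and logd_lock is dropped
// while it is written, so a slow writer never blocks logging or pruning.  Returns false if the
// writer fails; on success, advances state.start() past the last entry delivered.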
bool SerializedLogBuffer::FlushTo(
        LogWriter* writer, FlushToState& abstract_state,
        const std::function<FilterResult(log_id_t log_id, pid_t pid, uint64_t sequence,
                                         log_time realtime)>& filter) {
    auto& state = reinterpret_cast<SerializedFlushToState&>(abstract_state);

    while (state.HasUnreadLogs()) {
        LogWithId top = state.PopNextUnreadLog();
        auto* entry = top.entry;
        auto log_id = top.log_id;

        if (entry->sequence() < state.start()) {
            continue;
        }
        state.set_start(entry->sequence());

        if (!writer->privileged() && entry->uid() != writer->uid()) {
            continue;
        }

        if (filter) {
            auto ret = filter(log_id, entry->pid(), entry->sequence(), entry->realtime());
            if (ret == FilterResult::kSkip) {
                continue;
            }
            if (ret == FilterResult::kStop) {
                break;
            }
        }

        // We copy the log entry such that we can flush it without the lock.  We never block pruning
        // waiting for this Flush() to complete.
        constexpr size_t kMaxEntrySize = sizeof(*entry) + LOGGER_ENTRY_MAX_PAYLOAD + 1;
        unsigned char entry_copy[kMaxEntrySize] __attribute__((uninitialized));
        CHECK_LT(entry->msg_len(), LOGGER_ENTRY_MAX_PAYLOAD + 1);
        memcpy(entry_copy, entry, sizeof(*entry) + entry->msg_len());
        logd_lock.unlock();

        if (!reinterpret_cast<SerializedLogEntry*>(entry_copy)->Flush(writer, log_id)) {
            logd_lock.lock();
            return false;
        }

        logd_lock.lock();
    }

    state.set_start(state.start() + 1);
    return true;
}

bool SerializedLogBuffer::Clear(log_id_t id, uid_t uid) {
    auto lock = std::lock_guard{logd_lock};
    if (uid == 0) {
        Prune(id, ULONG_MAX);
    } else {
        UidClear(id, uid);
    }

    // Clearing SerializedLogBuffer never waits for readers and therefore is always successful.
    return true;
}

size_t SerializedLogBuffer::GetSizeUsed(log_id_t id) {
    size_t total_size = 0;
    for (const auto& chunk : logs_[id]) {
        total_size += chunk.PruneSize();
    }
    return total_size;
}

size_t SerializedLogBuffer::GetSize(log_id_t id) {
    auto lock = std::lock_guard{logd_lock};
    return max_size_[id];
}

// New SerializedLogChunk objects will be allocated according to the new size, but older ones are
// unchanged.  MaybePrune() is called on the log buffer to reduce it to an appropriate size if the
// new size is lower.
bool SerializedLogBuffer::SetSize(log_id_t id, size_t size) {
    // Reasonable limits ...
    if (!IsValidBufferSize(size)) {
        return false;
    }

    auto lock = std::lock_guard{logd_lock};
    max_size_[id] = size;

    MaybePrune(id);

    return true;
}