// Copyright 2021 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

syntax = "proto3";

package pw.log;

import "pw_protobuf_protos/common.proto";
import "pw_tokenizer/proto/options.proto";

option java_outer_classname = "Log";

// A log message and metadata. Logs come in a few different forms:
//
//  1. A tokenized log message (recommended for production)
//  2. A non-tokenized log message (good for development)
//  3. A "log missed" tombstone, indicating that some logs were dropped
//
// Size analysis for tokenized log messages, including each field's proto tag:
//
//  - message     - 6-12 bytes; depending on number and value of arguments
//  - line_level  - 3 bytes; 4 bytes if line > 2048 (uncommon)
//  - timestamp   - 3 bytes; assuming delta encoding
//  - thread      - 2-6 bytes; depending on whether value is a token or string
//
// Adding the fields gives the total proto message size:
//
//    6-12 bytes - log
//    9-15 bytes - log + level + line
//   12-18 bytes - log + level + line + timestamp
//
// An analysis of a project's log token database revealed the following
// distribution of the number of arguments to log messages:
//
//   # args   # messages
//     0         2,700
//     1         2,400
//     2         1,200
//     3+        1,000
//
// Note: The below proto makes some compromises compared to what one might
// expect for a "clean" proto design, in order to shave bytes off of the
// messages. It is critical that the log messages are as small as possible to
// enable storing more logs in limited memory. This is why, for example, there
// is no separate "DroppedLog" type, or a "TokenizedLog" and "StringLog", which
// would add at least 2 extra bytes per message.
message LogEntry {
  // The log message, which may be tokenized.
  //
  // If tokenized logging is used, implementations may encode metadata in the
  // log message rather than as separate proto fields. This reduces the size of
  // the protobuf with no overhead.
  //
  // The standard format for encoding metadata in the log message is defined by
  // the pw_log_tokenized module. The message and metadata are encoded as
  // key-value pairs using ■ and ♦ as delimiters. For example:
  //
  //  ■msg♦This is the log message: %d■module♦wifi■file♦../path/to/file.cc
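  //
  // which decodes to the key-value pairs:
  //
  //   msg    = "This is the log message: %d"
  //   module = "wifi"
  //   file   = "../path/to/file.cc"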
  //
  // See http://pigweed.dev/pw_log_tokenized for full details. When
  // pw_log_tokenized is used, this metadata is automatically included as
  // described.
  //
  // The level and flags are not included since they may be runtime values and
  // thus cannot always be tokenized. The line number is not included because
  // line numbers change frequently and a new token is created for each line.
  //
  // Size analysis when tokenized:
  //
  //   tag+wire = 1 byte
  //   size     = 1 byte; payload will almost always be < 127 bytes
  //   payload  = N bytes; typically 4-10 in practice
  //
  // Total: 2 + N ~= 6-12 bytes
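  //
  // For example, assuming the default 32-bit tokens, a message with one small
  // (single-byte varint) argument has a 5-byte payload, for 1 + 1 + 5 = 7
  // bytes total.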
  optional bytes message = 1 [(tokenizer.format) = TOKENIZATION_OPTIONAL];

  // Packed log level and line number. Structure:
  //
  //   Level: Bottom 3 bits; level = line_level & 0x7
  //   Line: Remaining bits; line = (line_level >> 3)
  //
  // Note: This packing saves two bytes per log message in most cases compared
  // to having line and level separately; and is zero-cost if the log backend
  // omits the line number.
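  //
  // For example, an INFO-level (2) message at line 52 is packed as
  // line_level = (52 << 3) | 2 = 418, and decoded as level = 418 & 0x7 = 2
  // and line = 418 >> 3 = 52.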
  uint32 line_level = 2;

  // Some log messages have flags to indicate attributes such as whether they
  // are from an assert or if they contain PII. The particular flags are
  // product- and implementation-dependent.
  uint32 flags = 3;

  // Timestamps are either specified with an absolute timestamp or relative to
  // the previous log entry.
  oneof time {
    // The absolute timestamp in implementation-defined ticks. Applications
    // determine how to interpret this on the receiving end. In the simplest
    // case, these ticks might be milliseconds or microseconds since boot.
    // Applications could also access clock information out-of-band with a
    // ClockParameters protobuf.
    int64 timestamp = 4;

    // Time since the last entry in implementation-defined ticks, as for the
    // timestamp field. This enables delta encoding when batching entries
    // together.
    //
    // Size analysis for this field including tag and varint, assuming 1 kHz
    // ticks:
    //
    //           < 127 ms gap == 127 ms      ==  7 bits == 2 bytes
    //        < 16,000 ms gap ==  16 seconds == 14 bits == 3 bytes
    //     < 2,000,000 ms gap ==  35 minutes == 21 bits == 4 bytes
    //   < 300,000,000 ms gap ==  74 hours   == 28 bits == 5 bytes
    //
    // Log bursts will thus consume just 2 bytes (tag + up to 127 ms delta) for
    // the timestamp, which is a good improvement over an absolute timestamp.
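    //
    // For example, entries logged at ticks 1000, 1005, and 1130 can be
    // batched as timestamp = 1000 on the first entry followed by
    // time_since_last_entry = 5 and time_since_last_entry = 125; each delta
    // fits in a 2-byte field.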
    int64 time_since_last_entry = 5;
  }

  // When the log buffers are full but more logs come in, the logs are counted
  // and a special log message is emitted with only counts for the number of
  // messages dropped.
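  //
  // For example, an entry with dropped = 7 and no message set indicates that
  // seven log entries were lost.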
  uint32 dropped = 6;

  // The PW_LOG_MODULE_NAME for this log message.
  bytes module = 7 [(tokenizer.format) = TOKENIZATION_OPTIONAL];

  // The file path where this log was created, if not encoded in the message.
  bytes file = 8 [(tokenizer.format) = TOKENIZATION_OPTIONAL];

  // The task or thread name that created the log message. If the log was not
  // created on a thread, it should use a name appropriate to that context.
  bytes thread = 9 [(tokenizer.format) = TOKENIZATION_OPTIONAL];

  // The following fields are planned but will not be added until they are
  // needed. Protobuf field numbers over 15 use an extra byte, so these fields
  // are left out for now to avoid reserving field numbers unnecessarily.

  // Represents the device from which the log originated. The meaning of this
  // field is implementation-defined.
  // uint32 source_id = ?;

  // Some messages are associated with trace events, which may carry additional
  // contextual data. This is a tuple of a data format string which could be
  // used by the decoder to identify the data (e.g. printf-style tokens) and the
  // data itself in bytes.
  // bytes data_format = ?
  //     [(tokenizer.format) = TOKENIZATION_OPTIONAL];
  // bytes data = ?;
}

message LogRequest {}

message LogEntries {
  repeated LogEntry entries = 1;
  uint32 first_entry_sequence_id = 2;
}

// RPC service for accessing logs.
service Logs {
  rpc Listen(LogRequest) returns (stream LogEntries);
}

message FilterRule {
  // Log level values match pw_log/levels.h. Enum names avoid collisions with
  // possible macros.
  enum Level {
    ANY_LEVEL = 0;
    DEBUG_LEVEL = 1;
    INFO_LEVEL = 2;
    WARN_LEVEL = 3;
    ERROR_LEVEL = 4;
    CRITICAL_LEVEL = 5;
    FATAL_LEVEL = 7;
  };
  // Condition 1: log.level >= level_greater_than_or_equal.
  Level level_greater_than_or_equal = 1;

  // Condition 2: (module_equals.size() == 0) || (log.module == module_equals);
  bytes module_equals = 2 [(tokenizer.format) = TOKENIZATION_OPTIONAL];

  // Condition 3: (any_flags_set == 0) || ((log.flags & any_flags_set) != 0)
  uint32 any_flags_set = 3;

  // Action to take if all conditions are met and rule is not inactive.
  enum Action {
    INACTIVE = 0;  // Ignore the rule entirely.
    KEEP = 1;      // Keep the log entry if all conditions are met.
    DROP = 2;      // Drop the log entry if all conditions are met.
  };
  Action action = 4;

  // Condition 4: (thread_equals.size() == 0 || log.thread == thread_equals).
  bytes thread_equals = 5 [(tokenizer.format) = TOKENIZATION_OPTIONAL];
}

// A filter is a series of rules. First matching rule wins.
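//
// For example, a filter that keeps WARN-or-higher logs from the "wifi" module
// and drops everything else could contain two rules:
//
//   rule 1: level_greater_than_or_equal = WARN_LEVEL, module_equals = "wifi",
//           action = KEEP
//   rule 2: action = DROP, with all conditions left at their defaults so it
//           matches every entry that falls through rule 1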
message Filter {
  repeated FilterRule rule = 1;
}

message SetFilterRequest {
  // A filter can be identified by a human-readable string, token, or number.
  bytes filter_id = 1 [(tokenizer.format) = TOKENIZATION_OPTIONAL];

  Filter filter = 2;
}

message GetFilterRequest {
  bytes filter_id = 1 [(tokenizer.format) = TOKENIZATION_OPTIONAL];
}

message FilterIdListRequest {}

message FilterIdListResponse {
  repeated bytes filter_id = 1 [(tokenizer.format) = TOKENIZATION_OPTIONAL];
}

// RPC service for retrieving and modifying log filters.
service Filters {
  rpc SetFilter(SetFilterRequest) returns (pw.protobuf.Empty);
  rpc GetFilter(GetFilterRequest) returns (Filter);
  rpc ListFilterIds(FilterIdListRequest) returns (FilterIdListResponse);
}