// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_metric/metric_service_nanopb.h"

#include <algorithm>  // For std::copy.
#include <cstring>
#include <span>

#include "pw_assert/assert.h"
#include "pw_containers/vector.h"
#include "pw_metric/metric.h"
#include "pw_preprocessor/util.h"

namespace pw::metric {
namespace {

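// Accumulates metrics into a single MetricResponse and writes it to the given
// RPC stream writer whenever the response fills. Callers must invoke Flush()
// after the last Write() to send any remaining partially filled response.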
class MetricWriter {
 public:
  MetricWriter(rpc::ServerWriter<pw_metric_MetricResponse>& response_writer)
      : response_(pw_metric_MetricResponse_init_zero),
        response_writer_(response_writer) {}

  // TODO(keir): Figure out a pw_rpc mechanism to fill a streaming packet based
  // on transport MTU, rather than having this as a static knob. For example,
  // some transports may be able to fit 30 metrics; others, only 5.
  void Write(const Metric& metric, const Vector<Token>& path) {
    // Nanopb doesn't offer an easy way to do bounds checking, so use span's
    // type deduction magic to figure out the max size.
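    // (A std::span constructed from a C array deduces the array's length, so
    // metrics.size() below gives the capacity of response_.metrics.)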
    std::span<pw_metric_Metric> metrics(response_.metrics);
    PW_CHECK_INT_LT(response_.metrics_count, metrics.size());

    // Grab the next available Metric slot to write to in the response.
    pw_metric_Metric& proto_metric = response_.metrics[response_.metrics_count];

    // Copy the path.
    std::span<Token> proto_path(proto_metric.token_path);
    PW_CHECK_INT_LE(path.size(), proto_path.size());
    std::copy(path.begin(), path.end(), proto_path.begin());
    proto_metric.token_path_count = path.size();

    // Copy the metric value.
    if (metric.is_float()) {
      proto_metric.value.as_float = metric.as_float();
      proto_metric.which_value = pw_metric_Metric_as_float_tag;
    } else {
      proto_metric.value.as_int = metric.as_int();
      proto_metric.which_value = pw_metric_Metric_as_int_tag;
    }

    // Move write head to the next slot.
    response_.metrics_count++;

    // If the metric response object is full, send the response and reset.
    // TODO(keir): Support runtime batch sizes < max proto size.
    if (response_.metrics_count == metrics.size()) {
      Flush();
    }
  }

  void Flush() {
    if (response_.metrics_count) {
      response_writer_.Write(response_);
      response_ = pw_metric_MetricResponse_init_zero;
    }
  }

 private:
  pw_metric_MetricResponse response_;
  // This RPC stream writer handle must be valid for the metric writer lifetime.
  rpc::ServerWriter<pw_metric_MetricResponse>& response_writer_;
};

// Walks a metric tree recursively, passing each metric along with its path
// (a list of name tokens) to a metric writer which can consume them.
//
// TODO(keir): Generalize this to support a generic visitor.
class MetricWalker {
 public:
  MetricWalker(MetricWriter& writer) : writer_(writer) {}

  void Walk(const IntrusiveList<Metric>& metrics) {
    for (const auto& m : metrics) {
      ScopedName scoped_name(m.name(), *this);
      writer_.Write(m, path_);
    }
  }

  void Walk(const IntrusiveList<Group>& groups) {
    for (const auto& g : groups) {
      Walk(g);
    }
  }

  void Walk(const Group& group) {
    ScopedName scoped_name(group.name(), *this);
    Walk(group.children());
    Walk(group.metrics());
  }

 private:
  // RAII helper that pushes a group or metric name onto the walker's explicit
  // path stack on construction and pops it on destruction.
  struct ScopedName {
    ScopedName(Token name, MetricWalker& rhs) : walker(rhs) {
      PW_CHECK_INT_LT(walker.path_.size(),
                      walker.path_.capacity(),
                      "Metrics are too deep; bump path_ capacity");
      walker.path_.push_back(name);
    }
    ~ScopedName() { walker.path_.pop_back(); }
    MetricWalker& walker;
  };

  Vector<Token, 4 /* max depth */> path_;
  MetricWriter& writer_;
};

}  // namespace

void MetricService::Get(ServerContext&,
                        const pw_metric_MetricRequest& /* request */,
                        ServerWriter<pw_metric_MetricResponse>& response) {
  // For now, ignore the request and just stream all the metrics back.
  MetricWriter writer(response);
  MetricWalker walker(writer);

  // This streams all the metrics within the span of this Get() method call,
  // which blocks the RPC thread until all the metrics are sent. That is
  // likely to cause problems if there are many metrics, or if other RPCs are
  // higher priority and should complete first.
  //
  // In the future, this should be replaced with an optional async solution
  // that puts the application in control of when the response batches are sent.
  walker.Walk(metrics_);
  walker.Walk(groups_);
  writer.Flush();
}
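
// Example of registering this service with an RPC server (a minimal sketch;
// `server`, `metrics`, and `groups` are assumed to be defined elsewhere):
//
//   pw::metric::MetricService metric_service(metrics, groups);
//   server.RegisterService(metric_service);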

}  // namespace pw::metric