/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/impl/codegen/port_platform.h>

#include <algorithm>
#include <cstring>

#include "absl/container/inlined_vector.h"

#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/sync.h"

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>

namespace grpc_core {
namespace channelz {
namespace {

// Singleton instance of the registry.
ChannelzRegistry* g_channelz_registry = nullptr;

const int kPaginationLimit = 100;

}  // anonymous namespace

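// Init() and Shutdown() manage the lifetime of the registry singleton: Init()
// must run before any node is registered, and Shutdown() releases it.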
void ChannelzRegistry::Init() { g_channelz_registry = new ChannelzRegistry(); }

void ChannelzRegistry::Shutdown() { delete g_channelz_registry; }

ChannelzRegistry* ChannelzRegistry::Default() {
  GPR_DEBUG_ASSERT(g_channelz_registry != nullptr);
  return g_channelz_registry;
}

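// Assigns the node the next uuid from the monotonically increasing generator
// and records it in the node map, all under the registry lock.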
void ChannelzRegistry::InternalRegister(BaseNode* node) {
  MutexLock lock(&mu_);
  node->uuid_ = ++uuid_generator_;
  node_map_[node->uuid_] = node;
}

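// Removes the entry for `uuid` from the node map. The uuid must have been
// handed out previously by InternalRegister().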
void ChannelzRegistry::InternalUnregister(intptr_t uuid) {
  GPR_ASSERT(uuid >= 1);
  MutexLock lock(&mu_);
  GPR_ASSERT(uuid <= uuid_generator_);
  node_map_.erase(uuid);
}

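// Looks up a node by uuid and returns a new ref to it, or nullptr if the uuid
// is out of range, unknown, or the node is already being destroyed.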
RefCountedPtr<BaseNode> ChannelzRegistry::InternalGet(intptr_t uuid) {
  MutexLock lock(&mu_);
  if (uuid < 1 || uuid > uuid_generator_) {
    return nullptr;
  }
  auto it = node_map_.find(uuid);
  if (it == node_map_.end()) return nullptr;
  // Found node. Return only if its refcount is not zero (i.e., when we
  // know that there is no other thread about to destroy it).
  BaseNode* node = it->second;
  return node->RefIfNonZero();
}

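// Renders the GetTopChannels response as JSON: collects up to
// kPaginationLimit top-level channels with uuid >= start_channel_id and sets
// "end" when no further channels remain.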
std::string ChannelzRegistry::InternalGetTopChannels(
    intptr_t start_channel_id) {
  absl::InlinedVector<RefCountedPtr<BaseNode>, 10> top_level_channels;
  RefCountedPtr<BaseNode> node_after_pagination_limit;
  {
    MutexLock lock(&mu_);
    for (auto it = node_map_.lower_bound(start_channel_id);
         it != node_map_.end(); ++it) {
      BaseNode* node = it->second;
      RefCountedPtr<BaseNode> node_ref;
      if (node->type() == BaseNode::EntityType::kTopLevelChannel &&
          (node_ref = node->RefIfNonZero()) != nullptr) {
        // Check if we are over the pagination limit to determine whether we
        // need to set the "end" element. If we don't go through this block,
        // we know that when the loop terminates, we have at most
        // kPaginationLimit elements.
        // Note that because we have already increased this node's refcount,
        // we need to decrease it, but we can't unref while holding the lock,
        // because this may lead to a deadlock.
        if (top_level_channels.size() == kPaginationLimit) {
          node_after_pagination_limit = std::move(node_ref);
          break;
        }
        top_level_channels.emplace_back(std::move(node_ref));
      }
    }
  }
  Json::Object object;
  if (!top_level_channels.empty()) {
    // Create list of channels.
    Json::Array array;
    for (size_t i = 0; i < top_level_channels.size(); ++i) {
      array.emplace_back(top_level_channels[i]->RenderJson());
    }
    object["channel"] = std::move(array);
  }
  if (node_after_pagination_limit == nullptr) object["end"] = true;
  Json json(std::move(object));
  return json.Dump();
}

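// Renders the GetServers response as JSON, paginated the same way as
// InternalGetTopChannels() but over server nodes.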
std::string ChannelzRegistry::InternalGetServers(intptr_t start_server_id) {
  absl::InlinedVector<RefCountedPtr<BaseNode>, 10> servers;
  RefCountedPtr<BaseNode> node_after_pagination_limit;
  {
    MutexLock lock(&mu_);
    for (auto it = node_map_.lower_bound(start_server_id);
         it != node_map_.end(); ++it) {
      BaseNode* node = it->second;
      RefCountedPtr<BaseNode> node_ref;
      if (node->type() == BaseNode::EntityType::kServer &&
          (node_ref = node->RefIfNonZero()) != nullptr) {
        // Check if we are over the pagination limit to determine whether we
        // need to set the "end" element. If we don't go through this block,
        // we know that when the loop terminates, we have at most
        // kPaginationLimit elements.
        // Note that because we have already increased this node's refcount,
        // we need to decrease it, but we can't unref while holding the lock,
        // because this may lead to a deadlock.
        if (servers.size() == kPaginationLimit) {
          node_after_pagination_limit = std::move(node_ref);
          break;
        }
        servers.emplace_back(std::move(node_ref));
      }
    }
  }
  Json::Object object;
  if (!servers.empty()) {
    // Create list of servers.
    Json::Array array;
    for (size_t i = 0; i < servers.size(); ++i) {
      array.emplace_back(servers[i]->RenderJson());
    }
    object["server"] = std::move(array);
  }
  if (node_after_pagination_limit == nullptr) object["end"] = true;
  Json json(std::move(object));
  return json.Dump();
}

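// Takes a ref to every live node under the lock, then renders and logs each
// one after the lock is released.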
void ChannelzRegistry::InternalLogAllEntities() {
  absl::InlinedVector<RefCountedPtr<BaseNode>, 10> nodes;
  {
    MutexLock lock(&mu_);
    for (auto& p : node_map_) {
      RefCountedPtr<BaseNode> node = p.second->RefIfNonZero();
      if (node != nullptr) {
        nodes.emplace_back(std::move(node));
      }
    }
  }
  for (size_t i = 0; i < nodes.size(); ++i) {
    std::string json = nodes[i]->RenderJsonString();
    gpr_log(GPR_INFO, "%s", json.c_str());
  }
}

}  // namespace channelz
}  // namespace grpc_core

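// C API wrappers. Each returns a heap-allocated JSON string (or nullptr) that
// the caller owns. A minimal usage sketch, assuming the caller releases the
// string with gpr_free():
//
//   char* json = grpc_channelz_get_top_channels(/*start_channel_id=*/0);
//   if (json != nullptr) {
//     gpr_log(GPR_INFO, "%s", json);
//     gpr_free(json);
//   }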
char* grpc_channelz_get_top_channels(intptr_t start_channel_id) {
  return gpr_strdup(
      grpc_core::channelz::ChannelzRegistry::GetTopChannels(start_channel_id)
          .c_str());
}

char* grpc_channelz_get_servers(intptr_t start_server_id) {
  return gpr_strdup(
      grpc_core::channelz::ChannelzRegistry::GetServers(start_server_id)
          .c_str());
}

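// Returns the JSON for a single server, or nullptr if `server_id` does not
// refer to a registered server node.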
char* grpc_channelz_get_server(intptr_t server_id) {
  grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> server_node =
      grpc_core::channelz::ChannelzRegistry::Get(server_id);
  if (server_node == nullptr ||
      server_node->type() !=
          grpc_core::channelz::BaseNode::EntityType::kServer) {
    return nullptr;
  }
  grpc_core::Json json = grpc_core::Json::Object{
      {"server", server_node->RenderJson()},
  };
  return gpr_strdup(json.Dump().c_str());
}

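// Returns the JSON for a server's sockets, or nullptr if the arguments do not
// name a valid server node or a valid pagination range.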
char* grpc_channelz_get_server_sockets(intptr_t server_id,
                                       intptr_t start_socket_id,
                                       intptr_t max_results) {
  // Validate inputs before handing them off to the renderer.
  grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> base_node =
      grpc_core::channelz::ChannelzRegistry::Get(server_id);
  if (base_node == nullptr ||
      base_node->type() != grpc_core::channelz::BaseNode::EntityType::kServer ||
      start_socket_id < 0 || max_results < 0) {
    return nullptr;
  }
  // This cast is ok since we have just checked to make sure base_node is
  // actually a server node.
  grpc_core::channelz::ServerNode* server_node =
      static_cast<grpc_core::channelz::ServerNode*>(base_node.get());
  return gpr_strdup(
      server_node->RenderServerSockets(start_socket_id, max_results).c_str());
}

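// Returns the JSON for a single channel; both top-level and internal channel
// nodes are accepted.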
char* grpc_channelz_get_channel(intptr_t channel_id) {
  grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> channel_node =
      grpc_core::channelz::ChannelzRegistry::Get(channel_id);
  if (channel_node == nullptr ||
      (channel_node->type() !=
           grpc_core::channelz::BaseNode::EntityType::kTopLevelChannel &&
       channel_node->type() !=
           grpc_core::channelz::BaseNode::EntityType::kInternalChannel)) {
    return nullptr;
  }
  grpc_core::Json json = grpc_core::Json::Object{
      {"channel", channel_node->RenderJson()},
  };
  return gpr_strdup(json.Dump().c_str());
}

char* grpc_channelz_get_subchannel(intptr_t subchannel_id) {
  grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> subchannel_node =
      grpc_core::channelz::ChannelzRegistry::Get(subchannel_id);
  if (subchannel_node == nullptr ||
      subchannel_node->type() !=
          grpc_core::channelz::BaseNode::EntityType::kSubchannel) {
    return nullptr;
  }
  grpc_core::Json json = grpc_core::Json::Object{
      {"subchannel", subchannel_node->RenderJson()},
  };
  return gpr_strdup(json.Dump().c_str());
}

char* grpc_channelz_get_socket(intptr_t socket_id) {
  grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> socket_node =
      grpc_core::channelz::ChannelzRegistry::Get(socket_id);
  if (socket_node == nullptr ||
      socket_node->type() !=
          grpc_core::channelz::BaseNode::EntityType::kSocket) {
    return nullptr;
  }
  grpc_core::Json json = grpc_core::Json::Object{
      {"socket", socket_node->RenderJson()},
  };
  return gpr_strdup(json.Dump().c_str());
}