1 /*
2 *
3 * Copyright 2015 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19 #ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_H
20 #define GRPC_CORE_LIB_SURFACE_CHANNEL_H
21
22 #include <grpc/support/port_platform.h>
23
24 #include <map>
25
26 #include "src/core/lib/channel/channel_stack.h"
27 #include "src/core/lib/channel/channel_stack_builder.h"
28 #include "src/core/lib/channel/channelz.h"
29 #include "src/core/lib/gprpp/manual_constructor.h"
30 #include "src/core/lib/surface/channel_stack_type.h"
31 #include "src/core/lib/transport/metadata.h"
32
/** Creates a channel for \a target, configured by \a args, with a channel
    stack of kind \a channel_stack_type. \a optional_transport, when non-null,
    supplies an already-created transport for the stack — presumably used for
    direct/server-side channels; confirm against callers. \a resource_user,
    when non-null, is stored on the channel for memory accounting. On failure
    returns nullptr and, if \a error is non-null, reports the failure reason
    through it. */
grpc_channel* grpc_channel_create(const char* target,
                                  const grpc_channel_args* args,
                                  grpc_channel_stack_type channel_stack_type,
                                  grpc_transport* optional_transport,
                                  grpc_resource_user* resource_user = nullptr,
                                  grpc_error** error = nullptr);
39
/** The same as grpc_channel_destroy, but doesn't create an ExecCtx, and so
 * is safe to use from within core (where an ExecCtx is already on the
 * stack). */
void grpc_channel_destroy_internal(grpc_channel* channel);
43
/** Creates a channel from an already-populated \a builder, producing a stack
    of kind \a channel_stack_type. On failure returns nullptr and, if \a error
    is non-null, reports the failure reason through it. Ownership of
    \a builder after this call is not visible here — confirm at the
    definition. */
grpc_channel* grpc_channel_create_with_builder(
    grpc_channel_stack_builder* builder,
    grpc_channel_stack_type channel_stack_type, grpc_error** error = nullptr);
47
/** Create a call given a grpc_channel, in order to call \a method.
    Progress is tied to activity on \a pollset_set. The returned call object is
    meant to be used with \a grpc_call_start_batch_and_execute, which relies on
    callbacks to signal completions. \a method and \a host need
    only live through the invocation of this function. If \a parent_call is
    non-NULL, it must be a server-side call. It will be used to propagate
    properties from the server call to this new client call, depending on the
    value of \a propagation_mask (see propagation_bits.h for possible values).
    \a reserved must be NULL. */
grpc_call* grpc_channel_create_pollset_set_call(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_pollset_set* pollset_set, const grpc_slice& method,
    const grpc_slice* host, grpc_millis deadline, void* reserved);
60
/** Get a (borrowed) pointer to this channel's underlying channel stack. */
grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel);

/** Get a (borrowed) pointer to the channel's channelz node, or nullptr if
    channelz is not enabled for this channel. */
grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
    grpc_channel* channel);

/** Read/update the channel's running estimate of per-call allocation size;
    the estimate is stored in grpc_channel::call_size_estimate. */
size_t grpc_channel_get_call_size_estimate(grpc_channel* channel);
void grpc_channel_update_call_size_estimate(grpc_channel* channel, size_t size);
69
namespace grpc_core {

// Holds the interned metadata elements (:path and :authority) for one
// registered (method, host) pair, so repeated calls can reuse them.
struct RegisteredCall {
  // The method and host are kept as part of this struct just to manage their
  // lifetime since they must outlive the mdelem contents.
  std::string method;
  std::string host;

  grpc_mdelem path;
  grpc_mdelem authority;

  explicit RegisteredCall(const char* method_arg, const char* host_arg);
  // TODO(vjpai): delete copy constructor once all supported compilers allow
  // std::map value_type to be MoveConstructible.
  RegisteredCall(const RegisteredCall& other);
  RegisteredCall(RegisteredCall&& other) noexcept;
  // Assignment is not needed: entries live in a map and are never reassigned.
  RegisteredCall& operator=(const RegisteredCall&) = delete;
  RegisteredCall& operator=(RegisteredCall&&) = delete;

  ~RegisteredCall();
};

// Mutex-guarded table of all calls registered on a channel, keyed by
// (method, host).
struct CallRegistrationTable {
  grpc_core::Mutex mu;
  // The map key should be owned strings rather than unowned char*'s to
  // guarantee that it outlives calls on the core channel (which may outlast the
  // C++ or other wrapped language Channel that registered these calls).
  std::map<std::pair<std::string, std::string>, RegisteredCall>
      map /* GUARDED_BY(mu) */;
  // Count of registration attempts — presumably reported via channelz;
  // confirm at the use site.
  int method_registration_attempts /* GUARDED_BY(mu) */ = 0;
};

}  // namespace grpc_core
103
// The C-core channel object. The channel stack is allocated contiguously
// immediately after this struct (see CHANNEL_STACK_FROM_CHANNEL below).
struct grpc_channel {
  int is_client;
  grpc_compression_options compression_options;

  // Atomically-updated running estimate of per-call allocation size; see
  // grpc_channel_get/update_call_size_estimate.
  gpr_atm call_size_estimate;
  // Optional resource user for memory accounting; may be nullptr.
  grpc_resource_user* resource_user;

  // TODO(vjpai): Once the grpc_channel is allocated via new rather than malloc,
  // expand the members of the CallRegistrationTable directly into
  // the grpc_channel. For now it is kept separate so that all the
  // manual constructing can be done with a single call rather than
  // a separate manual construction for each field.
  grpc_core::ManualConstructor<grpc_core::CallRegistrationTable>
      registration_table;
  grpc_core::RefCountedPtr<grpc_core::channelz::ChannelNode> channelz_node;

  char* target;
};
// The channel stack lives in the same allocation, directly after the
// grpc_channel struct.
#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack*)((c) + 1))
123
grpc_channel_compression_options(const grpc_channel * channel)124 inline grpc_compression_options grpc_channel_compression_options(
125 const grpc_channel* channel) {
126 return channel->compression_options;
127 }
128
grpc_channel_get_channel_stack(grpc_channel * channel)129 inline grpc_channel_stack* grpc_channel_get_channel_stack(
130 grpc_channel* channel) {
131 return CHANNEL_STACK_FROM_CHANNEL(channel);
132 }
133
grpc_channel_get_channelz_node(grpc_channel * channel)134 inline grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
135 grpc_channel* channel) {
136 return channel->channelz_node.get();
137 }
138
// Channel refs are implemented as refs on the underlying channel stack.
// Debug builds thread a human-readable \a reason through to the stack's
// refcount tracing; release builds drop the reason at the macro level.
#ifndef NDEBUG
inline void grpc_channel_internal_ref(grpc_channel* channel,
                                      const char* reason) {
  GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(channel), reason);
}
inline void grpc_channel_internal_unref(grpc_channel* channel,
                                        const char* reason) {
  GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(channel), reason);
}
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
  grpc_channel_internal_ref(channel, reason)
#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
  grpc_channel_internal_unref(channel, reason)
#else
inline void grpc_channel_internal_ref(grpc_channel* channel) {
  GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(channel), "unused");
}
inline void grpc_channel_internal_unref(grpc_channel* channel) {
  GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(channel), "unused");
}
// The reason argument is intentionally evaluated away in release builds.
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
  grpc_channel_internal_ref(channel)
#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
  grpc_channel_internal_unref(channel)
#endif
164
// Return the channel's compression options.
grpc_compression_options grpc_channel_compression_options(
    const grpc_channel* channel);

// Ping the channel's peer (load balanced channels will select one sub-channel
// to ping); if the channel is not connected, posts a failed completion to
// \a cq under \a tag. \a reserved must be NULL.
void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq,
                       void* tag, void* reserved);
173
174 #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_H */
175