1 /*
2 *
3 * Copyright 2015 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19 #ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_H
20 #define GRPC_CORE_LIB_SURFACE_CHANNEL_H
21
#include <grpc/support/port_platform.h>

#include <map>
#include <string>
#include <utility>

#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/metadata.h"
32
/** Creates a channel for \a target, configured from \a args, with a channel
    stack of kind \a channel_stack_type. \a optional_transport, when non-null,
    supplies the transport for the stack; \a resource_user may be null. If
    \a error is non-null it receives failure details
    (NOTE(review): exact failure semantics not visible here — confirm in
    channel.cc). */
grpc_channel* grpc_channel_create(const char* target,
                                  const grpc_channel_args* args,
                                  grpc_channel_stack_type channel_stack_type,
                                  grpc_transport* optional_transport,
                                  grpc_resource_user* resource_user = nullptr,
                                  grpc_error_handle* error = nullptr);
39
/** The same as grpc_channel_destroy, but doesn't create an ExecCtx, and so
 * is safe to use from within core. */
void grpc_channel_destroy_internal(grpc_channel* channel);

/** Creates a channel from an already-populated \a builder, producing a stack
    of kind \a channel_stack_type. If \a error is non-null it receives failure
    details (NOTE(review): whether the builder is consumed on failure is not
    visible in this header — confirm in channel.cc). */
grpc_channel* grpc_channel_create_with_builder(
    grpc_channel_stack_builder* builder,
    grpc_channel_stack_type channel_stack_type,
    grpc_error_handle* error = nullptr);
48
/** Create a call given a grpc_channel, in order to call \a method.
    Progress is tied to activity on \a pollset_set. The returned call object is
    meant to be used with \a grpc_call_start_batch_and_execute, which relies on
    callbacks to signal completions. \a method and \a host need
    only live through the invocation of this function. If \a parent_call is
    non-NULL, it must be a server-side call. It will be used to propagate
    properties from the server call to this new client call, depending on the
    value of \a propagation_mask (see propagation_bits.h for possible values).
    NOTE(review): \a reserved appears to be an opaque placeholder — confirm
    callers pass nullptr. */
grpc_call* grpc_channel_create_pollset_set_call(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_pollset_set* pollset_set, const grpc_slice& method,
    const grpc_slice* host, grpc_millis deadline, void* reserved);
61
/** Get a (borrowed) pointer to this channel's underlying channel stack */
grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel);

/** Get a (borrowed) pointer to the channel's channelz node, if any. */
grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
    grpc_channel* channel);

/** Read the current per-call allocation size estimate for \a channel, and
    feed an observed call \a size back into that estimate (see the atomic
    \c call_size_estimate member below). */
size_t grpc_channel_get_call_size_estimate(grpc_channel* channel);
void grpc_channel_update_call_size_estimate(grpc_channel* channel, size_t size);
70
namespace grpc_core {

// One registered (method, host) pair together with the metadata elements
// derived from it. Entries are stored in CallRegistrationTable below.
struct RegisteredCall {
  // The method and host are kept as part of this struct just to manage their
  // lifetime since they must outlive the mdelem contents.
  std::string method;
  std::string host;

  // Metadata elements for this call's path and authority (presumably built
  // from method/host by the constructor — confirm in channel.cc).
  grpc_mdelem path;
  grpc_mdelem authority;

  explicit RegisteredCall(const char* method_arg, const char* host_arg);
  // TODO(vjpai): delete copy constructor once all supported compilers allow
  // std::map value_type to be MoveConstructible.
  RegisteredCall(const RegisteredCall& other);
  RegisteredCall(RegisteredCall&& other) noexcept;
  // Assignment is deleted; entries are only ever constructed into the map.
  RegisteredCall& operator=(const RegisteredCall&) = delete;
  RegisteredCall& operator=(RegisteredCall&&) = delete;

  ~RegisteredCall();
};

// Mutex-guarded table of RegisteredCall entries, keyed by (method, host).
struct CallRegistrationTable {
  grpc_core::Mutex mu;
  // The map key should be owned strings rather than unowned char*'s to
  // guarantee that it outlives calls on the core channel (which may outlast the
  // C++ or other wrapped language Channel that registered these calls).
  std::map<std::pair<std::string, std::string>, RegisteredCall> map
      ABSL_GUARDED_BY(mu);
  // Count of registration attempts on this channel (NOTE(review): consumers
  // of this counter are not visible in this header — confirm in channel.cc).
  int method_registration_attempts ABSL_GUARDED_BY(mu) = 0;
};

}  // namespace grpc_core
104
// Core channel object. The channel's grpc_channel_stack is laid out in the
// same allocation, immediately after this struct (see
// CHANNEL_STACK_FROM_CHANNEL below).
struct grpc_channel {
  int is_client;  // presumably nonzero for client-side channels — name-based
  grpc_compression_options compression_options;

  // Atomically-updated estimate of per-call allocation size; see the
  // get/update_call_size_estimate functions declared above.
  gpr_atm call_size_estimate;
  grpc_resource_user* resource_user;

  // TODO(vjpai): Once the grpc_channel is allocated via new rather than malloc,
  // expand the members of the CallRegistrationTable directly into
  // the grpc_channel. For now it is kept separate so that all the
  // manual constructing can be done with a single call rather than
  // a separate manual construction for each field.
  grpc_core::ManualConstructor<grpc_core::CallRegistrationTable>
      registration_table;
  grpc_core::RefCountedPtr<grpc_core::channelz::ChannelNode> channelz_node;

  // Target address string (NOTE(review): presumably owned and freed by the
  // channel — confirm in channel.cc).
  char* target;
};
// The channel stack is co-allocated directly after the grpc_channel object,
// so `(c) + 1` is the address of the stack.
#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack*)((c) + 1))
124
grpc_channel_compression_options(const grpc_channel * channel)125 inline grpc_compression_options grpc_channel_compression_options(
126 const grpc_channel* channel) {
127 return channel->compression_options;
128 }
129
grpc_channel_get_channel_stack(grpc_channel * channel)130 inline grpc_channel_stack* grpc_channel_get_channel_stack(
131 grpc_channel* channel) {
132 return CHANNEL_STACK_FROM_CHANNEL(channel);
133 }
134
grpc_channel_get_channelz_node(grpc_channel * channel)135 inline grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
136 grpc_channel* channel) {
137 return channel->channelz_node.get();
138 }
139
#ifndef NDEBUG
/* Debug builds: the internal ref/unref helpers forward a \a reason string to
   the channel-stack refcount macros, enabling refcount tracing. */
inline void grpc_channel_internal_ref(grpc_channel* channel,
                                      const char* reason) {
  GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(channel), reason);
}
inline void grpc_channel_internal_unref(grpc_channel* channel,
                                        const char* reason) {
  GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(channel), reason);
}
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
  grpc_channel_internal_ref(channel, reason)
#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
  grpc_channel_internal_unref(channel, reason)
#else
/* Release builds: the reason argument is dropped at the macro layer and a
   fixed "unused" tag is passed through instead. */
inline void grpc_channel_internal_ref(grpc_channel* channel) {
  GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(channel), "unused");
}
inline void grpc_channel_internal_unref(grpc_channel* channel) {
  GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(channel), "unused");
}
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
  grpc_channel_internal_ref(channel)
#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
  grpc_channel_internal_unref(channel)
#endif
165
// Return the channel's compression options.
grpc_compression_options grpc_channel_compression_options(
    const grpc_channel* channel);

// Ping the channel's peer (load balanced channels will select one sub-channel
// to ping); if the channel is not connected, posts a failed completion for
// \a tag on \a cq.
void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq,
                       void* tag, void* reserved);
174
175 #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_H */
176