// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "mojo/system/proxy_message_pipe_endpoint.h"

#include <string.h>

#include "base/logging.h"
#include "base/stl_util.h"
#include "mojo/system/channel.h"

namespace mojo {
namespace system {

ProxyMessagePipeEndpoint()16 ProxyMessagePipeEndpoint::ProxyMessagePipeEndpoint()
17     : local_id_(MessageInTransit::kInvalidEndpointId),
18       remote_id_(MessageInTransit::kInvalidEndpointId),
19       is_open_(true),
20       is_peer_open_(true) {
21 }
22 
~ProxyMessagePipeEndpoint()23 ProxyMessagePipeEndpoint::~ProxyMessagePipeEndpoint() {
24   DCHECK(!is_running());
25   DCHECK(!is_attached());
26   AssertConsistentState();
27   DCHECK(paused_message_queue_.empty());
28 }
29 
Close()30 void ProxyMessagePipeEndpoint::Close() {
31   DCHECK(is_open_);
32   is_open_ = false;
33 
34   DCHECK(is_attached());
35   channel_->DetachMessagePipeEndpoint(local_id_);
36   channel_ = NULL;
37   local_id_ = MessageInTransit::kInvalidEndpointId;
38   remote_id_ = MessageInTransit::kInvalidEndpointId;
39 
40   for (std::deque<MessageInTransit*>::iterator it =
41            paused_message_queue_.begin();
42        it != paused_message_queue_.end();
43        ++it) {
44     (*it)->Destroy();
45   }
46   paused_message_queue_.clear();
47 }
48 
OnPeerClose()49 bool ProxyMessagePipeEndpoint::OnPeerClose() {
50   DCHECK(is_open_);
51   DCHECK(is_peer_open_);
52 
53   is_peer_open_ = false;
54   MessageInTransit* message =
55       MessageInTransit::Create(MessageInTransit::kTypeMessagePipe,
56                                MessageInTransit::kSubtypeMessagePipePeerClosed,
57                                NULL, 0);
58   if (CanEnqueueMessage(message, NULL) == MOJO_RESULT_OK) {
59     EnqueueMessage(message, NULL);
60   } else {
61     message->Destroy();
62     // TODO(vtl): Do something more sensible on error here?
63     LOG(WARNING) << "Failed to send peer closed control message";
64   }
65 
66   // Return false -- to indicate that we should be destroyed -- if no messages
67   // are still enqueued. (Messages may still be enqueued if we're not running
68   // yet, but our peer was closed.)
69   return !paused_message_queue_.empty();
70 }
71 
CanEnqueueMessage(const MessageInTransit *,const std::vector<Dispatcher * > * dispatchers)72 MojoResult ProxyMessagePipeEndpoint::CanEnqueueMessage(
73     const MessageInTransit* /*message*/,
74     const std::vector<Dispatcher*>* dispatchers) {
75   // TODO(vtl): Support sending handles over OS pipes.
76   if (dispatchers) {
77     NOTIMPLEMENTED();
78     return MOJO_RESULT_UNIMPLEMENTED;
79   }
80   return MOJO_RESULT_OK;
81 }
82 
83 // Note: We may have to enqueue messages even when our (local) peer isn't open
84 // -- it may have been written to and closed immediately, before we were ready.
85 // This case is handled in |Run()| (which will call us).
EnqueueMessage(MessageInTransit * message,std::vector<scoped_refptr<Dispatcher>> * dispatchers)86 void ProxyMessagePipeEndpoint::EnqueueMessage(
87     MessageInTransit* message,
88     std::vector<scoped_refptr<Dispatcher> >* dispatchers) {
89   DCHECK(is_open_);
90 
91   // TODO(vtl)
92   DCHECK(!dispatchers || dispatchers->empty());
93 
94   if (is_running()) {
95     message->set_source_id(local_id_);
96     message->set_destination_id(remote_id_);
97     // TODO(vtl): Figure out error handling here (where it's rather late) --
98     // maybe move whatever checks we can into |CanEnqueueMessage()|.
99     if (!channel_->WriteMessage(message))
100       LOG(WARNING) << "Failed to write message to channel";
101   } else {
102     paused_message_queue_.push_back(message);
103   }
104 }
105 
Attach(scoped_refptr<Channel> channel,MessageInTransit::EndpointId local_id)106 void ProxyMessagePipeEndpoint::Attach(scoped_refptr<Channel> channel,
107                                       MessageInTransit::EndpointId local_id) {
108   DCHECK(channel.get());
109   DCHECK_NE(local_id, MessageInTransit::kInvalidEndpointId);
110 
111   DCHECK(!is_attached());
112 
113   AssertConsistentState();
114   channel_ = channel;
115   local_id_ = local_id;
116   AssertConsistentState();
117 }
118 
Run(MessageInTransit::EndpointId remote_id)119 bool ProxyMessagePipeEndpoint::Run(MessageInTransit::EndpointId remote_id) {
120   // Assertions about arguments:
121   DCHECK_NE(remote_id, MessageInTransit::kInvalidEndpointId);
122 
123   // Assertions about current state:
124   DCHECK(is_attached());
125   DCHECK(!is_running());
126 
127   AssertConsistentState();
128   remote_id_ = remote_id;
129   AssertConsistentState();
130 
131   for (std::deque<MessageInTransit*>::iterator it =
132            paused_message_queue_.begin();
133        it != paused_message_queue_.end();
134        ++it) {
135     if (CanEnqueueMessage(*it, NULL) == MOJO_RESULT_OK) {
136       EnqueueMessage(*it, NULL);
137     } else {
138       (*it)->Destroy();
139       // TODO(vtl): Do something more sensible on error here?
140       LOG(WARNING) << "Failed to send message";
141       // TODO(vtl): Abort?
142     }
143   }
144   paused_message_queue_.clear();
145 
146   // If the peer is not open, we should return false since we should be
147   // destroyed.
148   return is_peer_open_;
149 }
150 
151 #ifndef NDEBUG
AssertConsistentState() const152 void ProxyMessagePipeEndpoint::AssertConsistentState() const {
153   if (is_attached()) {
154     DCHECK_NE(local_id_, MessageInTransit::kInvalidEndpointId);
155   } else {  // Not attached.
156     DCHECK_EQ(local_id_, MessageInTransit::kInvalidEndpointId);
157     DCHECK_EQ(remote_id_, MessageInTransit::kInvalidEndpointId);
158   }
159 }
160 #endif

}  // namespace system
}  // namespace mojo