// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ipc/ipc_perftest_util.h"

#include "base/logging.h"
#include "base/macros.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"
#include "ipc/ipc_channel_proxy.h"
#include "ipc/ipc_perftest_messages.h"
#include "mojo/core/embedder/embedder.h"
#include "mojo/core/test/multiprocess_test_helper.h"

namespace IPC {

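// Returns the Mojo embedder's IO task runner. The downcast is safe because
// Mojo's IO thread is a dedicated single thread.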
scoped_refptr<base::SingleThreadTaskRunner> GetIOThreadTaskRunner() {
  scoped_refptr<base::TaskRunner> runner = mojo::core::GetIOTaskRunner();
  return scoped_refptr<base::SingleThreadTaskRunner>(
      static_cast<base::SingleThreadTaskRunner*>(runner.get()));
}

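// ChannelReflectorListener echoes every message it receives back to the
// sender, so the parent process can time full round trips.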
ChannelReflectorListener::ChannelReflectorListener() : channel_(nullptr) {
  VLOG(1) << "Client listener up";
}

ChannelReflectorListener::~ChannelReflectorListener() {
  VLOG(1) << "Client listener down";
}

void ChannelReflectorListener::Init(Sender* channel,
                                    const base::Closure& quit_closure) {
  DCHECK(!channel_);
  channel_ = channel;
  quit_closure_ = quit_closure;
}

bool ChannelReflectorListener::OnMessageReceived(const Message& message) {
  CHECK(channel_);
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(ChannelReflectorListener, message)
    IPC_MESSAGE_HANDLER(TestMsg_Hello, OnHello)
    IPC_MESSAGE_HANDLER(TestMsg_Ping, OnPing)
    IPC_MESSAGE_HANDLER(TestMsg_SyncPing, OnSyncPing)
    IPC_MESSAGE_HANDLER(TestMsg_Quit, OnQuit)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

void ChannelReflectorListener::OnHello() {
  channel_->Send(new TestMsg_Hello);
}

void ChannelReflectorListener::OnPing(const std::string& payload) {
  channel_->Send(new TestMsg_Ping(payload));
}

void ChannelReflectorListener::OnSyncPing(const std::string& payload,
                                          std::string* response) {
  *response = payload;
}

void ChannelReflectorListener::OnQuit() {
  quit_closure_.Run();
}

void ChannelReflectorListener::Send(IPC::Message* message) {
  channel_->Send(message);
}

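// Pins the calling thread to |cpu_number| to reduce scheduler noise in perf
// measurements; the previous affinity is restored by the destructor.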
LockThreadAffinity::LockThreadAffinity(int cpu_number)
    : affinity_set_ok_(false) {
#if defined(OS_WIN)
  const DWORD_PTR thread_mask = static_cast<DWORD_PTR>(1) << cpu_number;
  old_affinity_ = SetThreadAffinityMask(GetCurrentThread(), thread_mask);
  affinity_set_ok_ = old_affinity_ != 0;
#elif defined(OS_LINUX)
  cpu_set_t cpuset;
  CPU_ZERO(&cpuset);
  CPU_SET(cpu_number, &cpuset);
  auto get_result = sched_getaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
  DCHECK_EQ(0, get_result);
  auto set_result = sched_setaffinity(0, sizeof(cpuset), &cpuset);
  // Check for get_result failure, even though it should always succeed.
  affinity_set_ok_ = (set_result == 0) && (get_result == 0);
#endif
  if (!affinity_set_ok_)
    LOG(WARNING) << "Failed to set thread affinity to CPU " << cpu_number;
}

LockThreadAffinity::~LockThreadAffinity() {
  if (!affinity_set_ok_)
    return;
#if defined(OS_WIN)
  auto set_result = SetThreadAffinityMask(GetCurrentThread(), old_affinity_);
  DCHECK_NE(0u, set_result);
#elif defined(OS_LINUX)
  auto set_result = sched_setaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
  DCHECK_EQ(0, set_result);
#endif
}

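// MojoPerfTestClient is the child-process end of the perf tests: it wraps the
// inherited message pipe in a ChannelProxy and reflects traffic until told to
// quit.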
MojoPerfTestClient::MojoPerfTestClient()
    : listener_(new ChannelReflectorListener()) {
  mojo::core::test::MultiprocessTestHelper::ChildSetup();
}

MojoPerfTestClient::~MojoPerfTestClient() = default;

int MojoPerfTestClient::Run(MojoHandle handle) {
  handle_ = mojo::MakeScopedHandle(mojo::MessagePipeHandle(handle));
  LockThreadAffinity thread_locker(kSharedCore);

  base::RunLoop run_loop;
  std::unique_ptr<ChannelProxy> channel = IPC::ChannelProxy::Create(
      handle_.release(), Channel::MODE_CLIENT, listener_.get(),
      GetIOThreadTaskRunner(), base::ThreadTaskRunnerHandle::Get());
  listener_->Init(channel.get(), run_loop.QuitWhenIdleClosure());
  run_loop.Run();
  return 0;
}

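// ReflectorImpl is the Mojo-level counterpart of ChannelReflectorListener: it
// binds the given pipe to the mojom::Reflector interface and echoes pings.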
ReflectorImpl::ReflectorImpl(mojo::ScopedMessagePipeHandle handle,
                             const base::Closure& quit_closure)
    : quit_closure_(quit_closure),
      binding_(this, IPC::mojom::ReflectorRequest(std::move(handle))) {}

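// Deliberately leak the pipe handle on teardown instead of closing it, so the
// other end does not observe a disconnect before the process exits.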
ReflectorImpl::~ReflectorImpl() {
  ignore_result(binding_.Unbind().PassMessagePipe().release());
}

void ReflectorImpl::Ping(const std::string& value, PingCallback callback) {
  std::move(callback).Run(value);
}

void ReflectorImpl::SyncPing(const std::string& value, PingCallback callback) {
  std::move(callback).Run(value);
}

void ReflectorImpl::Quit() {
  if (quit_closure_)
    quit_closure_.Run();
}

}  // namespace IPC