blob: 70d3381e267ee665c37cc2da38473b9d5f6eff90 [file] [log] [blame]
morrita373af03b2014-09-09 19:35:241// Copyright 2014 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
avi246998d82015-12-22 02:39:045#include <stddef.h>
danakj03de39b22016-04-23 04:21:096#include <memory>
avi246998d82015-12-22 02:39:047
danakj03de39b22016-04-23 04:21:098#include "base/memory/ptr_util.h"
gabf64a25e2017-05-12 19:42:569#include "base/message_loop/message_loop.h"
thestig0df2bae82016-07-26 17:59:3610#include "base/process/process_metrics.h"
morritac4db5472015-03-13 20:44:3911#include "base/run_loop.h"
jamca86c9e2017-01-06 19:55:5712#include "base/strings/stringprintf.h"
13#include "base/test/perf_time_logger.h"
14#include "base/test/test_io_thread.h"
gabf08ccc02016-05-11 18:51:1115#include "base/threading/thread_task_runner_handle.h"
avi246998d82015-12-22 02:39:0416#include "build/build_config.h"
amistryd4aa70d2016-06-23 07:52:3717#include "ipc/ipc_channel_mojo.h"
jamca86c9e2017-01-06 19:55:5718#include "ipc/ipc_test.mojom.h"
19#include "ipc/ipc_test_base.h"
rockotc637caf9b2016-02-10 09:57:0820#include "mojo/edk/embedder/embedder.h"
21#include "mojo/edk/embedder/platform_channel_pair.h"
jamca86c9e2017-01-06 19:55:5722#include "mojo/edk/test/mojo_test_base.h"
sammce4d0abd2016-03-07 22:38:0423#include "mojo/edk/test/multiprocess_test_helper.h"
jamca86c9e2017-01-06 19:55:5724#include "mojo/public/cpp/bindings/binding.h"
25#include "mojo/public/cpp/system/message_pipe.h"
morrita373af03b2014-09-09 19:35:2426
sammce4d0abd2016-03-07 22:38:0427namespace IPC {
morrita373af03b2014-09-09 19:35:2428namespace {
29
jamca86c9e2017-01-06 19:55:5730// This class simply collects stats about abstract "events" (each of which has a
31// start time and an end time).
32class EventTimeTracker {
sammce4d0abd2016-03-07 22:38:0433 public:
Ken Rockot779c4642017-06-20 16:21:3134 explicit EventTimeTracker(const char* name) : name_(name), count_(0) {}
morritac4db5472015-03-13 20:44:3935
jamca86c9e2017-01-06 19:55:5736 void AddEvent(const base::TimeTicks& start, const base::TimeTicks& end) {
37 DCHECK(end >= start);
38 count_++;
39 base::TimeDelta duration = end - start;
40 total_duration_ += duration;
41 max_duration_ = std::max(max_duration_, duration);
42 }
43
44 void ShowResults() const {
45 VLOG(1) << name_ << " count: " << count_;
Ken Rockot779c4642017-06-20 16:21:3146 VLOG(1) << name_ << " total duration: " << total_duration_.InMillisecondsF()
47 << " ms";
jamca86c9e2017-01-06 19:55:5748 VLOG(1) << name_ << " average duration: "
49 << (total_duration_.InMillisecondsF() / static_cast<double>(count_))
50 << " ms";
Ken Rockot779c4642017-06-20 16:21:3151 VLOG(1) << name_ << " maximum duration: " << max_duration_.InMillisecondsF()
52 << " ms";
jamca86c9e2017-01-06 19:55:5753 }
54
55 void Reset() {
56 count_ = 0;
57 total_duration_ = base::TimeDelta();
58 max_duration_ = base::TimeDelta();
59 }
60
61 private:
62 const std::string name_;
63
64 uint64_t count_;
65 base::TimeDelta total_duration_;
66 base::TimeDelta max_duration_;
67
68 DISALLOW_COPY_AND_ASSIGN(EventTimeTracker);
69};
70
// Server-side listener for the Channel ping-pong benchmark. The initial
// "hello" payload starts a PerfTimeLogger run; every subsequent reflected
// message is counted and its round-trip latency recorded until the configured
// message count is reached, at which point the loop is quit.
class PerformanceChannelListener : public Listener {
 public:
  explicit PerformanceChannelListener(const std::string& label)
      : label_(label),
        sender_(NULL),
        msg_count_(0),
        msg_size_(0),
        count_down_(0),
        latency_tracker_("Server messages") {
    VLOG(1) << "Server listener up";
  }

  ~PerformanceChannelListener() override { VLOG(1) << "Server listener down"; }

  // Attaches the Sender used for replies; must be called exactly once.
  void Init(Sender* sender) {
    DCHECK(!sender_);
    sender_ = sender;
  }

  // Call this before running the message loop.
  void SetTestParams(int msg_count, size_t msg_size) {
    DCHECK_EQ(0, count_down_);
    msg_count_ = msg_count;
    msg_size_ = msg_size;
    count_down_ = msg_count_;
    payload_ = std::string(msg_size_, 'a');
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(sender_);

    // Wire format: send-time ticks, message id, then the payload string.
    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    std::string reflected_payload;
    EXPECT_TRUE(iter.ReadString(&reflected_payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (reflected_payload == "hello") {
      // Start timing on hello.
      latency_tracker_.Reset();
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%u", label_.c_str(), msg_count_,
                             static_cast<unsigned>(msg_size_));
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), reflected_payload.size());

      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();  // Stop the perf timer now.
        latency_tracker_.ShowResults();
        base::MessageLoop::current()->QuitWhenIdle();
        return true;
      }
    }

    // Keep the ping-pong going: send a freshly-stamped message back.
    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(count_down_);
    msg->WriteString(payload_);
    sender_->Send(msg);
    return true;
  }

 private:
  std::string label_;   // Prefix for the reported perf-test name.
  Sender* sender_;      // Not owned.
  int msg_count_;       // Total messages per run.
  size_t msg_size_;     // Payload bytes per message.

  int count_down_;      // Messages remaining in the current run.
  std::string payload_;
  EventTimeTracker latency_tracker_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;
};
156
// This channel listener just replies to all messages with the exact same
// message. It assumes each message has one string parameter. When the string
// "quit" is sent, it will exit.
class ChannelReflectorListener : public Listener {
 public:
  ChannelReflectorListener()
      : channel_(NULL), latency_tracker_("Client messages") {
    VLOG(1) << "Client listener up";
  }

  ~ChannelReflectorListener() override {
    VLOG(1) << "Client listener down";
    latency_tracker_.ShowResults();
  }

  // Attaches the Channel used for echoing; must be called exactly once.
  void Init(Channel* channel) {
    DCHECK(!channel_);
    channel_ = channel;
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(channel_);

    // Wire format: send-time ticks, message id, then the payload string.
    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    base::StringPiece payload;
    EXPECT_TRUE(iter.ReadStringPiece(&payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (payload == "hello") {
      latency_tracker_.Reset();
    } else if (payload == "quit") {
      latency_tracker_.ShowResults();
      base::MessageLoop::current()->QuitWhenIdle();
      return true;
    } else {
      // Don't track hello and quit messages.
      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);
    }

    // Echo the message back with a fresh timestamp.
    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(msgid);
    msg->WriteString(payload);
    channel_->Send(msg);
    return true;
  }

 private:
  Channel* channel_;  // Not owned.
  EventTimeTracker latency_tracker_;
};
215
// This class locks the current thread to a particular CPU core. This is
// important because otherwise the different threads and processes of these
// tests end up on different CPU cores which means that all of the cores are
// lightly loaded so the OS (Windows and Linux) fails to ramp up the CPU
// frequency, leading to unpredictable and often poor performance.
class LockThreadAffinity {
 public:
  explicit LockThreadAffinity(int cpu_number) : affinity_set_ok_(false) {
#if defined(OS_WIN)
    const DWORD_PTR thread_mask = static_cast<DWORD_PTR>(1) << cpu_number;
    // SetThreadAffinityMask returns the previous mask, or 0 on failure.
    old_affinity_ = SetThreadAffinityMask(GetCurrentThread(), thread_mask);
    affinity_set_ok_ = old_affinity_ != 0;
#elif defined(OS_LINUX)
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu_number, &cpuset);
    // Save the current mask so the destructor can restore it.
    auto get_result = sched_getaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, get_result);
    auto set_result = sched_setaffinity(0, sizeof(cpuset), &cpuset);
    // Check for get_result failure, even though it should always succeed.
    affinity_set_ok_ = (set_result == 0) && (get_result == 0);
#endif
    if (!affinity_set_ok_)
      LOG(WARNING) << "Failed to set thread affinity to CPU " << cpu_number;
  }

  // Restores the original affinity, but only if it was saved successfully.
  ~LockThreadAffinity() {
    if (!affinity_set_ok_)
      return;
#if defined(OS_WIN)
    auto set_result = SetThreadAffinityMask(GetCurrentThread(), old_affinity_);
    DCHECK_NE(0u, set_result);
#elif defined(OS_LINUX)
    auto set_result = sched_setaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, set_result);
#endif
  }

 private:
  bool affinity_set_ok_;
#if defined(OS_WIN)
  DWORD_PTR old_affinity_;
#elif defined(OS_LINUX)
  cpu_set_t old_cpuset_;
#endif

  DISALLOW_COPY_AND_ASSIGN(LockThreadAffinity);
};
264
// Bundles one (payload size, message count) combination for a ping-pong run.
class PingPongTestParams {
 public:
  PingPongTestParams(size_t size, int count) : size_(size), count_(count) {}

  size_t message_size() const { return size_; }
  int message_count() const { return count_; }

 private:
  size_t size_;
  int count_;
};
277
278std::vector<PingPongTestParams> GetDefaultTestParams() {
Ken Rockot779c4642017-06-20 16:21:31279// Test several sizes. We use 12^N for message size, and limit the message
280// count to keep the test duration reasonable.
jamca86c9e2017-01-06 19:55:57281#ifdef NDEBUG
282 const int kMultiplier = 100;
283#else
284 // Debug builds on Windows run these tests orders of magnitude more slowly.
285 const int kMultiplier = 1;
286#endif
287 std::vector<PingPongTestParams> list;
288 list.push_back(PingPongTestParams(12, 500 * kMultiplier));
289 list.push_back(PingPongTestParams(144, 500 * kMultiplier));
290 list.push_back(PingPongTestParams(1728, 500 * kMultiplier));
291 list.push_back(PingPongTestParams(20736, 120 * kMultiplier));
292 list.push_back(PingPongTestParams(248832, 10 * kMultiplier));
293 return list;
294}
295
// Core that all test threads are pinned to (see LockThreadAffinity).
// Avoid core 0 due to conflicts with Intel's Power Gadget.
// Setting thread affinity will fail harmlessly on single/dual core machines.
const int kSharedCore = 2;
299
// Benchmarks ChannelMojo (and ChannelProxy) against a child process that
// reflects every message; see PerformanceChannelListener for how latency is
// recorded.
class MojoChannelPerfTest : public IPCChannelMojoTestBase {
 public:
  MojoChannelPerfTest() = default;
  ~MojoChannelPerfTest() override = default;

  void RunTestChannelPingPong() {
    Init("MojoPerfTestClient");

    // Set up IPC channel and start client.
    PerformanceChannelListener listener("Channel");
    CreateChannel(&listener);
    listener.Init(channel());
    ASSERT_TRUE(ConnectChannel());

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      listener.SetTestParams(params[i].message_count(),
                             params[i].message_size());

      // This initial message will kick-start the ping-pong of messages.
      Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
      message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
      message->WriteInt(-1);
      message->WriteString("hello");
      sender()->Send(message);

      // Run message loop; the listener quits it when the run completes.
      base::RunLoop().Run();
    }

    // Send quit message.
    Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("quit");
    sender()->Send(message);

    EXPECT_TRUE(WaitForClientShutdown());
    DestroyChannel();
  }

  void RunTestChannelProxyPingPong() {
    // ChannelProxy needs a dedicated IO thread.
    io_thread_.reset(new base::TestIOThread(base::TestIOThread::kAutoStart));

    Init("MojoPerfTestClient");

    // Set up IPC channel and start client.
    PerformanceChannelListener listener("ChannelProxy");
    auto channel_proxy = IPC::ChannelProxy::Create(
        TakeHandle().release(), IPC::Channel::MODE_SERVER, &listener,
        io_thread_->task_runner());
    listener.Init(channel_proxy.get());

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      listener.SetTestParams(params[i].message_count(),
                             params[i].message_size());

      // This initial message will kick-start the ping-pong of messages.
      Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
      message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
      message->WriteInt(-1);
      message->WriteString("hello");
      channel_proxy->Send(message);

      // Run message loop; the listener quits it when the run completes.
      base::RunLoop().Run();
    }

    // Send quit message.
    Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("quit");
    channel_proxy->Send(message);

    EXPECT_TRUE(WaitForClientShutdown());
    channel_proxy.reset();

    io_thread_.reset();
  }

  // Returns the IO thread's runner when one exists, else the current thread's.
  scoped_refptr<base::TaskRunner> io_task_runner() {
    if (io_thread_)
      return io_thread_->task_runner();
    return base::ThreadTaskRunnerHandle::Get();
  }

 private:
  std::unique_ptr<base::TestIOThread> io_thread_;
};
393
morrita373af03b2014-09-09 19:35:24394TEST_F(MojoChannelPerfTest, ChannelPingPong) {
jamca86c9e2017-01-06 19:55:57395 RunTestChannelPingPong();
morritac4db5472015-03-13 20:44:39396
397 base::RunLoop run_loop;
398 run_loop.RunUntilIdle();
morrita373af03b2014-09-09 19:35:24399}
400
401TEST_F(MojoChannelPerfTest, ChannelProxyPingPong) {
jamca86c9e2017-01-06 19:55:57402 RunTestChannelProxyPingPong();
morritac4db5472015-03-13 20:44:39403
404 base::RunLoop run_loop;
405 run_loop.RunUntilIdle();
morrita373af03b2014-09-09 19:35:24406}
407
jam76bcf0c2015-10-02 21:01:28408// Test to see how many channels we can create.
409TEST_F(MojoChannelPerfTest, DISABLED_MaxChannelCount) {
410#if defined(OS_POSIX)
411 LOG(INFO) << "base::GetMaxFds " << base::GetMaxFds();
412 base::SetFdLimit(20000);
413#endif
414
rockotc637caf9b2016-02-10 09:57:08415 std::vector<mojo::edk::PlatformChannelPair*> channels;
jam76bcf0c2015-10-02 21:01:28416 for (size_t i = 0; i < 10000; ++i) {
417 LOG(INFO) << "channels size: " << channels.size();
rockotc637caf9b2016-02-10 09:57:08418 channels.push_back(new mojo::edk::PlatformChannelPair());
jam76bcf0c2015-10-02 21:01:28419 }
420}
421
// Client-process driver: hosts a ChannelReflectorListener over a ChannelMojo
// and runs until the server sends "quit".
class MojoPerfTestClient {
 public:
  MojoPerfTestClient() : listener_(new ChannelReflectorListener()) {
    mojo::edk::test::MultiprocessTestHelper::ChildSetup();
  }

  ~MojoPerfTestClient() = default;

  // Runs the reflector loop over |handle|; returns the process exit code.
  int Run(MojoHandle handle) {
    handle_ = mojo::MakeScopedHandle(mojo::MessagePipeHandle(handle));
    LockThreadAffinity thread_locker(kSharedCore);
    std::unique_ptr<Channel> channel = ChannelMojo::Create(
        std::move(handle_), Channel::MODE_CLIENT, listener_.get());
    listener_->Init(channel.get());
    CHECK(channel->Connect());

    base::RunLoop().Run();
    return 0;
  }

 private:
  base::MessageLoopForIO main_message_loop_;
  std::unique_ptr<ChannelReflectorListener> listener_;
  // NOTE(review): appears unused — Run() keeps its channel on the stack.
  std::unique_ptr<Channel> channel_;
  mojo::ScopedMessagePipeHandle handle_;
};
448
// Child-process entry point for the Channel ping-pong tests above.
MULTIPROCESS_TEST_MAIN(MojoPerfTestClientTestChildMain) {
  MojoPerfTestClient client;
  int rv = mojo::edk::test::MultiprocessTestHelper::RunClientMain(
      base::Bind(&MojoPerfTestClient::Run, base::Unretained(&client)),
      true /* pass_pipe_ownership_to_main */);

  // Drain any remaining tasks before exiting.
  base::RunLoop run_loop;
  run_loop.RunUntilIdle();

  return rv;
}
460
// Backs the IPC::mojom::Reflector interface: echoes each Ping payload to its
// callback and quits the current message loop on Quit().
class ReflectorImpl : public IPC::mojom::Reflector {
 public:
  explicit ReflectorImpl(mojo::ScopedMessagePipeHandle handle)
      : binding_(this, IPC::mojom::ReflectorRequest(std::move(handle))) {}
  ~ReflectorImpl() override {
    // Releases (leaks) the raw pipe handle instead of closing it.
    // NOTE(review): presumably avoids racing the peer's teardown — confirm.
    ignore_result(binding_.Unbind().PassMessagePipe().release());
  }

 private:
  // IPC::mojom::Reflector:
  void Ping(const std::string& value, PingCallback callback) override {
    std::move(callback).Run(value);
  }

  void Quit() override { base::MessageLoop::current()->QuitWhenIdle(); }

  mojo::Binding<IPC::mojom::Reflector> binding_;
};
479
// Drives a ping-pong over the IPC::mojom::Reflector Mojo interface, measuring
// round-trip time per payload size; compare with the raw-Message variants
// above.
class MojoInterfacePerfTest : public mojo::edk::test::MojoTestBase {
 public:
  MojoInterfacePerfTest() : message_count_(0), count_down_(0) {}

 protected:
  void RunPingPongServer(MojoHandle mp, const std::string& label) {
    label_ = label;

    // Take ownership of |mp| and bind it to the Reflector interface.
    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);
    ping_receiver_.Bind(IPC::mojom::ReflectorPtrInfo(std::move(scoped_mp), 0u));

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      // The initial "hello" starts the timer in OnPong().
      ping_receiver_->Ping("hello", base::Bind(&MojoInterfacePerfTest::OnPong,
                                               base::Unretained(this)));
      message_count_ = count_down_ = params[i].message_count();
      payload_ = std::string(params[i].message_size(), 'a');

      base::RunLoop().Run();
    }

    ping_receiver_->Quit();

    // Releases (leaks) the raw pipe handle instead of closing it.
    // NOTE(review): presumably avoids racing the peer's teardown — confirm.
    ignore_result(ping_receiver_.PassInterface().PassHandle().release());
  }

  void OnPong(const std::string& value) {
    if (value == "hello") {
      // Start timing on the initial hello.
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%zu", label_.c_str(),
                             message_count_, payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), value.size());

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();  // Stop the perf timer now.
        base::MessageLoop::current()->QuitWhenIdle();
        return;
      }
    }

    ping_receiver_->Ping(payload_, base::Bind(&MojoInterfacePerfTest::OnPong,
                                              base::Unretained(this)));
  }

  static int RunPingPongClient(MojoHandle mp) {
    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);

    // In single process mode, this is running in a task and by default other
    // tasks (in particular, the binding) won't run. To keep the single process
    // and multi-process code paths the same, enable nestable tasks.
    base::MessageLoop::ScopedNestableTaskAllower nest_loop(
        base::MessageLoop::current());

    LockThreadAffinity thread_locker(kSharedCore);
    ReflectorImpl impl(std::move(scoped_mp));
    base::RunLoop().Run();
    return 0;
  }

 private:
  int message_count_;  // Messages per run.
  int count_down_;     // Messages remaining in the current run.
  std::string label_;  // Prefix for the reported perf-test name.
  std::string payload_;
  IPC::mojom::ReflectorPtr ping_receiver_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;

  DISALLOW_COPY_AND_ASSIGN(MojoInterfacePerfTest);
};
557
// Whether in-process Mojo messages are eagerly serialized or may be passed
// unserialized (see MojoInProcessInterfacePerfTest).
enum class InProcessMessageMode {
  kSerialized,
  kUnserialized,
};
562
// Parameterized over InProcessMessageMode: configures Mojo's default message
// serialization behavior before each test body runs.
class MojoInProcessInterfacePerfTest
    : public MojoInterfacePerfTest,
      public testing::WithParamInterface<InProcessMessageMode> {
 public:
  MojoInProcessInterfacePerfTest() {
    switch (GetParam()) {
      case InProcessMessageMode::kSerialized:
        // Serialize outgoing messages eagerly.
        mojo::Connector::OverrideDefaultSerializationBehaviorForTesting(
            mojo::Connector::OutgoingSerializationMode::kEager,
            mojo::Connector::IncomingSerializationMode::kDispatchAsIs);
        break;
      case InProcessMessageMode::kUnserialized:
        // Defer serialization of outgoing messages.
        mojo::Connector::OverrideDefaultSerializationBehaviorForTesting(
            mojo::Connector::OutgoingSerializationMode::kLazy,
            mojo::Connector::IncomingSerializationMode::kDispatchAsIs);
        break;
    }
  }
};
582
// Child entry point for the multiprocess Mojo-interface ping-pong test.
DEFINE_TEST_CLIENT_WITH_PIPE(PingPongClient, MojoInterfacePerfTest, h) {
  base::MessageLoop main_message_loop;
  return RunPingPongClient(h);
}
587
// Similar to MojoChannelPerfTest above, but uses a Mojo interface instead of
// raw IPC::Messages.
TEST_F(MojoInterfacePerfTest, MultiprocessPingPong) {
  RunTestClient("PingPongClient", [&](MojoHandle h) {
    base::MessageLoop main_message_loop;
    RunPingPongServer(h, "Multiprocess");
  });
}
596
// A single process version of the above test.
TEST_P(MojoInProcessInterfacePerfTest, MultiThreadPingPong) {
  MojoHandle server_handle, client_handle;
  CreateMessagePipe(&server_handle, &client_handle);

  // Run the reflector client on a separate thread.
  base::Thread client_thread("PingPongClient");
  client_thread.Start();
  client_thread.task_runner()->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&RunPingPongClient), client_handle));

  base::MessageLoop main_message_loop;
  RunPingPongServer(server_handle, "SingleProcess");
}
611
// Like MultiThreadPingPong, but both pipe ends share one thread and loop.
TEST_P(MojoInProcessInterfacePerfTest, SingleThreadPingPong) {
  MojoHandle server_handle, client_handle;
  CreateMessagePipe(&server_handle, &client_handle);

  base::MessageLoop main_message_loop;
  mojo::MessagePipeHandle mp_handle(client_handle);
  mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);
  LockThreadAffinity thread_locker(kSharedCore);
  ReflectorImpl impl(std::move(scoped_mp));

  RunPingPongServer(server_handle, "SingleProcess");
}
624
// Run each in-process test with both serialized and unserialized messages.
INSTANTIATE_TEST_CASE_P(,
                        MojoInProcessInterfacePerfTest,
                        testing::Values(InProcessMessageMode::kSerialized,
                                        InProcessMessageMode::kUnserialized));
629
jamca86c9e2017-01-06 19:55:57630class CallbackPerfTest : public testing::Test {
631 public:
632 CallbackPerfTest()
633 : client_thread_("PingPongClient"), message_count_(0), count_down_(0) {}
634
635 protected:
jamf9fa5b802017-01-14 00:28:02636 void RunMultiThreadPingPongServer() {
jamca86c9e2017-01-06 19:55:57637 client_thread_.Start();
638
639 LockThreadAffinity thread_locker(kSharedCore);
640 std::vector<PingPongTestParams> params = GetDefaultTestParams();
641 for (size_t i = 0; i < params.size(); i++) {
642 std::string hello("hello");
643 client_thread_.task_runner()->PostTask(
644 FROM_HERE,
645 base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), hello));
646 message_count_ = count_down_ = params[i].message_count();
647 payload_ = std::string(params[i].message_size(), 'a');
648
649 base::RunLoop().Run();
650 }
651 }
652
653 void Ping(const std::string& value) {
jamf9fa5b802017-01-14 00:28:02654 main_message_loop_.task_runner()->PostTask(
jamca86c9e2017-01-06 19:55:57655 FROM_HERE,
Ken Rockot779c4642017-06-20 16:21:31656 base::Bind(&CallbackPerfTest::OnPong, base::Unretained(this), value));
jamca86c9e2017-01-06 19:55:57657 }
658
659 void OnPong(const std::string& value) {
660 if (value == "hello") {
661 DCHECK(!perf_logger_.get());
662 std::string test_name =
jamf9fa5b802017-01-14 00:28:02663 base::StringPrintf("Callback_MultiProcess_Perf_%dx_%zu",
Ken Rockot779c4642017-06-20 16:21:31664 message_count_, payload_.size());
jamca86c9e2017-01-06 19:55:57665 perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
666 } else {
667 DCHECK_EQ(payload_.size(), value.size());
668
669 CHECK(count_down_ > 0);
670 count_down_--;
671 if (count_down_ == 0) {
672 perf_logger_.reset();
673 base::MessageLoop::current()->QuitWhenIdle();
674 return;
675 }
676 }
677
678 client_thread_.task_runner()->PostTask(
679 FROM_HERE,
680 base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), payload_));
681 }
682
jamf9fa5b802017-01-14 00:28:02683 void RunSingleThreadNoPostTaskPingPongServer() {
684 LockThreadAffinity thread_locker(kSharedCore);
685 std::vector<PingPongTestParams> params = GetDefaultTestParams();
686 base::Callback<void(const std::string&,
Ken Rockot779c4642017-06-20 16:21:31687 const base::Callback<void(const std::string&)>&)>
688 ping = base::Bind(&CallbackPerfTest::SingleThreadPingNoPostTask,
689 base::Unretained(this));
jamf9fa5b802017-01-14 00:28:02690 for (size_t i = 0; i < params.size(); i++) {
691 payload_ = std::string(params[i].message_size(), 'a');
692 std::string test_name =
693 base::StringPrintf("Callback_SingleThreadPostTask_Perf_%dx_%zu",
Ken Rockot779c4642017-06-20 16:21:31694 params[i].message_count(), payload_.size());
jamf9fa5b802017-01-14 00:28:02695 perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
696 for (int j = 0; j < params[i].message_count(); ++j) {
697 ping.Run(payload_,
698 base::Bind(&CallbackPerfTest::SingleThreadPongNoPostTask,
699 base::Unretained(this)));
700 }
701 perf_logger_.reset();
702 }
703 }
704
Ken Rockot779c4642017-06-20 16:21:31705 void SingleThreadPingNoPostTask(
706 const std::string& value,
707 const base::Callback<void(const std::string&)>& pong) {
jamf9fa5b802017-01-14 00:28:02708 pong.Run(value);
709 }
710
Ken Rockot779c4642017-06-20 16:21:31711 void SingleThreadPongNoPostTask(const std::string& value) {}
jamf9fa5b802017-01-14 00:28:02712
713 void RunSingleThreadPostTaskPingPongServer() {
714 LockThreadAffinity thread_locker(kSharedCore);
715 std::vector<PingPongTestParams> params = GetDefaultTestParams();
716 for (size_t i = 0; i < params.size(); i++) {
717 std::string hello("hello");
718 base::MessageLoop::current()->task_runner()->PostTask(
Ken Rockot779c4642017-06-20 16:21:31719 FROM_HERE, base::Bind(&CallbackPerfTest::SingleThreadPingPostTask,
720 base::Unretained(this), hello));
jamf9fa5b802017-01-14 00:28:02721 message_count_ = count_down_ = params[i].message_count();
722 payload_ = std::string(params[i].message_size(), 'a');
723
724 base::RunLoop().Run();
725 }
726 }
727
728 void SingleThreadPingPostTask(const std::string& value) {
729 base::MessageLoop::current()->task_runner()->PostTask(
Ken Rockot779c4642017-06-20 16:21:31730 FROM_HERE, base::Bind(&CallbackPerfTest::SingleThreadPongPostTask,
731 base::Unretained(this), value));
jamf9fa5b802017-01-14 00:28:02732 }
733
734 void SingleThreadPongPostTask(const std::string& value) {
735 if (value == "hello") {
736 DCHECK(!perf_logger_.get());
737 std::string test_name =
738 base::StringPrintf("Callback_SingleThreadNoPostTask_Perf_%dx_%zu",
Ken Rockot779c4642017-06-20 16:21:31739 message_count_, payload_.size());
jamf9fa5b802017-01-14 00:28:02740 perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
741 } else {
742 DCHECK_EQ(payload_.size(), value.size());
743
744 CHECK(count_down_ > 0);
745 count_down_--;
746 if (count_down_ == 0) {
747 perf_logger_.reset();
748 base::MessageLoop::current()->QuitWhenIdle();
749 return;
750 }
751 }
752
753 base::MessageLoop::current()->task_runner()->PostTask(
Ken Rockot779c4642017-06-20 16:21:31754 FROM_HERE, base::Bind(&CallbackPerfTest::SingleThreadPingPostTask,
755 base::Unretained(this), payload_));
jamf9fa5b802017-01-14 00:28:02756 }
757
jamca86c9e2017-01-06 19:55:57758 private:
759 base::Thread client_thread_;
jamf9fa5b802017-01-14 00:28:02760 base::MessageLoop main_message_loop_;
jamca86c9e2017-01-06 19:55:57761 int message_count_;
762 int count_down_;
763 std::string payload_;
764 std::unique_ptr<base::PerfTimeLogger> perf_logger_;
765
766 DISALLOW_COPY_AND_ASSIGN(CallbackPerfTest);
767};
768
// Sends the same data as above using PostTask to a different thread instead of
// IPCs for comparison.
TEST_F(CallbackPerfTest, MultiThreadPingPong) {
  RunMultiThreadPingPongServer();
}
774
// Sends the same data as above using PostTask to the same thread.
TEST_F(CallbackPerfTest, SingleThreadPostTaskPingPong) {
  RunSingleThreadPostTaskPingPongServer();
}
779
// Sends the same data as above without using PostTask to the same thread.
TEST_F(CallbackPerfTest, SingleThreadNoPostTaskPingPong) {
  RunSingleThreadNoPostTaskPingPongServer();
}
784
morrita373af03b2014-09-09 19:35:24785} // namespace
sammce4d0abd2016-03-07 22:38:04786} // namespace IPC