Code-search hits for "benchmarks" across /external/, one block per file. The leading number on each hit is the source line number within that file; "…" marks text elided by the search tool; "[all …]" marks additional hits not shown; a trailing "in f()" names the enclosing function, and "argument"/"chapter" similarly label the enclosing scope.

/external/grpc-grpc-java/benchmarks/src/generated/main/grpc/io/grpc/benchmarks/proto/

BenchmarkServiceGrpc.java:
    1    package io.grpc.benchmarks.proto;
    30   private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
    31   io.grpc.benchmarks.proto.Messages.SimpleResponse> getUnaryCallMethod;
    35   requestType = io.grpc.benchmarks.proto.Messages.SimpleRequest.class,
    36   responseType = io.grpc.benchmarks.proto.Messages.SimpleResponse.class,
    38   public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest,
    39   io.grpc.benchmarks.proto.Messages.SimpleResponse> getUnaryCallMethod() {  in getUnaryCallMethod()
    40   …io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.proto…  in getUnaryCallMethod()
    45   …io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Messages.SimpleRequest, io.grpc.benchmarks.prot…  in getUnaryCallMethod()
    51   io.grpc.benchmarks.proto.Messages.SimpleRequest.getDefaultInstance()))  in getUnaryCallMethod()
    [all …]
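
The truncated hits at source lines 40-51 in each of these generated stubs belong to one idiom: grpc-java lazily builds each MethodDescriptor behind a volatile static field with double-checked locking. Below is a sketch of that idiom for getUnaryCallMethod(), reconstructed from the visible fragments and the stock grpc-java builder API rather than the verbatim generated file; the full method name string is an assumption.

    // Sketch of the lazy-initialization idiom grpc-java generates for method
    // descriptors; reconstructed from the fragments above, not the verbatim file.
    import io.grpc.MethodDescriptor;
    import io.grpc.benchmarks.proto.Messages;
    import io.grpc.protobuf.ProtoUtils;

    public final class BenchmarkServiceDescriptorSketch {
      private static volatile MethodDescriptor<Messages.SimpleRequest,
          Messages.SimpleResponse> getUnaryCallMethod;

      public static MethodDescriptor<Messages.SimpleRequest,
          Messages.SimpleResponse> getUnaryCallMethod() {
        MethodDescriptor<Messages.SimpleRequest, Messages.SimpleResponse> method =
            getUnaryCallMethod;
        if (method == null) {
          // Double-checked locking: the volatile field safely publishes the
          // descriptor to other threads while keeping the fast path lock-free.
          synchronized (BenchmarkServiceDescriptorSketch.class) {
            method = getUnaryCallMethod;
            if (method == null) {
              getUnaryCallMethod = method =
                  MethodDescriptor.<Messages.SimpleRequest, Messages.SimpleResponse>newBuilder()
                      .setType(MethodDescriptor.MethodType.UNARY)
                      // Full method name is an assumption; the hits do not show it.
                      .setFullMethodName(MethodDescriptor.generateFullMethodName(
                          "grpc.testing.BenchmarkService", "UnaryCall"))
                      .setRequestMarshaller(ProtoUtils.marshaller(
                          Messages.SimpleRequest.getDefaultInstance()))
                      .setResponseMarshaller(ProtoUtils.marshaller(
                          Messages.SimpleResponse.getDefaultInstance()))
                      .build();
            }
          }
        }
        return method;
      }
    }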

WorkerServiceGrpc.java:
    1    package io.grpc.benchmarks.proto;
    30   private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Control.ServerArgs,
    31   io.grpc.benchmarks.proto.Control.ServerStatus> getRunServerMethod;
    35   requestType = io.grpc.benchmarks.proto.Control.ServerArgs.class,
    36   responseType = io.grpc.benchmarks.proto.Control.ServerStatus.class,
    38   public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Control.ServerArgs,
    39   io.grpc.benchmarks.proto.Control.ServerStatus> getRunServerMethod() {  in getRunServerMethod()
    40   …io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Control.ServerArgs, io.grpc.benchmarks.proto.Con…  in getRunServerMethod()
    45   …io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Control.ServerArgs, io.grpc.benchmarks.proto.Co…  in getRunServerMethod()
    51   io.grpc.benchmarks.proto.Control.ServerArgs.getDefaultInstance()))  in getRunServerMethod()
    [all …]

ReportQpsScenarioServiceGrpc.java:
    1    package io.grpc.benchmarks.proto;
    30   private static volatile io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Control.ScenarioResult,
    31   io.grpc.benchmarks.proto.Control.Void> getReportScenarioMethod;
    35   requestType = io.grpc.benchmarks.proto.Control.ScenarioResult.class,
    36   responseType = io.grpc.benchmarks.proto.Control.Void.class,
    38   public static io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Control.ScenarioResult,
    39   io.grpc.benchmarks.proto.Control.Void> getReportScenarioMethod() {  in getReportScenarioMethod()
    40   …io.grpc.MethodDescriptor<io.grpc.benchmarks.proto.Control.ScenarioResult, io.grpc.benchmarks.proto…  in getReportScenarioMethod()
    45   …io.grpc.MethodDescriptor.<io.grpc.benchmarks.proto.Control.ScenarioResult, io.grpc.benchmarks.prot…  in getReportScenarioMethod()
    51   io.grpc.benchmarks.proto.Control.ScenarioResult.getDefaultInstance()))  in getReportScenarioMethod()
    [all …]

/external/grpc-grpc-java/benchmarks/src/generated/main/java/io/grpc/benchmarks/proto/

Payloads.java:
    4    package io.grpc.benchmarks.proto;
    103  … return io.grpc.benchmarks.proto.Payloads.internal_static_grpc_testing_ByteBufferParams_descriptor;  in getDescriptor()
    108  …return io.grpc.benchmarks.proto.Payloads.internal_static_grpc_testing_ByteBufferParams_fieldAccess…  in internalGetFieldAccessorTable()
    110  …io.grpc.benchmarks.proto.Payloads.ByteBufferParams.class, io.grpc.benchmarks.proto.Payloads.ByteBu…  in internalGetFieldAccessorTable()
    175  if (!(obj instanceof io.grpc.benchmarks.proto.Payloads.ByteBufferParams)) {  in equals()
    178  …io.grpc.benchmarks.proto.Payloads.ByteBufferParams other = (io.grpc.benchmarks.proto.Payloads.Byte…  in equals()
    205  public static io.grpc.benchmarks.proto.Payloads.ByteBufferParams parseFrom(  in parseFrom()
    210  public static io.grpc.benchmarks.proto.Payloads.ByteBufferParams parseFrom(  in parseFrom()
    216  public static io.grpc.benchmarks.proto.Payloads.ByteBufferParams parseFrom(  in parseFrom()
    221  public static io.grpc.benchmarks.proto.Payloads.ByteBufferParams parseFrom(  in parseFrom()
    [all …]

Messages.java:
    4    package io.grpc.benchmarks.proto;
    93   return io.grpc.benchmarks.proto.Messages.getDescriptor().getEnumTypes().get(0);  in getDescriptor()
    204  return io.grpc.benchmarks.proto.Messages.internal_static_grpc_testing_BoolValue_descriptor;  in getDescriptor()
    209  … return io.grpc.benchmarks.proto.Messages.internal_static_grpc_testing_BoolValue_fieldAccessorTable  in internalGetFieldAccessorTable()
    211  …io.grpc.benchmarks.proto.Messages.BoolValue.class, io.grpc.benchmarks.proto.Messages.BoolValue.Bui…  in internalGetFieldAccessorTable()
    264  if (!(obj instanceof io.grpc.benchmarks.proto.Messages.BoolValue)) {  in equals()
    267  …io.grpc.benchmarks.proto.Messages.BoolValue other = (io.grpc.benchmarks.proto.Messages.BoolValue) …  in equals()
    291  public static io.grpc.benchmarks.proto.Messages.BoolValue parseFrom(  in parseFrom()
    296  public static io.grpc.benchmarks.proto.Messages.BoolValue parseFrom(  in parseFrom()
    302  public static io.grpc.benchmarks.proto.Messages.BoolValue parseFrom(  in parseFrom()
    [all …]

Control.java:
    4    package io.grpc.benchmarks.proto;
    116  return io.grpc.benchmarks.proto.Control.getDescriptor().getEnumTypes().get(0);  in getDescriptor()
    240  return io.grpc.benchmarks.proto.Control.getDescriptor().getEnumTypes().get(1);  in getDescriptor()
    365  return io.grpc.benchmarks.proto.Control.getDescriptor().getEnumTypes().get(2);  in getDescriptor()
    475  return io.grpc.benchmarks.proto.Control.internal_static_grpc_testing_PoissonParams_descriptor;  in getDescriptor()
    480  …return io.grpc.benchmarks.proto.Control.internal_static_grpc_testing_PoissonParams_fieldAccessorTa…  in internalGetFieldAccessorTable()
    482  …io.grpc.benchmarks.proto.Control.PoissonParams.class, io.grpc.benchmarks.proto.Control.PoissonPara…  in internalGetFieldAccessorTable()
    535  if (!(obj instanceof io.grpc.benchmarks.proto.Control.PoissonParams)) {  in equals()
    538  …io.grpc.benchmarks.proto.Control.PoissonParams other = (io.grpc.benchmarks.proto.Control.PoissonPa…  in equals()
    564  public static io.grpc.benchmarks.proto.Control.PoissonParams parseFrom(  in parseFrom()
    [all …]

Stats.java:
    4    package io.grpc.benchmarks.proto;
    172  return io.grpc.benchmarks.proto.Stats.internal_static_grpc_testing_ServerStats_descriptor;  in getDescriptor()
    177  … return io.grpc.benchmarks.proto.Stats.internal_static_grpc_testing_ServerStats_fieldAccessorTable  in internalGetFieldAccessorTable()
    179  …io.grpc.benchmarks.proto.Stats.ServerStats.class, io.grpc.benchmarks.proto.Stats.ServerStats.Build…  in internalGetFieldAccessorTable()
    333  if (!(obj instanceof io.grpc.benchmarks.proto.Stats.ServerStats)) {  in equals()
    336  …io.grpc.benchmarks.proto.Stats.ServerStats other = (io.grpc.benchmarks.proto.Stats.ServerStats) ob…  in equals()
    391  public static io.grpc.benchmarks.proto.Stats.ServerStats parseFrom(  in parseFrom()
    396  public static io.grpc.benchmarks.proto.Stats.ServerStats parseFrom(  in parseFrom()
    402  public static io.grpc.benchmarks.proto.Stats.ServerStats parseFrom(  in parseFrom()
    407  public static io.grpc.benchmarks.proto.Stats.ServerStats parseFrom(  in parseFrom()
    [all …]
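
Payloads, Messages, Control, and Stats are protobuf-java generated wrappers, and the hits trace the same per-message surface in each: getDescriptor(), internalGetFieldAccessorTable(), a value-based equals(), and a family of parseFrom() overloads. A hedged round-trip sketch against one of these types follows; the builder setter names are assumptions inferred from the ByteBufferParams type name and are not shown in the hits.

    // Hedged round-trip sketch of the generated message surface shown above.
    // setReqSize/setRespSize are assumed field accessors for ByteBufferParams;
    // the search hits do not show the actual field names.
    import io.grpc.benchmarks.proto.Payloads;

    public final class PayloadsRoundTrip {
      public static void main(String[] args) throws Exception {
        Payloads.ByteBufferParams params = Payloads.ByteBufferParams.newBuilder()
            .setReqSize(1024)
            .setRespSize(1024)
            .build();
        byte[] wire = params.toByteArray();             // standard generated serializer
        Payloads.ByteBufferParams parsed =
            Payloads.ByteBufferParams.parseFrom(wire);  // one of the parseFrom() overloads at 205-221
        System.out.println(params.equals(parsed));      // value-based equals(), prints true
      }
    }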

/external/okhttp/okio/benchmarks/

README.md:
    1    Okio Benchmarks
    4    … used to measure various aspects of performance for Okio buffers. Okio benchmarks are written usin…
    9    To run benchmarks locally, first build and package the project modules:
    15   This should create a `benchmarks.jar` file in the `target` directory, which is a typical JMH benchm…
    18   $ java -jar benchmarks/target/benchmarks.jar -l
    19   Benchmarks:
    20   com.squareup.okio.benchmarks.BufferPerformanceBench.cold
    21   com.squareup.okio.benchmarks.BufferPerformanceBench.threads16hot
    22   com.squareup.okio.benchmarks.BufferPerformanceBench.threads1hot
    23   com.squareup.okio.benchmarks.BufferPerformanceBench.threads2hot
    [all …]
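
The elided text at line 4 identifies these as JMH benchmarks, and the -l listing enumerates the methods JMH discovered in BufferPerformanceBench. For orientation, here is a hypothetical minimal benchmark of the same shape; it is not the actual Okio source, and the measured operation is invented.

    // Hypothetical JMH benchmark in the style of the listed methods; not the
    // actual BufferPerformanceBench source.
    package com.squareup.okio.benchmarks;

    import java.util.concurrent.TimeUnit;
    import okio.Buffer;
    import org.openjdk.jmh.annotations.Benchmark;
    import org.openjdk.jmh.annotations.BenchmarkMode;
    import org.openjdk.jmh.annotations.Mode;
    import org.openjdk.jmh.annotations.OutputTimeUnit;

    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.SECONDS)
    public class BufferWriteBench {
      private static final byte[] CHUNK = new byte[1024];

      @Benchmark
      public Buffer cold() {
        // Push 1 KiB through a fresh buffer each invocation; returning the
        // buffer keeps the work live so JMH does not dead-code-eliminate it.
        Buffer buffer = new Buffer();
        buffer.write(CHUNK);
        return buffer;
      }
    }

Once packaged, a single method can be selected by regex, e.g. java -jar benchmarks/target/benchmarks.jar cold.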

/external/eigen/bench/

benchmark-blocking-sizes.cpp:
    345  void serialize_benchmarks(const char* filename, const vector<benchmark_t>& benchmarks, size_t first…  in serialize_benchmarks() argument
    353  size_t benchmarks_vector_size = benchmarks.size();  in serialize_benchmarks()
    357  fwrite(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file);  in serialize_benchmarks()
    361  bool deserialize_benchmarks(const char* filename, vector<benchmark_t>& benchmarks, size_t& first_be…  in deserialize_benchmarks() argument
    377  benchmarks.resize(benchmarks_vector_size);  in deserialize_benchmarks()
    378  if (benchmarks.size() != fread(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file)) {  in deserialize_benchmarks()
    386  vector<benchmark_t>& benchmarks,  in try_run_some_benchmarks() argument
    390  if (first_benchmark_to_run == benchmarks.size()) {  in try_run_some_benchmarks()
    401  float ratio_done = float(benchmark_index) / benchmarks.size();  in try_run_some_benchmarks()
    405  if (benchmark_index == benchmarks.size() ||  in try_run_some_benchmarks()
    [all …]

/external/grpc-grpc/tools/profiling/microbenchmarks/bm_diff/

bm_build.py:
    16   """ Python utility to build opt and counters benchmarks """
    31   '--benchmarks',
    35   help='Which benchmarks to build')
    57   def _make_cmd(cfg, benchmarks, jobs):  argument
    58   return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
    61   def build(name, benchmarks, jobs, counters):  argument
    65   subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    67   subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
    70   subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    72   subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
    [all …]

bm_diff.py:
    58   '--benchmarks',
    62   help='Which benchmarks to run')
    69   'Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
    76   help='Regex to filter benchmarks run')
    157  benchmarks = collections.defaultdict(Benchmark)
    190  benchmarks[name].add_sample(track, row, True)
    195  benchmarks[name].add_sample(track, row, False)
    198  for name, bm in benchmarks.items():
    205  for name in sorted(benchmarks.keys()):
    206  if benchmarks[name].skip(): continue
    [all …]

/external/toolchain-utils/crosperf/

experiment_factory.py:
    90   ChromeOS benchmarks, but the idea is that in the future, other types
    94   def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,  argument
    97   """Add all the tests in a set to the benchmarks list."""
    102  benchmarks.append(telemetry_benchmark)
    154  # Construct benchmarks.
    157  benchmarks = []
    172  self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
    176  self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
    181  self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
    185  # Add non-telemetry toolchain-perf benchmarks:
    [all …]

results_organizer_unittest.py:
    138  benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
    140  benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
    142  benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
    144  benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
    146  benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
    148  benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
    150  benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
    152  benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
    154  benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
    163  organized = OrganizeResults(benchmark_runs, labels, benchmarks)

/external/grpc-grpc-java/benchmarks/src/main/java/io/grpc/benchmarks/qps/

AsyncClient.java:
    17   package io.grpc.benchmarks.qps;
    19   import static io.grpc.benchmarks.Utils.HISTOGRAM_MAX_VALUE;
    20   import static io.grpc.benchmarks.Utils.HISTOGRAM_PRECISION;
    21   import static io.grpc.benchmarks.Utils.saveHistogram;
    22   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.ADDRESS;
    23   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.CHANNELS;
    24   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.CLIENT_PAYLOAD;
    25   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.DIRECTEXECUTOR;
    26   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.DURATION;
    27   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.FLOW_CONTROL_WINDOW;
    [all …]

OpenLoopClient.java:
    17   package io.grpc.benchmarks.qps;
    20   import static io.grpc.benchmarks.Utils.HISTOGRAM_MAX_VALUE;
    21   import static io.grpc.benchmarks.Utils.HISTOGRAM_PRECISION;
    22   import static io.grpc.benchmarks.Utils.saveHistogram;
    23   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.ADDRESS;
    24   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.CLIENT_PAYLOAD;
    25   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.DURATION;
    26   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.FLOW_CONTROL_WINDOW;
    27   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.SAVE_HISTOGRAM;
    28   import static io.grpc.benchmarks.qps.ClientConfiguration.ClientParam.SERVER_PAYLOAD;
    [all …]

/external/grpc-grpc/tools/run_tests/

run_microbenchmark.py:
    80   benchmarks = []
    92   benchmarks.append(
    117  if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
    121  benchmarks, maxjobs=max(1,
    125  benchmarks = []
    128  # run the remaining benchmarks that weren't flushed
    129  if len(benchmarks):
    130  jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
    142  benchmarks = []
    148  benchmarks.append(
    [all …]
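
The hits at lines 117-130 show the driver flushing batches of benchmark jobs once min(16, cpu_count()) accumulate, running each batch with parallelism capped at max(1, cpu_count() / 2). The real script is Python and delegates to jobset.run; purely as a language-swapped illustration of that batching shape, here is a hypothetical Java rendering.

    // Hypothetical Java rendering of the batching logic visible above:
    // collect jobs, flush once a batch reaches min(16, nCPU), run each batch
    // with parallelism capped at max(1, nCPU / 2). Illustrative only.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    class BenchmarkBatcher {
      private final List<Runnable> pending = new ArrayList<>();
      private final int cpus = Runtime.getRuntime().availableProcessors();

      void add(Runnable benchmark) throws InterruptedException {
        pending.add(benchmark);
        if (pending.size() >= Math.min(16, cpus)) {
          flush();  // mirrors the periodic flush at line 117
        }
      }

      void flush() throws InterruptedException {
        // Run the remaining benchmarks that weren't flushed (cf. line 128).
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, cpus / 2));
        pending.forEach(pool::submit);
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.HOURS);
        pending.clear();
      }
    }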

/external/tensorflow/tensorflow/contrib/eager/python/examples/resnet50/

README.md:
    7    - `resnet50_test.py`: Sanity unittests and benchmarks for using the model with
    9    - `resnet50_graph_test.py`: Sanity unittests and benchmarks when using the same
    12   # Benchmarks  chapter
    18   python resnet50_test.py --benchmarks=.
    21   python resnet50_graph_test.py --benchmarks=.
    25   package. To build (and run benchmarks) from source:
    29   bazel run -c opt --config=cuda :resnet50_test -- --benchmarks=.
    32   bazel run -c opt --config=cuda :resnet50_graph_test -- --benchmarks=.
    37   On October 31, 2017, the benchmarks demonstrated comparable performance

/external/libcxx/docs/

TestingLibcxx.rst:
    201  Benchmarks  chapter
    205  The benchmarks are written using the `Google Benchmark`_ library, a copy of which
    213  Building Benchmarks
    216  The benchmark tests are not built by default. The benchmarks can be built using
    217  the ``cxx-benchmarks`` target.
    225  $ make cxx-benchmarks
    227  This will build all of the benchmarks under ``<libcxx-src>/benchmarks`` to be
    229  ``build/benchmarks``.
    231  The benchmarks can also be built against the platforms native standard library
    234  The compiled benchmarks are named ``<test>.libcxx.out`` if they test libc++ and
    [all …]

/external/tensorflow/tensorflow/contrib/eager/python/examples/rnn_ptb/

README.md:
    19   Benchmarks (using synthetic data):
    23   python rnn_ptb_test.py --benchmarks=.
    26   python rnn_ptb_graph_test.py --benchmarks=.
    30   package. To build (and run benchmarks) from source:
    35   bazel run -c opt --config=cuda :rnn_ptb_test -- --benchmarks=.
    38   bazel run -c opt --config=cuda :rnn_ptb_graph_test -- --benchmarks=.
    43   On October 31, 2017, the benchmarks demonstrated slightly better performance

/external/google-benchmark/tools/

compare.py:
    26   "benchmarks causing it to be overwritten") % output_file)
    72   'benchmarks',
    73   help='The most simple use-case, compare all the output of these two benchmarks')
    176  if args.mode == 'benchmarks':
    225  # Run the benchmarks and report the results
    231  # Now, filter the benchmarks so that the difference report can work
    261  ['benchmarks', self.testInput0, self.testInput1])
    264  self.assertEqual(parsed.mode, 'benchmarks')
    271  ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
    275  self.assertEqual(parsed.mode, 'benchmarks')
    [all …]

/external/libcxx/utils/google-benchmark/tools/

compare.py (libcxx's vendored copy of google-benchmark; the hits are identical to the file above):
    26   "benchmarks causing it to be overwritten") % output_file)
    72   'benchmarks',
    73   help='The most simple use-case, compare all the output of these two benchmarks')
    176  if args.mode == 'benchmarks':
    225  # Run the benchmarks and report the results
    231  # Now, filter the benchmarks so that the difference report can work
    261  ['benchmarks', self.testInput0, self.testInput1])
    264  self.assertEqual(parsed.mode, 'benchmarks')
    271  ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
    275  self.assertEqual(parsed.mode, 'benchmarks')
    [all …]

/external/v8/tools/

try_perf.py:
    66   benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
    67   cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
    74   parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
    90   if not options.benchmarks:
    91   print 'Please specify the benchmarks to run as arguments.'
    94   for benchmark in options.benchmarks:
    98   'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))

/external/protobuf/benchmarks/

readme.txt:
    10   If we end up with a lot of different benchmarks it may be worth
    29   $ javac -d . -cp ../protobuf.jar benchmarks/*.java
    34   benchmarks.GoogleSize$SizeMessage1 ../google_message1.dat
    35   benchmarks.GoogleSpeed$SpeedMessage1 ../google_message1.dat
    36   benchmarks.GoogleSize$SizeMessage2 ../google_message2.dat
    37   benchmarks.GoogleSpeed$SpeedMessage2 ../google_message2.dat
    44   Benchmarks available
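
The readme pairs each generated benchmark class with a serialized test message on disk, e.g. benchmarks.GoogleSize$SizeMessage1 against ../google_message1.dat. A hedged sketch of the minimal load-and-measure step such a harness performs, assuming only the standard generated parseFrom/getSerializedSize surface; the main() shape and output are invented, and only the class and file names come from the readme.

    // Hedged sketch: load a serialized test message and report its size.
    // Illustrative only; not the actual protobuf benchmark harness.
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public final class SizeBenchSketch {
      public static void main(String[] args) throws Exception {
        // e.g. args[0] = "../google_message1.dat", per the readme pairing
        byte[] data = Files.readAllBytes(Paths.get(args[0]));
        // Parse with the generated class, then measure the re-encoded size.
        benchmarks.GoogleSize.SizeMessage1 msg =
            benchmarks.GoogleSize.SizeMessage1.parseFrom(data);
        System.out.println("serialized size: " + msg.getSerializedSize());
      }
    }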

/external/tensorflow/tensorflow/compiler/xla/tests/

xla_internal_test_main.cc:
    32   // If the --benchmarks flag is passed in then only run the benchmarks, not the  in main()
    36   if (arg == "--benchmarks" || absl::StartsWith(arg, "--benchmarks=")) {  in main()
    38   if (absl::StartsWith(arg, "--benchmarks=")) {  in main()
    39   pattern = argv[i] + strlen("--benchmarks=");  in main()
    41   // Handle flag of the form '--benchmarks foo' (no '=').  in main()
    43   LOG(ERROR) << "--benchmarks flag requires an argument.";  in main()

/external/v8/benchmarks/

revisions.html:
    16   expand the scope of the benchmarks. Here is a list of revisions, with
    22   <div class="subtitle"><h3>Version 7 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v7/run.h…
    27   <div class="subtitle"><h3>Version 6 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v6/run.h…
    37   <p>Furthermore, the benchmark runner was changed to run the benchmarks
    41   <div class="subtitle"><h3>Version 5 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v5/run.h…
    49   <div class="subtitle"><h3>Version 4 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v4/run.h…
    63   <div class="subtitle"><h3>Version 3 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v3/run.h…
    76   <div class="subtitle"><h3>Version 2 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v2/run.h…
    88   <p>Other benchmarks were fixed to do elementary verification of the
    94   <div class="subtitle"><h3>Version 1 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v1/run.h…