Searched refs:experiment (Results 1 – 25 of 119) sorted by relevance

/external/toolchain-utils/crosperf/
experiment_runner.py
30 def _WriteJSONReportToFile(experiment, results_dir, json_report): argument
32 has_llvm = any('llvm' in l.compiler for l in experiment.labels)
34 board = experiment.labels[0].board
51 experiment, argument
56 self._experiment = experiment
57 self.l = log or logger.GetLogger(experiment.log_dir)
62 if experiment.log_level != 'verbose':
100 def _LockAllMachines(self, experiment): argument
114 experiment.labels[0].chromeos_root,
130 def _UnlockAllMachines(self, experiment): argument
[all …]
experiment_status.py
16 def __init__(self, experiment): argument
17 self.experiment = experiment
18 self.num_total = len(self.experiment.benchmark_runs)
21 self.log_level = experiment.log_level
37 if self.experiment.start_time:
38 elapsed_time = current_time - self.experiment.start_time
42 if self.completed != self.experiment.num_complete:
43 self.completed = self.experiment.num_complete
67 eta_seconds = (float(self.num_total - self.experiment.num_complete - 1) *
68 time_completed_jobs / self.experiment.num_run_complete +
[all …]
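
Aside: the eta_seconds fragment above (experiment_status.py lines 67-68) estimates time remaining by multiplying the average runtime of the runs that actually executed by the number of runs still outstanding. A minimal sketch of that arithmetic, assuming the class fields are passed as plain arguments and omitting the trailing term that the listing truncates:

def estimate_eta_seconds(num_total, num_complete, num_run_complete,
                         time_completed_jobs):
    if num_run_complete == 0:
        return None  # nothing has finished yet; no basis for an estimate
    # Average wall time per benchmark run that actually executed.
    avg_run_time = time_completed_jobs / num_run_complete
    # Runs not yet complete, minus the one assumed to be in flight.
    return float(num_total - num_complete - 1) * avg_run_time

print(estimate_eta_seconds(num_total=20, num_complete=5,
                           num_run_complete=5, time_completed_jobs=250.0))
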
results_report.py
288 def __init__(self, results, email=False, experiment=None): argument
291 self.experiment = experiment
306 def FromExperiment(experiment, email=False): argument
307 results = BenchmarkResults.FromExperiment(experiment)
308 return TextResultsReport(results, email, experiment)
318 for benchmark_run in self.experiment.benchmark_runs:
330 experiment = self.experiment
333 if experiment is not None:
334 title_contents = "Results report for '%s'" % (experiment.name,)
342 if experiment is not None:
[all …]
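
Aside: the FromExperiment fragments above (results_report.py lines 306-308) show the alternate-constructor pattern the report classes use: a static factory derives the benchmark results from the experiment, then threads both into the report so the title can name the experiment (line 334). A self-contained sketch with stub bodies standing in for the real classes:

class BenchmarkResults:
    @staticmethod
    def FromExperiment(experiment):
        # The real method pulls keyvals out of experiment.benchmark_runs;
        # an empty stub is enough to show the shape of the pattern.
        return BenchmarkResults()

class TextResultsReport:
    def __init__(self, results, email=False, experiment=None):
        self.results = results
        self.email = email
        self.experiment = experiment  # retained for the report title

    @staticmethod
    def FromExperiment(experiment, email=False):
        results = BenchmarkResults.FromExperiment(experiment)
        return TextResultsReport(results, email, experiment)
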
results_report_unittest.py
102 experiment = ExperimentFactory().GetExperiment(efile,
105 for label in experiment.labels:
107 return experiment
110 def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0, argument
115 num_configs = len(experiment.benchmarks) * len(experiment.labels)
116 num_runs = len(experiment.benchmark_runs) // num_configs
120 bench = experiment.benchmarks[for_benchmark]
131 for label in experiment.labels:
142 experiment.benchmark_runs.extend(
144 return experiment
[all …]
crosperf.py
58 def Cleanup(experiment): argument
60 experiment.Cleanup()
113 experiment = ExperimentFactory().GetExperiment(experiment_file,
119 atexit.register(Cleanup, experiment)
122 runner = MockExperimentRunner(experiment, json_report)
125 experiment, json_report, using_schedv2=(not options.noschedv2))
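
Aside: crosperf.py lines 58 and 119 above register cleanup with atexit as soon as the experiment object exists, so machines and temporary state are released even if the run aborts partway. A minimal sketch of the pattern, with a stub class in place of the real Experiment:

import atexit

class _StubExperiment:
    """Stand-in for crosperf's Experiment; only the cleanup hook matters."""

    def Cleanup(self):
        print('releasing locked machines and temp directories')

def Cleanup(experiment):
    experiment.Cleanup()

experiment = _StubExperiment()
# Registered immediately after construction: atexit invokes the hook on
# normal interpreter shutdown, including after an uncaught exception.
atexit.register(Cleanup, experiment)
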
experiment_factory.py
13 from experiment import Experiment
312 experiment = Experiment(experiment_name, all_remote, working_directory,
318 return experiment
schedv2.py
222 def __init__(self, experiment): argument
223 self._experiment = experiment
224 self._logger = logger.GetLogger(experiment.log_dir)
/external/tensorflow/tensorflow/contrib/learn/python/learn/
learn_runner.py
27 from tensorflow.contrib.learn.python.learn.experiment import Experiment
34 def _execute_schedule(experiment, schedule): argument
36 if not hasattr(experiment, schedule):
38 valid_tasks = [x for x in dir(experiment)
40 and callable(getattr(experiment, x))]
44 task = getattr(experiment, schedule)
47 valid_tasks = [x for x in dir(experiment)
49 and callable(getattr(experiment, x))]
85 experiment = experiment_fn(run_config, hparams)
87 if not isinstance(experiment, Experiment):
[all …]
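
Aside: the _execute_schedule fragments above treat a schedule as nothing more than the name of a callable attribute on the Experiment, with dir() used to list the valid tasks when lookup fails. A runnable sketch of that dispatch, using a toy class rather than tf.contrib.learn's Experiment:

class _ToyExperiment:
    def train(self):
        return 'trained'

    def train_and_evaluate(self):
        return 'trained and evaluated'

def execute_schedule(experiment, schedule):
    if not hasattr(experiment, schedule):
        valid_tasks = [x for x in dir(experiment)
                       if not x.startswith('_')
                       and callable(getattr(experiment, x))]
        raise ValueError('Schedule %r not in experiment; valid tasks: %s' %
                         (schedule, valid_tasks))
    task = getattr(experiment, schedule)
    if not callable(task):
        raise TypeError('Schedule %r is not callable' % schedule)
    return task()

print(execute_schedule(_ToyExperiment(), 'train'))
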
experiment_test.py
28 from tensorflow.contrib.learn.python.learn import experiment
209 experiment.Experiment(
222 ex = experiment.Experiment(
241 ex = experiment.Experiment(
255 ex = experiment.Experiment(
271 ex = experiment.Experiment(
297 ex = experiment.Experiment(
327 ex = experiment.Experiment(
343 ex = experiment.Experiment(
366 ex = experiment.Experiment(
[all …]
/external/caliper/caliper/src/main/java/com/google/caliper/runner/
TrialModule.java
44 private final Experiment experiment; field in TrialModule
46 TrialModule(UUID trialId, int trialNumber, Experiment experiment) { in TrialModule() argument
49 this.experiment = experiment; in TrialModule()
69 return experiment; in provideExperiment()
74 static BenchmarkSpec provideBenchmarkSpec(Experiment experiment) { in provideBenchmarkSpec() argument
76 .className(experiment.instrumentation().benchmarkMethod().getDeclaringClass().getName()) in provideBenchmarkSpec()
77 .methodName(experiment.instrumentation().benchmarkMethod().getName()) in provideBenchmarkSpec()
78 .addAllParameters(experiment.userParameters()) in provideBenchmarkSpec()
91 static MeasurementCollectingVisitor provideMeasurementCollectingVisitor(Experiment experiment) { in provideMeasurementCollectingVisitor() argument
92 return experiment.instrumentation().getMeasurementCollectingVisitor(); in provideMeasurementCollectingVisitor()
[all …]
ScheduledTrial.java
27 private final Experiment experiment; field in ScheduledTrial
30 @Inject ScheduledTrial(Experiment experiment, TrialRunLoop runLoop, in ScheduledTrial() argument
33 this.experiment = experiment; in ScheduledTrial()
41 Experiment experiment() { in experiment() method in ScheduledTrial
42 return experiment; in experiment()
TrialResult.java
25 private final Experiment experiment; field in TrialResult
28 TrialResult(Trial trial, Experiment experiment, ImmutableList<String> trialMessages) { in TrialResult() argument
30 this.experiment = experiment; in TrialResult()
35 return experiment; in getExperiment()
TrialOutputLogger.java
43 private final Experiment experiment; field in TrialOutputLogger
48 @TrialId UUID trialId, Experiment experiment) { in TrialOutputLogger() argument
52 this.experiment = experiment; in TrialOutputLogger()
81 writer.println("Experiment: " + experiment); in printHeader()
ExperimentingCaliperRun.java
104 @Override public String apply(Experiment experiment) { in run()
105 return experiment.instrumentation().benchmarkMethod().getName(); in run()
244 for (Experiment experiment : experimentsToRun) { in createScheduledTrials()
247 new TrialModule(UUID.randomUUID(), trialNumber, experiment)); in createScheduledTrials()
265 for (Experiment experiment : experiments) { in dryRun()
268 mainComponent.newExperimentComponent(ExperimentModule.forExperiment(experiment)); in dryRun()
272 experiment.instrumentation().dryRun(benchmark); in dryRun()
273 builder.add(experiment); in dryRun()
WorkerProcess.java
79 Experiment experiment, in WorkerProcess() argument
86 buildProcess(trialId, experiment, benchmarkSpec, localPort, benchmarkClass); in WorkerProcess()
145 Experiment experiment, in buildProcess() argument
150 Instrumentation instrumentation = experiment.instrumentation(); in buildProcess()
164 VirtualMachine vm = experiment.vm(); in buildProcess()
ExperimentModule.java
47 public static ExperimentModule forExperiment(Experiment experiment) { in forExperiment() argument
48 Method benchmarkMethod = experiment.instrumentation().benchmarkMethod(); in forExperiment()
51 experiment.userParameters()); in forExperiment()
/external/toolchain-utils/crosperf/experiment_files/
README
1 To use these experiment files, replace the board, remote and images
7 The final experiment file should look something like the following (but with
23 # Paste experiment benchmarks here. Example, I pasted
26 # This experiment just runs a short autotest which measures the performance of
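
Aside: the README lines above promise a complete example. A hedged sketch of what a filled-in crosperf experiment file might look like; the board, remote address, image path, and benchmark name are all placeholders, not values taken from this repository:

board: <your_board>
remote: <ip_of_your_test_device>

# Paste experiment benchmarks here; one block per benchmark.
benchmark: example_benchmark {
  suite: telemetry_Crosperf
  iterations: 3
}

my_image_label {
  chromeos_image: <path_to_your_chromeos_test_image>
}
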
/external/libaom/libaom/build/cmake/
aom_config_defaults.cmake
118 # AV1 experiment flags.
119 set_aom_config_var(CONFIG_SPEED_STATS 0 NUMBER "AV1 experiment flag.")
120 set_aom_config_var(CONFIG_COLLECT_RD_STATS 0 NUMBER "AV1 experiment flag.")
121 set_aom_config_var(CONFIG_DIST_8X8 0 NUMBER "AV1 experiment flag.")
122 set_aom_config_var(CONFIG_ENTROPY_STATS 0 NUMBER "AV1 experiment flag.")
123 set_aom_config_var(CONFIG_INTER_STATS_ONLY 0 NUMBER "AV1 experiment flag.")
124 set_aom_config_var(CONFIG_RD_DEBUG 0 NUMBER "AV1 experiment flag.")
126 "AV1 experiment flag.")
128 "AV1 experiment flag.")
129 set_aom_config_var(CONFIG_SHARP_SETTINGS 0 NUMBER "AV1 experiment flag.")
/external/caliper/caliper/src/test/java/com/google/caliper/runner/
WorkerProcessTest.java
83 Experiment experiment = new Experiment( in simpleArgsTest() local
91 ProcessBuilder builder = createProcess(experiment, spec); in simpleArgsTest()
163 private ProcessBuilder createProcess(Experiment experiment, BenchmarkSpec benchmarkSpec) { in createProcess() argument
164 return WorkerProcess.buildProcess(TRIAL_ID, experiment, benchmarkSpec, PORT_NUMBER, in createProcess()
/external/toolchain-utils/bestflags/
example_algorithms.py
141 for experiment in experiments:
142 if experiment == 'GA':
144 _ProcessGA(experiments[experiment])
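
Aside: the loop above dispatches on the experiment's name, with each entry in the experiments mapping carrying that algorithm's configuration. A small sketch of the same shape; _ProcessGA's body and the sample configuration keys are illustrative only:

def _ProcessGA(config):
    # The real function sets up the genetic-algorithm search; this stub
    # only shows that it receives the per-experiment configuration.
    print('running GA search with', config)

experiments = {'GA': {'generations': 20, 'population': 40}}

for experiment in experiments:
    if experiment == 'GA':
        _ProcessGA(experiments[experiment])
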
/external/toolchain-utils/bestflags/examples/omnetpp/
README
4 information to run the experiment.
11 the experiment will try.
/external/llvm/test/Instrumentation/AddressSanitizer/
experiment-call.ll
2 ; -asan-force-experiment flag turns all memory accesses into experiments.
3 ; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -asan-instrumentation-with-call-thresh…
experiment.ll
2 ; -asan-force-experiment flag turns all memory accesses into experiments.
3 ; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -S | FileCheck %s
/external/swiftshader/third_party/llvm-7.0/llvm/test/Instrumentation/AddressSanitizer/
experiment-call.ll
2 ; -asan-force-experiment flag turns all memory accesses into experiments.
3 ; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -asan-instrumentation-with-call-thresh…
experiment.ll
2 ; -asan-force-experiment flag turns all memory accesses into experiments.
3 ; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -S | FileCheck %s
