#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit test for experiment_factory.py"""

from __future__ import print_function

import io
import os
import socket
import unittest
import unittest.mock as mock

from cros_utils import command_executer
from cros_utils.file_utils import FileUtils

from experiment_file import ExperimentFile
import test_flag
import benchmark
import experiment_factory
from experiment_factory import ExperimentFactory
import settings_factory

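# In-memory experiment-file fixtures for the tests below: EXPERIMENT_FILE_1
# defines two benchmarks and two images, while EXPERIMENT_FILE_2 exercises
# CWP approximation via cwp_dso and per-benchmark weights.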
EXPERIMENT_FILE_1 = """
  board: x86-alex
  remote: chromeos-alex3
  locks_dir: /tmp

  benchmark: PageCycler {
    iterations: 3
  }

  benchmark: webrtc {
    iterations: 1
    test_args: --story-filter=datachannel
  }

  image1 {
    chromeos_image: /usr/local/google/cros_image1.bin
  }

  image2 {
    chromeos_image: /usr/local/google/cros_image2.bin
  }
  """

EXPERIMENT_FILE_2 = """
  board: x86-alex
  remote: chromeos-alex3
  locks_dir: /tmp

  cwp_dso: kallsyms

  benchmark: Octane {
    iterations: 1
    suite: telemetry_Crosperf
    run_local: False
    weight: 0.8
  }

  benchmark: Kraken {
    iterations: 1
    suite: telemetry_Crosperf
    run_local: False
    weight: 0.2
  }

  image1 {
    chromeos_image: /usr/local/google/cros_image1.bin
  }
  """

# pylint: disable=too-many-function-args


class ExperimentFactoryTest(unittest.TestCase):
  """Class for running experiment factory unittests."""

  def setUp(self):
    self.append_benchmark_call_args = []

  def testLoadExperimentFile1(self):
    experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
    exp = ExperimentFactory().GetExperiment(
        experiment_file, working_directory='', log_dir='')
    self.assertEqual(exp.remote, ['chromeos-alex3'])

    self.assertEqual(len(exp.benchmarks), 2)
    self.assertEqual(exp.benchmarks[0].name, 'PageCycler')
    self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler')
    self.assertEqual(exp.benchmarks[0].iterations, 3)
    self.assertEqual(exp.benchmarks[1].name, 'webrtc@@datachannel')
    self.assertEqual(exp.benchmarks[1].test_name, 'webrtc')
    self.assertEqual(exp.benchmarks[1].iterations, 1)

    self.assertEqual(len(exp.labels), 2)
    self.assertEqual(exp.labels[0].chromeos_image,
                     '/usr/local/google/cros_image1.bin')
    self.assertEqual(exp.labels[0].board, 'x86-alex')

  def testLoadExperimentFile2CWP(self):
    experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
    exp = ExperimentFactory().GetExperiment(
        experiment_file, working_directory='', log_dir='')
    self.assertEqual(exp.cwp_dso, 'kallsyms')
    self.assertEqual(len(exp.benchmarks), 2)
    self.assertEqual(exp.benchmarks[0].weight, 0.8)
    self.assertEqual(exp.benchmarks[1].weight, 0.2)

  def testDuplicateBenchmark(self):
    mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
    mock_experiment_file.all_settings = []
    benchmark_settings1 = settings_factory.BenchmarkSettings('name')
    mock_experiment_file.all_settings.append(benchmark_settings1)
    benchmark_settings2 = settings_factory.BenchmarkSettings('name')
    mock_experiment_file.all_settings.append(benchmark_settings2)

    with self.assertRaises(SyntaxError):
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')

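  # testCWPExceptions steps through the validation errors GetExperiment raises
  # when cwp_dso and per-benchmark weights are misconfigured.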
  def testCWPExceptions(self):
    mock_experiment_file = ExperimentFile(io.StringIO(''))
    mock_experiment_file.all_settings = []
    global_settings = settings_factory.GlobalSettings('test_name')
    global_settings.SetField('locks_dir', '/tmp')

    # Test 1: DSO type not supported
    global_settings.SetField('cwp_dso', 'test')
    self.assertEqual(global_settings.GetField('cwp_dso'), 'test')
    mock_experiment_file.global_settings = global_settings
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual('The DSO specified is not supported', str(msg.exception))

    # Test 2: No weight after DSO specified
    global_settings.SetField('cwp_dso', 'kallsyms')
    mock_experiment_file.global_settings = global_settings
    benchmark_settings = settings_factory.BenchmarkSettings('name')
    mock_experiment_file.all_settings.append(benchmark_settings)
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual('With DSO specified, each benchmark should have a weight',
                     str(msg.exception))

    # Test 3: Weight is set, but no DSO specified
    global_settings.SetField('cwp_dso', '')
    mock_experiment_file.global_settings = global_settings
    benchmark_settings = settings_factory.BenchmarkSettings('name')
    benchmark_settings.SetField('weight', '0.8')
    mock_experiment_file.all_settings = []
    mock_experiment_file.all_settings.append(benchmark_settings)
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual('Weight can only be set when DSO specified',
                     str(msg.exception))

    # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks
    global_settings.SetField('cwp_dso', 'kallsyms')
    mock_experiment_file.global_settings = global_settings
    benchmark_settings = settings_factory.BenchmarkSettings('name')
    benchmark_settings.SetField('weight', '0.8')
    mock_experiment_file.all_settings = []
    mock_experiment_file.all_settings.append(benchmark_settings)
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual(
        'CWP approximation weight only works with '
        'telemetry_Crosperf suite', str(msg.exception))

    # Test 5: cwp_dso does not work for local run
    benchmark_settings = settings_factory.BenchmarkSettings('name')
    benchmark_settings.SetField('weight', '0.8')
    benchmark_settings.SetField('suite', 'telemetry_Crosperf')
    benchmark_settings.SetField('run_local', 'True')
    mock_experiment_file.all_settings = []
    mock_experiment_file.all_settings.append(benchmark_settings)
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual('run_local must be set to False to use CWP approximation',
                     str(msg.exception))

    # Test 6: Weight should be a float >= 0
    benchmark_settings = settings_factory.BenchmarkSettings('name')
    benchmark_settings.SetField('weight', '-1.2')
    benchmark_settings.SetField('suite', 'telemetry_Crosperf')
    benchmark_settings.SetField('run_local', 'False')
    mock_experiment_file.all_settings = []
    mock_experiment_file.all_settings.append(benchmark_settings)
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual('Weight should be a float >=0', str(msg.exception))

    # Test 7: More than one story tag in test_args
    benchmark_settings = settings_factory.BenchmarkSettings('name')
    benchmark_settings.SetField('test_args',
                                '--story-filter=a --story-tag-filter=b')
    benchmark_settings.SetField('weight', '1.2')
    benchmark_settings.SetField('suite', 'telemetry_Crosperf')
    mock_experiment_file.all_settings = []
    mock_experiment_file.all_settings.append(benchmark_settings)
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual(
        'Only one story or story-tag filter allowed in a single '
        'benchmark run', str(msg.exception))

    # Test 8: Iterations of each benchmark run are not the same in CWP mode
    mock_experiment_file.all_settings = []
    benchmark_settings = settings_factory.BenchmarkSettings('name1')
    benchmark_settings.SetField('iterations', '4')
    benchmark_settings.SetField('weight', '1.2')
    benchmark_settings.SetField('suite', 'telemetry_Crosperf')
    benchmark_settings.SetField('run_local', 'False')
    mock_experiment_file.all_settings.append(benchmark_settings)
    benchmark_settings = settings_factory.BenchmarkSettings('name2')
    benchmark_settings.SetField('iterations', '3')
    benchmark_settings.SetField('weight', '1.2')
    benchmark_settings.SetField('suite', 'telemetry_Crosperf')
    benchmark_settings.SetField('run_local', 'False')
    mock_experiment_file.all_settings.append(benchmark_settings)
    with self.assertRaises(RuntimeError) as msg:
      ef = ExperimentFactory()
      ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual('Iterations of each benchmark run are not the same',
                     str(msg.exception))

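  # AppendBenchmarkSet should expand each predefined test set into one
  # benchmark.Benchmark object per entry in that set.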
  def test_append_benchmark_set(self):
    ef = ExperimentFactory()

    bench_list = []
    ef.AppendBenchmarkSet(bench_list, experiment_factory.telemetry_perfv2_tests,
                          '', 1, False, '', 'telemetry_Crosperf', False, 0,
                          False, '', 0)
    self.assertEqual(
        len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
    self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))

    bench_list = []
    ef.AppendBenchmarkSet(bench_list,
                          experiment_factory.telemetry_pagecycler_tests, '', 1,
                          False, '', 'telemetry_Crosperf', False, 0, False, '',
                          0)
    self.assertEqual(
        len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
    self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))

    bench_list = []
    ef.AppendBenchmarkSet(bench_list,
                          experiment_factory.telemetry_toolchain_perf_tests, '',
                          1, False, '', 'telemetry_Crosperf', False, 0, False,
                          '', 0)
    self.assertEqual(
        len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
    self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))

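  # test_get_experiment drives GetExperiment from in-memory settings only,
  # replacing AppendBenchmarkSet, GetDefaultRemotes and GetXbuddyPath with the
  # fakes defined inside the test.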
  @mock.patch.object(socket, 'gethostname')
  def test_get_experiment(self, mock_socket):

    test_flag.SetTestMode(False)
    self.append_benchmark_call_args = []

    def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch,
                               perf_args, suite, show_all):
      """Helper function for test_get_experiment."""
      arg_list = [
          bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all
      ]
      self.append_benchmark_call_args.append(arg_list)

    def FakeGetDefaultRemotes(board):
      if not board:
        return []
      return ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']

    def FakeGetXbuddyPath(build, autotest_dir, debug_dir, board, chroot,
                          log_level, perf_args):
      autotest_path = autotest_dir
      if not autotest_path:
        autotest_path = 'fake_autotest_path'
      debug_path = debug_dir
      if not debug_path and perf_args:
        debug_path = 'fake_debug_path'
      if not build or not board or not chroot or not log_level:
        return '', autotest_path, debug_path
      return 'fake_image_path', autotest_path, debug_path

    ef = ExperimentFactory()
    ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
    ef.GetDefaultRemotes = FakeGetDefaultRemotes

    label_settings = settings_factory.LabelSettings('image_label')
    benchmark_settings = settings_factory.BenchmarkSettings('bench_test')
    global_settings = settings_factory.GlobalSettings('test_name')

    label_settings.GetXbuddyPath = FakeGetXbuddyPath

    mock_experiment_file = ExperimentFile(io.StringIO(''))
    mock_experiment_file.all_settings = []

    test_flag.SetTestMode(True)
    # Basic test.
    global_settings.SetField('name', 'unittest_test')
    global_settings.SetField('board', 'lumpy')
    global_settings.SetField('locks_dir', '/tmp')
    global_settings.SetField('remote', '123.45.67.89 123.45.76.80')
    benchmark_settings.SetField('test_name', 'kraken')
    benchmark_settings.SetField('suite', 'telemetry_Crosperf')
    benchmark_settings.SetField('iterations', 1)
    label_settings.SetField(
        'chromeos_image',
        'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin')
    label_settings.SetField('chrome_src', '/usr/local/google/home/chrome-top')
    label_settings.SetField('autotest_path', '/tmp/autotest')

    mock_experiment_file.global_settings = global_settings
    mock_experiment_file.all_settings.append(label_settings)
    mock_experiment_file.all_settings.append(benchmark_settings)
    mock_experiment_file.all_settings.append(global_settings)

    mock_socket.return_value = ''

    # First test: General test.
    exp = ef.GetExperiment(mock_experiment_file, '', '')
    self.assertCountEqual(exp.remote, ['123.45.67.89', '123.45.76.80'])
    self.assertEqual(exp.cache_conditions, [0, 2, 1])
    self.assertEqual(exp.log_level, 'average')

    self.assertEqual(len(exp.benchmarks), 1)
    self.assertEqual(exp.benchmarks[0].name, 'bench_test')
    self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
    self.assertEqual(exp.benchmarks[0].iterations, 1)
    self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
    self.assertFalse(exp.benchmarks[0].show_all_results)

    self.assertEqual(len(exp.labels), 1)
    self.assertEqual(
        exp.labels[0].chromeos_image, 'chromeos/src/build/images/lumpy/latest/'
        'chromiumos_test_image.bin')
    self.assertEqual(exp.labels[0].autotest_path, '/tmp/autotest')
    self.assertEqual(exp.labels[0].board, 'lumpy')

    # Second test: Remotes listed in labels.
    test_flag.SetTestMode(True)
    label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
    exp = ef.GetExperiment(mock_experiment_file, '', '')
    self.assertCountEqual(
        exp.remote,
        ['123.45.67.89', '123.45.76.80', 'chromeos1.cros', 'chromeos2.cros'])

    # Third test: Automatic fixing of bad logging_level param:
    global_settings.SetField('logging_level', 'really loud!')
    exp = ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual(exp.log_level, 'verbose')

    # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
    global_settings.SetField('rerun_if_failed', 'true')
    global_settings.SetField('rerun', 'true')
    global_settings.SetField('same_machine', 'true')
    global_settings.SetField('same_specs', 'true')

    self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '', '')
    label_settings.SetField('remote', '')
    global_settings.SetField('remote', '123.45.67.89')
    exp = ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])

    # Fifth test: Adding a second label; calling GetXbuddyPath; omitting all
    # remotes (call GetDefaultRemotes).
    mock_socket.return_value = 'test.corp.google.com'
    global_settings.SetField('remote', '')
    global_settings.SetField('same_machine', 'false')

    label_settings_2 = settings_factory.LabelSettings('official_image_label')
    label_settings_2.SetField('chromeos_root', 'chromeos')
    label_settings_2.SetField('build', 'official-dev')
    label_settings_2.SetField('autotest_path', '')
    label_settings_2.GetXbuddyPath = FakeGetXbuddyPath

    mock_experiment_file.all_settings.append(label_settings_2)
    exp = ef.GetExperiment(mock_experiment_file, '', '')
    self.assertEqual(len(exp.labels), 2)
    self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
    self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
    self.assertCountEqual(
        exp.remote,
        ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])

  def test_get_default_remotes(self):
    board_list = [
        'elm', 'bob', 'chell', 'kefka', 'lulu', 'nautilus', 'snappy',
        'veyron_tiger'
    ]

    ef = ExperimentFactory()
    self.assertRaises(Exception, ef.GetDefaultRemotes, 'bad-board')

    # Verify that we have entries for every board, and that we get at least
    # two machines for each board.
    for b in board_list:
      remotes = ef.GetDefaultRemotes(b)
      if b == 'daisy':
        self.assertEqual(len(remotes), 1)
      else:
        self.assertGreaterEqual(len(remotes), 2)

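  # CheckSkylabTool is exercised in three scenarios: the tool is already
  # present, the setup command fails (RuntimeError), and the setup command
  # succeeds.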
  @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
  @mock.patch.object(os.path, 'exists')
  def test_check_skylab_tool(self, mock_exists, mock_runcmd):
    ef = ExperimentFactory()
    chromeos_root = '/tmp/chromeos'
    log_level = 'average'

    mock_exists.return_value = True
    ret = ef.CheckSkylabTool(chromeos_root, log_level)
    self.assertTrue(ret)

    mock_exists.return_value = False
    mock_runcmd.return_value = 1
    with self.assertRaises(RuntimeError) as err:
      ef.CheckSkylabTool(chromeos_root, log_level)
    self.assertEqual(mock_runcmd.call_count, 1)
    self.assertEqual(
        str(err.exception), 'Skylab tool not installed '
        'correctly, please try to manually install it from '
        '/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools')

    mock_runcmd.return_value = 0
    mock_runcmd.call_count = 0
    ret = ef.CheckSkylabTool(chromeos_root, log_level)
    self.assertEqual(mock_runcmd.call_count, 1)
    self.assertFalse(ret)


if __name__ == '__main__':
  FileUtils.Configure(True)
  test_flag.SetTestMode(True)
  unittest.main()