#!/usr/bin/python

"""Unit tests for the perf_uploader.py module."""

import json
import unittest

import common
from autotest_lib.tko import models as tko_models
from autotest_lib.tko.perf_upload import perf_uploader


class test_aggregate_iterations(unittest.TestCase):
    """Tests for the aggregate_iterations function."""

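    # Maps an iteration number (as a string) to the list of perf measurements
    # reported by that iteration. Each measurement carries a description,
    # value, standard deviation, units, improvement direction, and an
    # optional graph name.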
    _PERF_ITERATION_DATA = {
        '1': [
            {
                'description': 'metric1',
                'value': 1,
                'stddev': 0.0,
                'units': 'units1',
                'higher_is_better': True,
                'graph': None,
            },
            {
                'description': 'metric2',
                'value': 10,
                'stddev': 0.0,
                'units': 'units2',
                'higher_is_better': True,
                'graph': 'graph1',
            },
            {
                'description': 'metric2',
                'value': 100,
                'stddev': 1.7,
                'units': 'units3',
                'higher_is_better': False,
                'graph': 'graph2',
            }
        ],
        '2': [
            {
                'description': 'metric1',
                'value': 2,
                'stddev': 0.0,
                'units': 'units1',
                'higher_is_better': True,
                'graph': None,
            },
            {
                'description': 'metric2',
                'value': 20,
                'stddev': 0.0,
                'units': 'units2',
                'higher_is_better': True,
                'graph': 'graph1',
            },
            {
                'description': 'metric2',
                'value': 200,
                'stddev': 21.2,
                'units': 'units3',
                'higher_is_better': False,
                'graph': 'graph2',
            }
        ],
    }

    def setUp(self):
        """Sets up for each test case."""
        self._perf_values = []
        for iter_num, iter_data in self._PERF_ITERATION_DATA.iteritems():
            self._perf_values.append(
                    tko_models.perf_value_iteration(iter_num, iter_data))


    def test_one_iteration(self):
        """Tests that data for 1 iteration is aggregated properly."""
        result = perf_uploader._aggregate_iterations([self._perf_values[0]])
        self.assertEqual(len(result), 3, msg='Expected results for 3 metrics.')
        key = [('metric1', None), ('metric2', 'graph1'), ('metric2', 'graph2')]
        self.assertTrue(
            all([x in result for x in key]),
            msg='Parsed metrics not as expected.')
        msg = 'Perf values for metric not aggregated properly.'
        self.assertEqual(result[('metric1', None)]['value'], [1], msg=msg)
        self.assertEqual(result[('metric2', 'graph1')]['value'], [10], msg=msg)
        self.assertEqual(result[('metric2', 'graph2')]['value'], [100], msg=msg)
        msg = 'Standard deviation values not retained properly.'
        self.assertEqual(result[('metric1', None)]['stddev'], 0.0, msg=msg)
        self.assertEqual(result[('metric2', 'graph1')]['stddev'], 0.0, msg=msg)
        self.assertEqual(result[('metric2', 'graph2')]['stddev'], 1.7, msg=msg)


    def test_two_iterations(self):
        """Tests that data for 2 iterations is aggregated properly."""
        result = perf_uploader._aggregate_iterations(self._perf_values)
        self.assertEqual(len(result), 3, msg='Expected results for 3 metrics.')
        key = [('metric1', None), ('metric2', 'graph1'), ('metric2', 'graph2')]
        self.assertTrue(
            all([x in result for x in key]),
            msg='Parsed metrics not as expected.')
        msg = 'Perf values for metric not aggregated properly.'
        self.assertEqual(result[('metric1', None)]['value'], [1, 2], msg=msg)
        self.assertEqual(result[('metric2', 'graph1')]['value'], [10, 20],
                         msg=msg)
        self.assertEqual(result[('metric2', 'graph2')]['value'], [100, 200],
                         msg=msg)


class test_compute_avg_stddev(unittest.TestCase):
    """Tests for the compute_avg_stddev function."""

    def setUp(self):
        """Sets up for each test case."""
        self._perf_values = {
            'metric1': {'value': [10, 20, 30], 'stddev': 0.0},
            'metric2': {'value': [2.0, 3.0, 4.0], 'stddev': 0.0},
            'metric3': {'value': [1], 'stddev': 1.7},
        }


    def test_avg_stddev(self):
        """Tests that averages and standard deviations are computed properly."""
        perf_uploader._compute_avg_stddev(self._perf_values)
        result = self._perf_values  # The input dictionary itself is modified.
        self.assertEqual(len(result), 3, msg='Expected results for 3 metrics.')
        self.assertTrue(
            all([x in result for x in ['metric1', 'metric2', 'metric3']]),
            msg='Parsed metrics not as expected.')
        msg = 'Average value not computed properly.'
        self.assertEqual(result['metric1']['value'], 20, msg=msg)
        self.assertEqual(result['metric2']['value'], 3.0, msg=msg)
        self.assertEqual(result['metric3']['value'], 1, msg=msg)
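        # Note: 10.0 and 1.0 below are the sample standard deviations
        # (N-1 denominator) of [10, 20, 30] and [2.0, 3.0, 4.0]; metric3,
        # with a single value, keeps its original stddev of 1.7.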
        msg = 'Standard deviation value not computed properly.'
        self.assertEqual(result['metric1']['stddev'], 10.0, msg=msg)
        self.assertEqual(result['metric2']['stddev'], 1.0, msg=msg)
        self.assertEqual(result['metric3']['stddev'], 1.7, msg=msg)


class test_json_config_file_sanity(unittest.TestCase):
    """Sanity tests for the JSON-formatted presentation config file."""
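    # Each entry in the presentation config file is expected to name an
    # autotest ('autotest_name') and describe how its results are presented
    # on the perf dashboard (e.g. 'master_name', 'dashboard_test_name').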

    def test_parse_json(self):
        """Verifies that _parse_config_file can parse the config file."""
        perf_uploader._parse_config_file(
                perf_uploader._PRESENTATION_CONFIG_FILE)


    def test_proper_json(self):
        """Verifies the file can be parsed as proper JSON."""
        try:
            with open(perf_uploader._PRESENTATION_CONFIG_FILE, 'r') as fp:
                json.load(fp)
        except Exception:
            self.fail('Presentation config file could not be parsed as JSON.')


    def test_unique_test_names(self):
        """Verifies that each test name appears only once in the JSON file."""
        json_obj = []
        try:
            with open(perf_uploader._PRESENTATION_CONFIG_FILE, 'r') as fp:
                json_obj = json.load(fp)
        except Exception:
            self.fail('Presentation config file could not be parsed as JSON.')

        name_set = set([x['autotest_name'] for x in json_obj])
        self.assertEqual(len(name_set), len(json_obj),
                         msg='Autotest names not unique in the JSON file.')


    def test_required_master_name(self):
        """Verifies that a master name is specified for each test entry."""
        json_obj = []
        try:
            with open(perf_uploader._PRESENTATION_CONFIG_FILE, 'r') as fp:
                json_obj = json.load(fp)
        except Exception:
            self.fail('Presentation config file could not be parsed as JSON.')

        for entry in json_obj:
            if 'master_name' not in entry:
                self.fail('Missing master_name field for test %s.' %
                          entry['autotest_name'])


class test_gather_presentation_info(unittest.TestCase):
    """Tests for the gather_presentation_info function."""

    _PRESENT_INFO = {
        'test_name': {
            'master_name': 'new_master_name',
            'dashboard_test_name': 'new_test_name',
        }
    }

    _PRESENT_INFO_MISSING_MASTER = {
        'test_name': {
            'dashboard_test_name': 'new_test_name',
        }
    }


    def test_test_name_specified(self):
        """Verifies that presentation info is gathered correctly."""
        result = perf_uploader._gather_presentation_info(
                self._PRESENT_INFO, 'test_name')
        self.assertTrue(
                all([key in result for key in
                     ['test_name', 'master_name']]),
                msg='Missing keys in resulting dictionary: %s' % result)
        self.assertEqual(result['master_name'], 'new_master_name',
                         msg='Unexpected "master_name" value: %s' %
                             result['master_name'])
        self.assertEqual(result['test_name'], 'new_test_name',
                         msg='Unexpected "test_name" value: %s' %
                             result['test_name'])


    def test_test_name_not_specified(self):
        """Verifies an exception is raised if the test name is not present."""
        self.assertRaises(
                perf_uploader.PerfUploadingError,
                perf_uploader._gather_presentation_info,
                self._PRESENT_INFO, 'other_test_name')


    def test_master_not_specified(self):
        """Verifies an exception is raised if the master name is missing."""
        self.assertRaises(
                perf_uploader.PerfUploadingError,
                perf_uploader._gather_presentation_info,
                self._PRESENT_INFO_MISSING_MASTER, 'test_name')


class test_get_id_from_version(unittest.TestCase):
    """Tests for the _get_id_from_version function."""

    def test_correctly_formatted_versions(self):
        """Verifies that the expected ID is returned when input is OK."""
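        # The expected IDs below concatenate zero-padded version components:
        # Chrome build (5 digits) + Chrome patch (3) + CrOS build (5) +
        # CrOS branch (3) + CrOS patch (2).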
        chrome_version = '27.0.1452.2'
        cros_version = '27.3906.0.0'
        # 1452.2 + 3906.0.0
        # --> 01452 + 002 + 03906 + 000 + 00
        # --> 14520020390600000
        self.assertEqual(
                14520020390600000,
                perf_uploader._get_id_from_version(
                        chrome_version, cros_version))

        chrome_version = '25.10.1000.0'
        cros_version = '25.1200.0.0'
        # 1000.0 + 1200.0.0
        # --> 01000 + 000 + 01200 + 000 + 00
        # --> 10000000120000000
        self.assertEqual(
                10000000120000000,
                perf_uploader._get_id_from_version(
                        chrome_version, cros_version))

    def test_returns_none_when_given_invalid_input(self):
        """Checks the return value when invalid input is given."""
        chrome_version = '27.0'
        cros_version = '27.3906.0.0'
        self.assertIsNone(perf_uploader._get_id_from_version(
                chrome_version, cros_version))


class test_get_version_numbers(unittest.TestCase):
    """Tests for the _get_version_numbers function."""

    def test_with_valid_versions(self):
        """Checks the version numbers used when data is formatted as expected."""
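        # _get_version_numbers is expected to return (cros_version,
        # chrome_version), with the CrOS release version prefixed by the
        # Chrome milestone number ('34' below).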
        self.assertEqual(
                ('34.5678.9.0', '34.5.678.9'),
                perf_uploader._get_version_numbers(
                    {
                        'CHROME_VERSION': '34.5.678.9',
                        'CHROMEOS_RELEASE_VERSION': '5678.9.0',
                    }))

    def test_with_missing_version_raises_error(self):
        """Checks that an error is raised when a version is missing."""
        with self.assertRaises(perf_uploader.PerfUploadingError):
            perf_uploader._get_version_numbers(
                {
                    'CHROMEOS_RELEASE_VERSION': '5678.9.0',
                })

    def test_with_unexpected_version_format_raises_error(self):
        """Checks that an error is raised when there's an 'rN' suffix."""
        with self.assertRaises(perf_uploader.PerfUploadingError):
            perf_uploader._get_version_numbers(
                {
                    'CHROME_VERSION': '34.5.678.9',
                    'CHROMEOS_RELEASE_VERSION': '5678.9.0r1',
                })


class test_format_for_upload(unittest.TestCase):
    """Tests for the format_for_upload function."""

    _PERF_DATA = {
        ('metric1', 'graph_name'): {
            'value': 2.7,
            'stddev': 0.2,
            'units': 'msec',
            'graph': 'graph_name',
            'higher_is_better': False,
        },
        ('metric2', None): {
            'value': 101.35,
            'stddev': 5.78,
            'units': 'frames_per_sec',
            'graph': None,
            'higher_is_better': True,
        },
    }

    _PRESENT_INFO = {
        'master_name': 'new_master_name',
        'test_name': 'new_test_name',
    }

    def setUp(self):
        self._perf_data = self._PERF_DATA

    def _verify_result_string(self, actual_result, expected_result):
        """Verifies a JSON string matches the expected result.

        This function compares JSON objects rather than strings, because of
        possible floating-point values that need to be compared using
        assertAlmostEqual().

        @param actual_result: The candidate JSON string.
        @param expected_result: The reference JSON string that the candidate
            must match.

        """
        actual = json.loads(actual_result)
        expected = json.loads(expected_result)

        fail_msg = 'Unexpected result string: %s' % actual_result
        self.assertEqual(len(actual), len(expected), msg=fail_msg)
        # Make sure the dictionaries in 'expected' are in the same order
        # as the dictionaries in 'actual' before comparing their values.
        actual = sorted(actual, key=lambda x: x['test'])
        expected = sorted(expected, key=lambda x: x['test'])
        # Now compare the results.
        for idx in xrange(len(actual)):
            keys_actual = set(actual[idx].keys())
            keys_expected = set(expected[idx].keys())
            self.assertEqual(len(keys_actual), len(keys_expected),
                             msg=fail_msg)
            self.assertTrue(all([key in keys_actual for key in keys_expected]),
                            msg=fail_msg)

            self.assertEqual(
                    actual[idx]['supplemental_columns']['r_cros_version'],
                    expected[idx]['supplemental_columns']['r_cros_version'],
                    msg=fail_msg)
            self.assertEqual(
                    actual[idx]['supplemental_columns']['r_chrome_version'],
                    expected[idx]['supplemental_columns']['r_chrome_version'],
                    msg=fail_msg)
            self.assertEqual(
                    actual[idx]['supplemental_columns']['a_default_rev'],
                    expected[idx]['supplemental_columns']['a_default_rev'],
                    msg=fail_msg)
            self.assertEqual(
                    actual[idx]['supplemental_columns']['a_hardware_identifier'],
                    expected[idx]['supplemental_columns']['a_hardware_identifier'],
                    msg=fail_msg)
            self.assertEqual(
                    actual[idx]['supplemental_columns']['a_hardware_hostname'],
                    expected[idx]['supplemental_columns']['a_hardware_hostname'],
                    msg=fail_msg)
            self.assertEqual(
                    actual[idx]['bot'], expected[idx]['bot'], msg=fail_msg)
            self.assertEqual(
                    actual[idx]['revision'], expected[idx]['revision'], msg=fail_msg)
            self.assertAlmostEqual(
                    actual[idx]['value'], expected[idx]['value'], 4,
                    msg=fail_msg)
            self.assertEqual(
                    actual[idx]['units'], expected[idx]['units'], msg=fail_msg)
            self.assertEqual(
                    actual[idx]['master'], expected[idx]['master'],
                    msg=fail_msg)
            self.assertAlmostEqual(
                    actual[idx]['error'], expected[idx]['error'], 4,
                    msg=fail_msg)
            self.assertEqual(
                    actual[idx]['test'], expected[idx]['test'], msg=fail_msg)
            self.assertEqual(
                    actual[idx]['higher_is_better'],
                    expected[idx]['higher_is_better'], msg=fail_msg)


    def test_format_for_upload(self):
        """Verifies that format_for_upload generates correct JSON data."""
        result = perf_uploader._format_for_upload(
                'platform', '25.1200.0.0', '25.10.1000.0', 'WINKY E2A-F2K-Q35',
                'i7', 'test_machine', self._perf_data, self._PRESENT_INFO)
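        # The expected 'bot' name below is 'cros-' + platform + '-' + cpu
        # ('cros-platform-i7'), and the 'revision' matches the ID derived
        # from Chrome 25.10.1000.0 / ChromeOS 25.1200.0.0 (see
        # test_get_id_from_version above).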
        expected_result_string = (
                '[{"supplemental_columns": {"r_cros_version": "25.1200.0.0", '
                '"a_default_rev": "r_chrome_version", '
                '"a_hardware_identifier": "WINKY E2A-F2K-Q35", '
                '"a_hardware_hostname": "test_machine", '
                '"r_chrome_version": "25.10.1000.0"}, "bot": "cros-platform-i7", '
                '"higher_is_better": false, "value": 2.7, '
                '"revision": 10000000120000000, '
                '"units": "msec", "master": "new_master_name", '
                '"error": 0.2, "test": "new_test_name/graph_name/metric1"}, '
                '{"supplemental_columns": {"r_cros_version": "25.1200.0.0", '
                '"a_default_rev": "r_chrome_version", '
                '"a_hardware_identifier": "WINKY E2A-F2K-Q35", '
                '"a_hardware_hostname": "test_machine", '
                '"r_chrome_version": "25.10.1000.0"}, "bot": "cros-platform-i7", '
                '"higher_is_better": true, "value": 101.35, '
                '"revision": 10000000120000000, '
                '"units": "frames_per_sec", "master": "new_master_name", '
                '"error": 5.78, "test": "new_test_name/metric2"}]')

        self._verify_result_string(result['data'], expected_result_string)


if __name__ == '__main__':
    unittest.main()