# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import csv
import json
import time
import urllib
import urllib2
import logging
import httplib

import enterprise_longevity_helper
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib.cros import tpm_utils
from autotest_lib.server import autotest
from autotest_lib.server import test
from autotest_lib.server.cros.multimedia import remote_facade_factory


STABILIZATION_DURATION = 60
MEASUREMENT_DURATION_SECONDS = 10
TMP_DIRECTORY = '/tmp/'
PERF_FILE_NAME_PREFIX = 'perf'
VERSION_PATTERN = r'^(\d+)\.(\d+)\.(\d+)$'
DASHBOARD_UPLOAD_URL = 'https://chromeperf.appspot.com/add_point'
EXPECTED_PARAMS = ['perf_capture_iterations', 'perf_capture_duration',
                   'sample_interval', 'metric_interval', 'test_type',
                   'kiosk_app_attributes']


class PerfUploadingError(Exception):
    """Exception raised in perf_uploader."""
    pass


class enterprise_LongevityTrackerServer(test.test):
    """
    Run Longevity Test: Collect performance data over a long duration.

    Run enterprise_KioskEnrollment and clear the TPM as necessary. After
    enterprise enrollment is successful, collect and log cpu, memory, and
    temperature data from the device under test.

    """
    version = 1
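    # A control file would invoke this test roughly as follows (a sketch;
    # the parameter values below are hypothetical):
    #
    #   job.run_test('enterprise_LongevityTrackerServer', host=host,
    #                perf_params={'perf_capture_iterations': 1,
    #                             'perf_capture_duration': 3600,
    #                             'sample_interval': 60,
    #                             'metric_interval': 1200,
    #                             'test_type': 'multiple_samples',
    #                             'kiosk_app_attributes':
    #                                     'app_name:extension_id'})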

    def initialize(self):
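        """Set the directory used to collect and aggregate perf files.

        The parent of the autotest-created tmpdir is used so that perf
        files can accumulate data across runs of this test (see the
        _run_perf_capture_cycle docstring below).
        """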
        self.temp_dir = os.path.split(self.tmpdir)[0]


    #TODO(krishnargv@): Add a method to retrieve the version of the
    #                   Kiosk app from its manifest.
    def _initialize_test_variables(self):
        """Initialize test variables that will be uploaded to the dashboard."""
        self.board_name = self.system_facade.get_current_board()
        self.chromeos_version = (
                self.system_facade.get_chromeos_release_version())
        epoch_minutes = str(int(time.time() / 60))
        self.point_id = enterprise_longevity_helper.get_point_id(
                self.chromeos_version, epoch_minutes, VERSION_PATTERN)
        self.test_suite_name = self.tagged_testname
        self.perf_capture_duration = self.perf_params['perf_capture_duration']
        self.sample_interval = self.perf_params['sample_interval']
        self.metric_interval = self.perf_params['metric_interval']
        self.perf_results = {'cpu': '0', 'mem': '0', 'temp': '0'}


    def elapsed_time(self, mark_time):
        """
        Get time elapsed since |mark_time|.

        @param mark_time: point in time from which elapsed time is measured.

        @returns time elapsed since the marked time.

        """
        return time.time() - mark_time


    #TODO(krishnargv):  Replace _format_data_for_upload with a call to the
    #                   _format_for_upload method of the perf_uploader.py
    def _format_data_for_upload(self, chart_data):
        """
        Collect chart data into an uploadable data JSON object.

        @param chart_data: performance results formatted as chart data.

        @returns dict with a single 'data' key mapped to the JSON-encoded
                dashboard entry.

        """
        perf_values = {
            'format_version': '1.0',
            'benchmark_name': self.test_suite_name,
            'charts': chart_data,
        }
        #TODO(krishnargv): Add a method to capture the chrome_version.
        dash_entry = {
            'master': 'ChromeOS_Enterprise',
            'bot': 'cros-%s' % self.board_name,
            'point_id': self.point_id,
            'versions': {
                'cros_version': self.chromeos_version,
            },
            'supplemental': {
                'default_rev': 'r_cros_version',
                'kiosk_app_name': 'a_' + self.kiosk_app_name,
            },
            'chart_data': perf_values
        }
        return {'data': json.dumps(dash_entry)}


    #TODO(krishnargv):  Replace _send_to_dashboard with a call to the
    #                   _send_to_dashboard method of the perf_uploader.py
    def _send_to_dashboard(self, data_obj):
        """
        Send formatted perf data to the perf dashboard.

        @param data_obj: data object as returned by _format_data_for_upload().

        @raises PerfUploadingError if an exception was raised when uploading.

        """
        logging.debug('Data_obj to be uploaded: %s', data_obj)
        encoded = urllib.urlencode(data_obj)
        req = urllib2.Request(DASHBOARD_UPLOAD_URL, encoded)
        try:
            urllib2.urlopen(req)
        except urllib2.HTTPError as e:
            raise PerfUploadingError('HTTPError: %d %s for JSON %s\n' %
                                     (e.code, e.msg, data_obj['data']))
        except urllib2.URLError as e:
            raise PerfUploadingError('URLError: %s for JSON %s\n' %
                                     (str(e.reason), data_obj['data']))
        except httplib.HTTPException:
            raise PerfUploadingError('HTTPException for JSON %s\n' %
                                     data_obj['data'])

    def _write_perf_keyvals(self, perf_results):
        """
        Write perf results to keyval file for AutoTest results.

        @param perf_results: dict of attribute performance metrics.

        """
        perf_keyval = {}
        perf_keyval['cpu_usage'] = perf_results['cpu']
        perf_keyval['memory_usage'] = perf_results['mem']
        perf_keyval['temperature'] = perf_results['temp']
        self.write_perf_keyval(perf_keyval)


    def _write_perf_results(self, perf_results):
        """
        Write perf results to results-chart.json file for Perf Dashboard.

        @param perf_results: dict of attribute performance metrics.

        """
        cpu_metric = perf_results['cpu']
        mem_metric = perf_results['mem']
        ec_metric = perf_results['temp']
        self.output_perf_value(description='cpu_usage', value=cpu_metric,
                               units='percent', higher_is_better=False)
        self.output_perf_value(description='mem_usage', value=mem_metric,
                               units='percent', higher_is_better=False)
        self.output_perf_value(description='max_temp', value=ec_metric,
                               units='Celsius', higher_is_better=False)

    def _record_perf_measurements(self, perf_values, perf_writer):
        """
        Record attribute performance measurements, and write to file.

        @param perf_values: dict of attribute performance values.
        @param perf_writer: file to write performance measurements.

        """
        # Get performance measurements.
        cpu_usage = '%.3f' % enterprise_longevity_helper.get_cpu_usage(
                self.system_facade, MEASUREMENT_DURATION_SECONDS)
        mem_usage = '%.3f' % enterprise_longevity_helper.get_memory_usage(
                self.system_facade)
        max_temp = '%.3f' % enterprise_longevity_helper.get_temperature_data(
                self.client, self.system_facade)

        # Append measurements to attribute lists in perf values dictionary.
        perf_values['cpu'].append(float(cpu_usage))
        perf_values['mem'].append(float(mem_usage))
        perf_values['temp'].append(float(max_temp))

        # Write performance measurements to perf timestamped file.
        time_stamp = time.strftime('%Y/%m/%d %H:%M:%S')
        perf_writer.writerow([time_stamp, cpu_usage, mem_usage, max_temp])
        logging.info('Time: %s, CPU: %r, Mem: %r, Temp: %r',
                     time_stamp, cpu_usage, mem_usage, max_temp)

    def _setup_kiosk_app_on_dut(self, kiosk_app_attributes=None):
        """
        Enroll the DUT and set up a Kiosk app.

        @param kiosk_app_attributes: colon-separated attribute string; the
                first two fields are used as the Kiosk app name and its
                extension id.

        """
        info = self.client.host_info_store.get()
        app_config_id = info.get_label_value('app_config_id')
        if app_config_id and app_config_id.startswith(':'):
            app_config_id = app_config_id[1:]
        if kiosk_app_attributes:
            kiosk_app_attributes = kiosk_app_attributes.rstrip()
            self.kiosk_app_name, ext_id = kiosk_app_attributes.split(':')[:2]

        tpm_utils.ClearTPMOwnerRequest(self.client)
        logging.info("Enrolling the DUT into Kiosk mode")
        autotest.Autotest(self.client).run_test(
                'enterprise_KioskEnrollment',
                kiosk_app_attributes=kiosk_app_attributes,
                check_client_result=True)

        #if self.kiosk_app_name == 'riseplayer':
        #    self.kiosk_facade.config_rise_player(ext_id, app_config_id)

    def _run_perf_capture_cycle(self):
        """
        Track performance of Chrome OS over a long period of time.

        This method collects performance measurements, and calculates metrics
        to upload to the performance dashboard. It creates two files to
        collect and store performance values and results: perf_<timestamp>.csv
        and perf_aggregated.csv.

        At the start, it creates a unique perf timestamped file in the test's
        temp_dir. As the cycle runs, it saves a time-stamped performance
        value after each sample interval. Periodically, it calculates
        the 90th percentile performance metrics from these values.

        The perf_<timestamp> files in temp_dir survive multiple capture
        cycles within a run, and also survive multiple runs of the
        server-side test.

        At the end, it opens the perf aggregated file in the test's temp_dir,
        and appends the contents of the perf timestamped file. It then
        copies the perf aggregated file to the results directory as perf.csv.
        This perf.csv file will be consumed by the AutoTest backend when the
        server-side test ends.

        Note that the perf_aggregated.csv file will grow larger with each
        run of this test on the device.

        This method captures perf metrics every sample_interval seconds; at
        each metric_interval the 90th percentile of the collected values is
        calculated and saved. The perf capture runs for perf_capture_duration
        seconds, and at the end of that interval the median of all 90th
        percentile metrics is returned.

        @returns dict of median performance metrics, keyed by attribute.

        """
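        # Example cadence (hypothetical values): with sample_interval=60,
        # metric_interval=1200 and perf_capture_duration=3600, the loop
        # below records one measurement per minute, folds them into a
        # 90th-percentile metric every 20 minutes, and the median of the
        # resulting metrics is returned after one hour.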
        test_start_time = time.time()

        perf_values = {'cpu': [], 'mem': [], 'temp': []}
        perf_metrics = {'cpu': [], 'mem': [], 'temp': []}

        # Create perf_<timestamp> file and writer.
        timestamp_fname = (PERF_FILE_NAME_PREFIX +
                           time.strftime('_%Y-%m-%d_%H-%M') + '.csv')
        timestamp_fpath = os.path.join(self.temp_dir, timestamp_fname)
        timestamp_file = enterprise_longevity_helper.open_perf_file(
                timestamp_fpath)
        timestamp_writer = csv.writer(timestamp_file)

        # Align time of loop start with the sample interval.
        test_elapsed_time = self.elapsed_time(test_start_time)
        time.sleep(enterprise_longevity_helper.syncup_time(
                test_elapsed_time, self.sample_interval))
        test_elapsed_time = self.elapsed_time(test_start_time)

        metric_start_time = time.time()
        metric_prev_time = metric_start_time

        metric_elapsed_prev_time = self.elapsed_time(metric_prev_time)
        offset = enterprise_longevity_helper.modulo_time(
                metric_elapsed_prev_time, self.metric_interval)
        metric_timer = metric_elapsed_prev_time + offset
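        # Note: offset (from modulo_time()) is the time remaining until the
        # next metric_interval boundary, so metric_timer crosses
        # self.metric_interval when a 90th-percentile metric is due.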
        while self.elapsed_time(test_start_time) <= self.perf_capture_duration:
            self._record_perf_measurements(perf_values, timestamp_writer)

            # Periodically calculate and record 90th percentile metrics.
            metric_elapsed_prev_time = self.elapsed_time(metric_prev_time)
            metric_timer = metric_elapsed_prev_time + offset
            if metric_timer >= self.metric_interval:
                enterprise_longevity_helper.record_90th_metrics(
                        perf_values, perf_metrics)
                perf_values = {'cpu': [], 'mem': [], 'temp': []}

                # Set previous time to current time.
                metric_prev_time = time.time()
                metric_elapsed_prev_time = self.elapsed_time(metric_prev_time)

                metric_elapsed_time = self.elapsed_time(metric_start_time)
                offset = enterprise_longevity_helper.modulo_time(
                        metric_elapsed_time, self.metric_interval)

                # Set the timer to time elapsed plus offset to next interval.
                metric_timer = metric_elapsed_prev_time + offset

            # Sync the loop time to the sample interval.
            test_elapsed_time = self.elapsed_time(test_start_time)
            time.sleep(enterprise_longevity_helper.syncup_time(
                    test_elapsed_time, self.sample_interval))

        # Close perf timestamp file.
        timestamp_file.close()
        # Open perf timestamp file to read, and aggregated file to append.
        timestamp_file = open(timestamp_fpath, 'r')
        aggregated_fname = (PERF_FILE_NAME_PREFIX + '_aggregated.csv')
        aggregated_fpath = os.path.join(self.temp_dir, aggregated_fname)
        aggregated_file = enterprise_longevity_helper.open_perf_file(
                aggregated_fpath)

        # Append contents of perf timestamp file to perf aggregated file.
        enterprise_longevity_helper.append_to_aggregated_file(
                timestamp_file, aggregated_file)
        timestamp_file.close()
        aggregated_file.close()

        # Copy perf aggregated file to test results directory.
        enterprise_longevity_helper.copy_aggregated_to_resultsdir(
                self.resultsdir, aggregated_fpath, 'perf.csv')

        # Return median of each attribute performance metric.
        logging.info("Perf_metrics: %r", perf_metrics)
        return enterprise_longevity_helper.get_median_metrics(perf_metrics)

    def run_once(self, host=None, perf_params=None):
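        """
        Enroll the DUT into Kiosk mode and run the perf capture iterations.

        @param host: DUT host object to enroll and measure.
        @param perf_params: dict of perf parameters from the control file;
                it must contain every key listed in EXPECTED_PARAMS.

        """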
        self.client = host
        self.kiosk_app_name = None
        self.perf_params = perf_params
        logging.info('Perf params: %r', self.perf_params)

        if not enterprise_longevity_helper.verify_perf_params(
                EXPECTED_PARAMS, self.perf_params):
            raise error.TestFail('Missing or incorrect perf_params in the'
                                 ' control file. Refer to the README.txt for'
                                 ' info on perf params: %r'
                                 % (self.perf_params))
        factory = remote_facade_factory.RemoteFacadeFactory(
                host, no_chrome=True)
        self.system_facade = factory.create_system_facade()
        self.kiosk_facade = factory.create_kiosk_facade()

        self._setup_kiosk_app_on_dut(self.perf_params['kiosk_app_attributes'])
        time.sleep(STABILIZATION_DURATION)

        self._initialize_test_variables()
        for iteration in range(self.perf_params['perf_capture_iterations']):
            #TODO(krishnargv@): Add a method to verify that the Kiosk app is
            #                   active and is running on the DUT.
            logging.info("Running perf_capture Iteration: %d", iteration + 1)
            self.perf_results = self._run_perf_capture_cycle()
            self._write_perf_keyvals(self.perf_results)
            self._write_perf_results(self.perf_results)

            # Post perf results directly to the performance dashboard. You may
            # view uploaded data at https://chromeperf.appspot.com/new_points,
            # with test path pattern=ChromeOS_Enterprise/cros-*/longevity*/*
            if self.perf_params['test_type'] == 'multiple_samples':
                chart_data = enterprise_longevity_helper.read_perf_results(
                        self.resultsdir, 'results-chart.json')
                data_obj = self._format_data_for_upload(chart_data)
                self._send_to_dashboard(data_obj)
        tpm_utils.ClearTPMOwnerRequest(self.client)