# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

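"""
wlgen workloads wrapping the 'perf bench sched' micro-benchmarks.

PerfMessaging runs 'perf bench sched messaging' and PerfPipe runs
'perf bench sched pipe'. Both parse the benchmark output for the total
completion time, derive a performance score as 1 / completion_time and,
through a postrun callback, save the results as performance.json in the
output directory.

Minimal usage sketch (assuming an already-connected wlgen target; the
run() keywords follow the base Workload interface and are illustrative):

    pb = PerfMessaging(target, 'perf_messaging')
    pb.conf(group=4, loop=500, pipe='yes', thread='yes')
    pb.run(out_dir='./results')
"""
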
import json
import logging
import re

from wlgen import Workload

class PerfMessaging(Workload):
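    """
    Workload wrapping the 'perf bench sched messaging' benchmark
    (a hackbench-style test): groups of sender/receiver tasks exchange
    messages over socketpairs or pipes, and the reported 'Total time'
    is used as the completion time metric.
    """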

    def __init__(self,
                 target,
                 name):

        # Setup logging
        self.logger = logging.getLogger('perf_bench')

        # TODO: Assume perf is pre-installed on target
        #target.setup('perf')

        super(PerfMessaging, self).__init__(target, name)

        # perf "sched" executor
        self.wtype = 'perf_bench_messaging'
        self.executor = 'perf bench sched messaging'

        # Setup post-processing callback
        self.setCallback('postrun', self.__postrun)

    def conf(self,
             group=1,
             loop=500,
             pipe='',
             thread='',
             cpus=None,
             cgroup=None,
             exc_id=0,
             run_dir=None):
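        """
        Configure the benchmark run.

        group  - number of sender/receiver groups
        loop   - number of message loops to run
        pipe   - any non-empty value selects pipes (--pipe) instead of
                 socketpairs
        thread - any non-empty value selects threads (--thread) instead
                 of processes
        """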

        if pipe != '':
            pipe = '--pipe'
        if thread != '':
            thread = '--thread'

        super(PerfMessaging, self).conf(
            'custom',
            params={'group': str(group),
                    'loop': str(loop),
                    'pipe': pipe,
                    'thread': thread},
            duration=0, cpus=cpus, cgroup=cgroup, exc_id=exc_id,
            run_dir=run_dir)

        self.command = '{0:s}/perf bench sched messaging {1:s} {2:s} --group {3:s} --loop {4:s}'\
                .format(self.target.executables_directory,
                        self.params['custom']['pipe'],
                        self.params['custom']['thread'],
                        self.params['custom']['group'],
                        self.params['custom']['loop'])

        self.logger.debug('%14s - Command line: %s', 'PerfBench', self.command)

        # Set and return the test label
        self.test_label = '{0:s}_{1:02d}'.format(self.name, self.exc_id)
        return self.test_label

    def getCompletionTime(self):
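        """Parse the benchmark output and return the reported 'Total time' [sec] as a string."""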
        results = self.getOutput()
        match = re.search(r'Total time: ([0-9\.]+) \[sec\]', results)
        return match.group(1)

    def __postrun(self, params):
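        """
        Postrun callback: save the raw benchmark output (output.log) and
        the derived performance metrics (performance.json) into the
        destination directory.
        """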
        destdir = params['destdir']
        if destdir is None:
            return

        logfile = '{}/output.log'.format(destdir)
        self.logger.debug('%14s - Saving output to [%s]...',
                          'PerfBench', logfile)
        with open(logfile, 'w') as ofile:
            for line in self.getOutput().split('\n'):
                ofile.write(line + '\n')

        # Computing performance metric
        ctime = float(self.getCompletionTime())
        perf = 1.0 / ctime
        results = {
                "ctime": ctime,
                "performance": perf
        }

        self.logger.info('%14s - Completion time: %.6f, Performance %.6f',
                         'PerfBench', ctime, perf)

        perfile = '{}/performance.json'.format(destdir)
        self.logger.debug('%14s - Saving performance metrics to [%s]...',
                          'PerfBench', perfile)
        with open(perfile, 'w') as ofile:
            json.dump(results, ofile, sort_keys=True, indent=4)


class PerfPipe(Workload):
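    """
    Workload wrapping the 'perf bench sched pipe' benchmark: two tasks
    bounce a token back and forth over a pipe, exercising the scheduler
    and the pipe read/write path.
    """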

    def __init__(self,
                 target,
                 name):

        # TODO: Assume perf is pre-installed on target
        #target.setup('perf')

        super(PerfPipe, self).__init__(target, name)

        # Setup logging
        self.logger = logging.getLogger('perf_bench')

        # perf "sched" executor
        self.wtype = 'perf_bench_pipe'
        self.executor = 'perf bench sched pipe'

        # Setup post-processing callback
        self.setCallback('postrun', self.__postrun)

    def conf(self,
             loop=10,
             cpus=None,
             cgroup=None,
             exc_id=0):
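        """
        Configure the benchmark run.

        loop - number of pipe operations to perform
        """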

        super(PerfPipe, self).conf(
            'custom',
            params={'loop': str(loop)},
            duration=0, cpus=cpus, cgroup=cgroup, exc_id=exc_id)

        self.command = '{0:s}/perf bench sched pipe --loop {1:s}'\
                .format(self.target.executables_directory,
                        self.params['custom']['loop'])

        self.logger.debug('%14s - Command line: %s',
                          'PerfBench', self.command)

        # Set and return the test label
        self.test_label = '{0:s}_{1:02d}'.format(self.name, self.exc_id)
        return self.test_label

    def getCompletionTime(self):
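        """Parse the benchmark output and return the reported 'Total time' [sec] as a string."""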
        results = self.getOutput()
        match = re.search(r'Total time: ([0-9\.]+) \[sec\]', results)
        return match.group(1)

    def getUsecPerOp(self):
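        """Parse the benchmark output and return the average 'usecs/op' as a string."""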
        results = self.getOutput()
        match = re.search(r'([0-9\.]+) usecs/op', results)
        return match.group(1)

    def getOpPerSec(self):
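        """Parse the benchmark output and return the 'ops/sec' throughput as a string."""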
        results = self.getOutput()
        match = re.search(r'([0-9]+) ops/sec', results)
        return match.group(1)

    def __postrun(self, params):
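        """
        Postrun callback: save the raw benchmark output (output.log) and
        the derived performance metrics (performance.json) into the
        destination directory.
        """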
        destdir = params['destdir']
        if destdir is None:
            return

        logfile = '{}/output.log'.format(destdir)
        self.logger.debug('%14s - Saving output to [%s]...',
                          'PerfBench', logfile)
        with open(logfile, 'w') as ofile:
            for line in self.getOutput().split('\n'):
                ofile.write(line + '\n')

        # Computing performance metrics
        ctime = float(self.getCompletionTime())
        uspo = float(self.getUsecPerOp())
        ops = float(self.getOpPerSec())

        perf = 1.0 / ctime
        results = {
                "ctime": ctime,
                "performance": perf,
                "usec/op": uspo,
                "ops/sec": ops
        }

        self.logger.info('%14s - Completion time: %.6f, Performance %.6f',
                         'PerfBench', ctime, perf)

        # Reporting performance metrics
        perfile = '{}/performance.json'.format(destdir)
        self.logger.debug('%14s - Saving performance metrics to [%s]...',
                          'PerfBench', perfile)
        with open(perfile, 'w') as ofile:
            json.dump(results, ofile, sort_keys=True, indent=4)