# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

""" Tasks Analysis Module """

import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
import re

from analysis_module import AnalysisModule
from devlib.utils.misc import memoized
from trappy.utils import listify

class TasksAnalysis(AnalysisModule):
    """
    Support for Tasks signals analysis.

    :param trace: input Trace object
    :type trace: :mod:`libs.utils.Trace`
    """
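
    # Illustrative usage sketch (not part of the API): assuming a Trace
    # object `trace` has been built elsewhere and that this module is
    # registered as `trace.analysis.tasks` (the exact attribute name depends
    # on how AnalysisModule registers it), typical calls look like:
    #
    #   tasks = trace.analysis.tasks
    #   big_df = tasks._dfg_top_big_tasks(min_samples=100)
    #   tasks.plotTasks(['mytask'], signals=['util_avg', 'residencies'])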

    def __init__(self, trace):
        super(TasksAnalysis, self).__init__(trace)


###############################################################################
# DataFrame Getter Methods
###############################################################################

    def _dfg_top_big_tasks(self, min_samples=100, min_utilization=None):
        """
        Tasks which had more than `min_samples` 'utilization' samples above
        the specified `min_utilization` threshold.

        :param min_samples: minimum number of samples over the min_utilization
        :type min_samples: int

        :param min_utilization: minimum utilization used to filter samples
            default: capacity of a little cluster
        :type min_utilization: int
        """
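        # Example (illustrative): with min_samples=100 and the default
        # min_utilization (the LITTLE cluster capacity), the returned
        # DataFrame is indexed by PID and reports, for each big task, the
        # number of qualifying samples ('samples') and its name ('comm').
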
        if not self._trace.hasEvents('sched_load_avg_task'):
            self._log.warning('Events [sched_load_avg_task] not found')
            return None

        if min_utilization is None:
            min_utilization = self._little_cap

        # Get utilization samples > min_utilization
        df = self._dfg_trace_event('sched_load_avg_task')
        big_tasks_events = df[df.util_avg > min_utilization]
        if not len(big_tasks_events):
            self._log.warning('No tasks with utilization samples > %d',
                              min_utilization)
            return None

        # Report the number of tasks which match the min_utilization condition
        big_tasks = big_tasks_events.pid.unique()
        self._log.info('%5d tasks with samples of utilization > %d',
                       len(big_tasks), min_utilization)

        # Compute number of samples above threshold
        big_tasks_stats = big_tasks_events.groupby('pid')\
                            .describe(include=['object'])
        big_tasks_stats = big_tasks_stats.unstack()['comm']\
                            .sort_values(by=['count'], ascending=False)

        # Filter for number of occurrences
        big_tasks_stats = big_tasks_stats[big_tasks_stats['count'] > min_samples]
        if not len(big_tasks_stats):
            self._log.warning('      but none with more than %d samples',
                              min_samples)
            return None

        self._log.info('      %d with more than %d samples',
                       len(big_tasks_stats), min_samples)

        # Add task name column
        big_tasks_stats['comm'] = big_tasks_stats.index.map(
            lambda pid: self._trace.getTaskByPid(pid))

        # Filter columns of interest
        big_tasks_stats = big_tasks_stats[['count', 'comm']]
        big_tasks_stats.rename(columns={'count': 'samples'}, inplace=True)

        return big_tasks_stats

    def _dfg_top_wakeup_tasks(self, min_wakeups=100):
        """
        Tasks which wake up more frequently than the specified threshold.

        :param min_wakeups: minimum number of wakeups
        :type min_wakeups: int
        """
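        # Example (illustrative): _dfg_top_wakeup_tasks(min_wakeups=500)
        # returns a DataFrame indexed by PID, sorted by wakeup count, with
        # 'samples' (number of wakeups) and 'comm' columns, keeping only
        # tasks that woke up more than 500 times.
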
        if not self._trace.hasEvents('sched_wakeup'):
            self._log.warning('Events [sched_wakeup] not found')
            return None

        df = self._dfg_trace_event('sched_wakeup')

        # Compute number of wakeups above threshold
        wkp_tasks_stats = df.groupby('pid').describe(include=['object'])
        wkp_tasks_stats = wkp_tasks_stats.unstack()['comm']\
                          .sort_values(by=['count'], ascending=False)

        # Filter for number of occurrences
        wkp_tasks_stats = wkp_tasks_stats[
            wkp_tasks_stats['count'] > min_wakeups]
        if not len(wkp_tasks_stats):
            self._log.warning('No tasks with more than %d wakeups',
                              min_wakeups)
            return None
        self._log.info('%5d tasks with more than %d wakeups',
                       len(wkp_tasks_stats), min_wakeups)

        # Add task name column
        wkp_tasks_stats['comm'] = wkp_tasks_stats.index.map(
            lambda pid: self._trace.getTaskByPid(pid))

        # Filter columns of interest
        wkp_tasks_stats = wkp_tasks_stats[['count', 'comm']]
        wkp_tasks_stats.rename(columns={'count': 'samples'}, inplace=True)

        return wkp_tasks_stats

    def _dfg_rt_tasks(self, min_prio=100):
        """
        Tasks with RT priority

        NOTE: priorities use scheduler values, thus: the lower the value,
              the higher the task priority.
              RT   Priorities: [  0..100]
              FAIR Priorities: [101..120]

        :param min_prio: minimum priority
        :type min_prio: int
        """
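        # Example (illustrative): _dfg_rt_tasks(min_prio=99) returns a
        # DataFrame indexed by PID with 'prio' and 'comm' columns, listing
        # every task that was ever switched in with a priority value not
        # greater than 99, i.e. with an RT priority.
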
        if not self._trace.hasEvents('sched_switch'):
            self._log.warning('Events [sched_switch] not found')
            return None

        df = self._dfg_trace_event('sched_switch')

        # Keep tasks with a priority value not above the threshold,
        # i.e. tasks with at least the requested priority
        df = df[df.next_prio <= min_prio]

        # Filter columns of interest
        rt_tasks = df[['next_pid', 'next_prio']]

        # Remove all duplicates
        rt_tasks = rt_tasks.drop_duplicates()

        # Order by priority
        rt_tasks.sort_values(by=['next_prio', 'next_pid'], ascending=True,
                             inplace=True)
        rt_tasks.rename(columns={'next_pid': 'pid', 'next_prio': 'prio'},
                        inplace=True)

        # Set PID as index
        rt_tasks.set_index('pid', inplace=True)

        # Add task name column
        rt_tasks['comm'] = rt_tasks.index.map(
            lambda pid: self._trace.getTaskByPid(pid))

        return rt_tasks


###############################################################################
# Plotting Methods
###############################################################################

    def plotTasks(self, tasks, signals=None):
        """
        Generate a common set of useful plots for each of the specified tasks.

        This method allows filtering which signals should be plotted, provided
        the data are available in the input trace. The supported signals are:

        Task signals plot:
                load_avg, util_avg, boosted_util, sched_overutilized
        Task residencies on CPUs:
                residencies, sched_overutilized
        Task PELT signals:
                load_sum, util_sum, period_contrib, sched_overutilized

        At least one of the above signals must be specified to get a valid
        plot.

        Additional custom signals can be specified and they will be represented
        in the "Task signals plot" if they are valid keys of the task
        load/utilization trace event (e.g. sched_load_avg_task).

        Note:
            sched_overutilized: enable the plotting of overutilization bands on
                                top of each subplot
            residencies: enable the generation of the CPUs residencies plot

        :param tasks: the list of task names and/or PIDs to plot.
                      Numerical PIDs and string task names can be mixed
                      in the same list.
        :type tasks: list(str) or list(int)

        :param signals: list of signals (and thus plots) to generate
                        default: all the plots and signals available in the
                        current trace
        :type signals: list(str)
        """
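        # Illustrative usage sketch (assumes the trace contains
        # sched_load_avg_task events and that this module is reachable as
        # `trace.analysis.tasks`):
        #
        #   trace.analysis.tasks.plotTasks(
        #       tasks=['mytask', 1234],
        #       signals=['util_avg', 'boosted_util', 'residencies',
        #                'sched_overutilized'])
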
        if not signals:
            signals = ['load_avg', 'util_avg', 'boosted_util',
                       'sched_overutilized',
                       'load_sum', 'util_sum', 'period_contrib',
                       'residencies']

        # Check for the minimum required signals to be available
        if not self._trace.hasEvents('sched_load_avg_task'):
            self._log.warning('Events [sched_load_avg_task] not found, '
                              'plot DISABLED!')
            return

        # Define the list of tasks to plot
        if tasks and \
            not isinstance(tasks, str) and \
            not isinstance(tasks, list):
            raise ValueError('Wrong format for tasks parameter')

        if tasks:
            tasks_to_plot = listify(tasks)
        else:
            raise ValueError('No tasks to plot specified')

        # Compute number of plots to produce
        plots_count = 0
        plots_signals = [
                # First plot: task's utilization
                {'load_avg', 'util_avg', 'boosted_util'},
                # Second plot: task residency
                {'residencies'},
                # Third plot: task's PELT signals
                {'load_sum', 'util_sum', 'period_contrib'}
        ]
        hr = []
        ysize = 0
        for plot_id, signals_to_plot in enumerate(plots_signals):
            signals_to_plot = signals_to_plot.intersection(signals)
            if len(signals_to_plot):
                plots_count = plots_count + 1
                # Use bigger size only for the first plot
                hr.append(3 if plot_id == 0 else 1)
                ysize = ysize + (8 if plot_id else 4)

        # Grid
        gs = gridspec.GridSpec(plots_count, 1, height_ratios=hr)
        gs.update(wspace=0.1, hspace=0.1)

        # Build list of all PIDs for each task_name to plot
        pids_to_plot = []
        for task in tasks_to_plot:
            # Add specified PIDs to the list
            if isinstance(task, int):
                pids_to_plot.append(task)
                continue
            # Otherwise: add all the PIDs for tasks with the specified name
            pids_to_plot.extend(self._trace.getTaskByName(task))

        for tid in pids_to_plot:
            savefig = False

            task_name = self._trace.getTaskByPid(tid)
            self._log.info('Plotting [%d:%s]...', tid, task_name)
            plot_id = 0

            # For each task create a figure with plots_count plots
            plt.figure(figsize=(16, ysize))
            plt.suptitle('Task Signals',
                         y=.94, fontsize=16, horizontalalignment='center')

            # Plot load and utilization
            signals_to_plot = {'load_avg', 'util_avg', 'boosted_util'}
            signals_to_plot = list(signals_to_plot.intersection(signals))
            if len(signals_to_plot) > 0:
                axes = plt.subplot(gs[plot_id, 0])
                axes.set_title('Task [{0:d}:{1:s}] Signals'
                               .format(tid, task_name))
                plot_id = plot_id + 1
                is_last = (plot_id == plots_count)
                self._plotTaskSignals(axes, tid, signals, is_last)
                savefig = True

            # Plot CPUs residency
            signals_to_plot = {'residencies'}
            signals_to_plot = list(signals_to_plot.intersection(signals))
            if len(signals_to_plot) > 0:
                axes = plt.subplot(gs[plot_id, 0])
                axes.set_title(
                    'Task [{0:d}:{1:s}] Residency (green: LITTLE, red: big)'
                    .format(tid, task_name)
                )
                plot_id = plot_id + 1
                is_last = (plot_id == plots_count)
                if 'sched_overutilized' in signals:
                    signals_to_plot.append('sched_overutilized')
                self._plotTaskResidencies(axes, tid, signals_to_plot, is_last)
                savefig = True

            # Plot PELT signals
            signals_to_plot = {'load_sum', 'util_sum', 'period_contrib'}
            signals_to_plot = list(signals_to_plot.intersection(signals))
            if len(signals_to_plot) > 0:
                axes = plt.subplot(gs[plot_id, 0])
                axes.set_title('Task [{0:d}:{1:s}] PELT Signals'
                               .format(tid, task_name))
                plot_id = plot_id + 1
                if 'sched_overutilized' in signals:
                    signals_to_plot.append('sched_overutilized')
                self._plotTaskPelt(axes, tid, signals_to_plot)
                savefig = True

            if not savefig:
                self._log.warning('Nothing to plot for %s', task_name)
                continue

            # Save generated plots into datadir
            if isinstance(task_name, list):
                task_name = re.sub('[:/]', '_', task_name[0])
            else:
                task_name = re.sub('[:/]', '_', task_name)
            figname = '{}/{}task_util_{}_{}.png'\
                      .format(self._trace.plots_dir, self._trace.plots_prefix,
                              tid, task_name)
            pl.savefig(figname, bbox_inches='tight')

    def plotBigTasks(self, max_tasks=10, min_samples=100,
                     min_utilization=None):
        """
        For each big task, plot utilization and show the smallest cluster
        capacity suitable for accommodating the task utilization.

        :param max_tasks: maximum number of tasks to consider
        :type max_tasks: int

        :param min_samples: minimum number of samples over the min_utilization
        :type min_samples: int

        :param min_utilization: minimum utilization used to filter samples
            default: capacity of a little cluster
        :type min_utilization: int
        """
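        # Illustrative usage sketch (assumes `trace.analysis.tasks` naming):
        # plot the 5 biggest tasks that had at least 200 utilization samples
        # above the LITTLE cluster capacity:
        #
        #   trace.analysis.tasks.plotBigTasks(max_tasks=5, min_samples=200)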

        # Get PIDs of big tasks
        big_frequent_task_df = self._dfg_top_big_tasks(
            min_samples, min_utilization)

        # Nothing to do if the filtering returned no tasks
        if big_frequent_task_df is None or not len(big_frequent_task_df):
            self._log.warning('No big/frequent tasks to plot')
            return

        if max_tasks > 0:
            big_frequent_task_df = big_frequent_task_df.head(max_tasks)
        big_frequent_task_pids = big_frequent_task_df.index.values
        big_frequent_tasks_count = len(big_frequent_task_pids)

        # Get the list of events for all big frequent tasks
        df = self._dfg_trace_event('sched_load_avg_task')
        big_frequent_tasks_events = df[df.pid.isin(big_frequent_task_pids)]

        # Define one subplot per big task
        fig, axes = plt.subplots(big_frequent_tasks_count, 1,
                                 figsize=(16, big_frequent_tasks_count*4))
        plt.subplots_adjust(wspace=0.1, hspace=0.2)

        plot_idx = 0
        for pid, group in big_frequent_tasks_events.groupby('pid'):

            # Build task name (there could be multiple names during the task
            # lifetime)
            task_name = 'Task [{:d}:{}]'.format(
                pid, self._trace.getTaskByPid(pid))

            # Plot title
            if big_frequent_tasks_count == 1:
                ax = axes
            else:
                ax = axes[plot_idx]
            ax.set_title(task_name)

            # Left axis: utilization
            ax = group.plot(y=['util_avg', 'min_cluster_cap'],
                            style=['r.', '-b'],
                            drawstyle='steps-post',
                            linewidth=1,
                            ax=ax)
            ax.set_xlim(self._trace.x_min, self._trace.x_max)
            ax.set_ylim(0, 1100)
            ax.set_ylabel('util_avg')
            ax.set_xlabel('')
            ax.grid(True)
            self._trace.analysis.status.plotOverutilized(ax)

            plot_idx += 1

        ax.set_xlabel('Time [s]')

        self._log.info('Tasks which had a utilization above %d for at least '
                       '%d samples', self._little_cap, min_samples)

    def plotWakeupTasks(self, max_tasks=10, min_wakeups=0, per_cluster=False):
        """
        Show waking up tasks over time and newly forked tasks in two separate
        plots.

        :param max_tasks: maximum number of tasks to consider
        :type max_tasks: int

        :param min_wakeups: minimum number of wakeups of each task
        :type min_wakeups: int

        :param per_cluster: if True, plot per-cluster wakeup events
        :type per_cluster: bool
        """
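        # Illustrative usage sketch (assumes `trace.analysis.tasks` naming):
        #
        #   # Top 10 tasks waking up at least 100 times
        #   trace.analysis.tasks.plotWakeupTasks(max_tasks=10, min_wakeups=100)
        #   # Forked tasks split between big and LITTLE clusters
        #   trace.analysis.tasks.plotWakeupTasks(per_cluster=True)
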
        if per_cluster is True and \
           not self._trace.hasEvents('sched_wakeup_new'):
            self._log.warning('Events [sched_wakeup_new] not found, '
                              'plots DISABLED!')
            return
        elif not self._trace.hasEvents('sched_wakeup') and \
             not self._trace.hasEvents('sched_wakeup_new'):
            self._log.warning('Events [sched_wakeup, sched_wakeup_new] not found, '
                              'plots DISABLED!')
            return

        # Define axes for side-by-side plots
        fig, axes = plt.subplots(2, 1, figsize=(14, 5))
        plt.subplots_adjust(wspace=0.2, hspace=0.3)

        if per_cluster:

            # Get per-cluster wakeup events
            df = self._dfg_trace_event('sched_wakeup_new')
            big_frequent = df.target_cpu.isin(self._big_cpus)
            ntbc = df[big_frequent]
            ntbc_count = len(ntbc)
            little_frequent = df.target_cpu.isin(self._little_cpus)
            ntlc = df[little_frequent]
            ntlc_count = len(ntlc)

            self._log.info('%5d tasks forked on big cluster    (%3.1f %%)',
                           ntbc_count,
                           100. * ntbc_count / (ntbc_count + ntlc_count))
            self._log.info('%5d tasks forked on LITTLE cluster (%3.1f %%)',
                           ntlc_count,
                           100. * ntlc_count / (ntbc_count + ntlc_count))

            ax = axes[0]
            ax.set_title('Tasks Forks on big CPUs')
            ntbc.pid.plot(style=['g.'], ax=ax)
            ax.set_xlim(self._trace.x_min, self._trace.x_max)
            ax.set_xticklabels([])
            ax.set_xlabel('')
            ax.grid(True)
            self._trace.analysis.status.plotOverutilized(ax)

            ax = axes[1]
            ax.set_title('Tasks Forks on LITTLE CPUs')
            ntlc.pid.plot(style=['g.'], ax=ax)
            ax.set_xlim(self._trace.x_min, self._trace.x_max)
            ax.grid(True)
            self._trace.analysis.status.plotOverutilized(ax)

            return

        # Keep only the most frequent wakeup tasks
        wkp_task_pids = self._dfg_top_wakeup_tasks(min_wakeups)
        if wkp_task_pids is None or not len(wkp_task_pids):
            self._log.warning('No tasks with more than %d wakeups to plot',
                              min_wakeups)
            return
        wkp_task_pids = wkp_task_pids.index.values[:max_tasks]
        self._log.info('Plotting %d frequent wakeup tasks',
                       len(wkp_task_pids))

        ax = axes[0]
        ax.set_title('Tasks WakeUps Events')
        df = self._dfg_trace_event('sched_wakeup')
        if len(df):
            df = df[df.pid.isin(wkp_task_pids)]
            df.pid.astype(int).plot(style=['b.'], ax=ax)
            ax.set_xlim(self._trace.x_min, self._trace.x_max)
            ax.set_xticklabels([])
            ax.set_xlabel('')
            ax.grid(True)
            self._trace.analysis.status.plotOverutilized(ax)

        ax = axes[1]
        ax.set_title('Tasks Forks Events')
        df = self._dfg_trace_event('sched_wakeup_new')
        if len(df):
            df = df[df.pid.isin(wkp_task_pids)]
            df.pid.astype(int).plot(style=['r.'], ax=ax)
            ax.set_xlim(self._trace.x_min, self._trace.x_max)
            ax.grid(True)
            self._trace.analysis.status.plotOverutilized(ax)

    def plotBigTasksVsCapacity(self, min_samples=1,
                               min_utilization=None, big_cluster=True):
        """
        Draw a plot that shows whether tasks are placed on the correct cluster
        based on their utilization and cluster capacity. Green dots mean the
        task was placed on the correct cluster, red means the placement was
        wrong.

        :param min_samples: minimum number of samples over the min_utilization
        :type min_samples: int

        :param min_utilization: minimum utilization used to filter samples
            default: capacity of a little cluster
        :type min_utilization: int

        :param big_cluster: if True, report the frequencies of the big
            cluster, otherwise those of the LITTLE cluster
        :type big_cluster: bool
        """
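        # Illustrative usage sketch (assumes `trace.analysis.tasks` naming):
        #
        #   # Check big-task placement against big cluster frequencies
        #   trace.analysis.tasks.plotBigTasksVsCapacity(min_samples=50)
        #   # Same check, but report LITTLE cluster frequencies
        #   trace.analysis.tasks.plotBigTasksVsCapacity(big_cluster=False)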

        if not self._trace.hasEvents('sched_load_avg_task'):
            self._log.warning('Events [sched_load_avg_task] not found')
            return
        if not self._trace.hasEvents('cpu_frequency'):
            self._log.warning('Events [cpu_frequency] not found')
            return

        if big_cluster:
            cluster_correct = 'big'
            cpus = self._big_cpus
        else:
            cluster_correct = 'LITTLE'
            cpus = self._little_cpus

        # Get all utilization update events
        df = self._dfg_trace_event('sched_load_avg_task')

        # Keep events of defined big tasks
        big_task_pids = self._dfg_top_big_tasks(
            min_samples, min_utilization)
        if big_task_pids is not None:
            big_task_pids = big_task_pids.index.values
            df = df[df.pid.isin(big_task_pids)]
        if not df.size:
            self._log.warning('No events for tasks with more than %d '
                              'utilization samples bigger than %s, '
                              'plots DISABLED!', min_samples, min_utilization)
            return

        fig, axes = plt.subplots(2, 1, figsize=(14, 5))
        plt.subplots_adjust(wspace=0.2, hspace=0.3)

        # Add column of expected cluster depending on:
        # a) task utilization value
        # b) capacity of the selected cluster
        bu_bc = ((df['util_avg'] > self._little_cap) &
                 (df['cpu'].isin(self._big_cpus)))
        su_lc = ((df['util_avg'] <= self._little_cap) &
                 (df['cpu'].isin(self._little_cpus)))

        # The Cluster CAPacity Matches the UTILization (ccap_mutil) iff:
        # - tasks with util_avg  > little_cap are running on a big CPU
        # - tasks with util_avg <= little_cap are running on a LITTLE CPU
        df.loc[:, 'ccap_mutil'] = np.select([(bu_bc | su_lc)], [True], False)

        df_freq = self._dfg_trace_event('cpu_frequency')
        df_freq = df_freq[df_freq.cpu == cpus[0]]

        ax = axes[0]
        ax.set_title('Tasks Utilization vs Allocation')
        for ucolor, umatch in zip('gr', [True, False]):
            cdata = df[df['ccap_mutil'] == umatch]
            if len(cdata) > 0:
                cdata['util_avg'].plot(ax=ax,
                        style=[ucolor+'.'], legend=False)
        ax.set_xlim(self._trace.x_min, self._trace.x_max)
        ax.set_xticklabels([])
        ax.set_xlabel('')
        ax.grid(True)
        self._trace.analysis.status.plotOverutilized(ax)

        ax = axes[1]
        ax.set_title('Frequencies on "{}" cluster'.format(cluster_correct))
        df_freq['frequency'].plot(style=['-b'], ax=ax, drawstyle='steps-post')
        ax.set_xlim(self._trace.x_min, self._trace.x_max)
        ax.grid(True)
        self._trace.analysis.status.plotOverutilized(ax)

        legend_y = axes[0].get_ylim()[1]
        axes[0].annotate('Utilization-Capacity Matches',
                         xy=(0, legend_y),
                         xytext=(-50, 45), textcoords='offset points',
                         fontsize=18)
        axes[0].annotate('Task scheduled (green) or not (red) on min cluster',
                         xy=(0, legend_y),
                         xytext=(-50, 25), textcoords='offset points',
                         fontsize=14)


###############################################################################
# Utility Methods
###############################################################################

    def _plotTaskSignals(self, axes, tid, signals, is_last=False):
        """
        For task with ID `tid` plot the specified signals.

        :param axes: axes over which to generate the plot
        :type axes: :mod:`matplotlib.axes.Axes`

        :param tid: task ID
        :type tid: int

        :param signals: signals to be plotted
        :type signals: list(str)

        :param is_last: if True this is the last plot
        :type is_last: bool
        """
        # Get dataframe for the required task
        util_df = self._dfg_trace_event('sched_load_avg_task')

        # Plot load and util
        signals_to_plot = set(signals).difference({'boosted_util'})
        for signal in signals_to_plot:
            if signal not in util_df.columns:
                continue
            data = util_df[util_df.pid == tid][signal]
            data.plot(ax=axes, drawstyle='steps-post', legend=True)

        # Plot boosted utilization if available
        if 'boosted_util' in signals and \
           self._trace.hasEvents('sched_boost_task'):
            boost_df = self._dfg_trace_event('sched_boost_task')
            data = boost_df[boost_df.pid == tid][['boosted_util']]
            if len(data):
                data.plot(ax=axes, style=['y-'], drawstyle='steps-post')
            else:
                task_name = self._trace.getTaskByPid(tid)
                self._log.warning('No "boosted_util" data for task [%d:%s]',
                                  tid, task_name)

        # Add capacity data if available
        if 'nrg_model' in self._platform:
            nrg_model = self._platform['nrg_model']
            max_lcap = nrg_model['little']['cpu']['cap_max']
            max_bcap = nrg_model['big']['cpu']['cap_max']
            tip_lcap = 0.8 * max_lcap
            tip_bcap = 0.8 * max_bcap
            self._log.debug(
                'LITTLE capacity tip/max: %d/%d, big capacity tip/max: %d/%d',
                tip_lcap, max_lcap, tip_bcap, max_bcap
            )
            axes.axhline(tip_lcap, color='y', linestyle=':', linewidth=2)
            axes.axhline(max_lcap, color='y', linestyle='--', linewidth=2)
            axes.axhline(tip_bcap, color='r', linestyle=':', linewidth=2)
            axes.axhline(max_bcap, color='r', linestyle='--', linewidth=2)

        axes.set_ylim(0, 1100)
        axes.set_xlim(self._trace.x_min, self._trace.x_max)
        axes.grid(True)
        if not is_last:
            axes.set_xticklabels([])
            axes.set_xlabel('')
        if 'sched_overutilized' in signals:
            self._trace.analysis.status.plotOverutilized(axes)

    def _plotTaskResidencies(self, axes, tid, signals, is_last=False):
        """
        For task with ID `tid` plot residency information.

        :param axes: axes over which to generate the plot
        :type axes: :mod:`matplotlib.axes.Axes`

        :param tid: task ID
        :type tid: int

        :param signals: signals to be plotted
        :type signals: list(str)

        :param is_last: if True this is the last plot
        :type is_last: bool
        """
        util_df = self._dfg_trace_event('sched_load_avg_task')

        if 'cluster' in util_df:
            data = util_df[util_df.pid == tid][['cluster', 'cpu']]
            for ccolor, clabel in zip('gr', ['LITTLE', 'big']):
                cdata = data[data.cluster == clabel]
                if len(cdata) > 0:
                    cdata.plot(ax=axes, style=[ccolor+'+'], legend=False)

        # Y Axis - placeholder for legend, actual CPUs, topmost empty lane
        cpus = [str(n) for n in range(self._platform['cpus_count'])]
        ylabels = [''] + cpus
        axes.set_yticklabels(ylabels)
        axes.set_ylim(-1, len(cpus))
        axes.set_ylabel('CPUs')
        # X Axis
        axes.set_xlim(self._trace.x_min, self._trace.x_max)

        axes.grid(True)
        if not is_last:
            axes.set_xticklabels([])
            axes.set_xlabel('')
        if 'sched_overutilized' in signals:
            self._trace.analysis.status.plotOverutilized(axes)

    def _plotTaskPelt(self, axes, tid, signals):
        """
        For task with ID `tid` plot PELT-related signals.

        :param axes: axes over which to generate the plot
        :type axes: :mod:`matplotlib.axes.Axes`

        :param tid: task ID
        :type tid: int

        :param signals: signals to be plotted
        :type signals: list(str)
        """
        util_df = self._dfg_trace_event('sched_load_avg_task')
        data = util_df[util_df.pid == tid][['load_sum',
                                            'util_sum',
                                            'period_contrib']]
        data.plot(ax=axes, drawstyle='steps-post')
        axes.set_xlim(self._trace.x_min, self._trace.x_max)
        axes.ticklabel_format(style='scientific', scilimits=(0, 0),
                              axis='y', useOffset=False)
        axes.grid(True)
        if 'sched_overutilized' in signals:
            self._trace.analysis.status.plotOverutilized(axes)

# vim :set tabstop=4 shiftwidth=4 expandtab