/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 
#include "perf_pmu_pri.h"
33 
34 STATIC Pmu *g_perfHw = NULL;
35 
36 STATIC CHAR *g_eventName[PERF_COUNT_HW_MAX] = {
37     [PERF_COUNT_HW_CPU_CYCLES]              = "cycles",
38     [PERF_COUNT_HW_INSTRUCTIONS]            = "instructions",
39     [PERF_COUNT_HW_ICACHE_REFERENCES]       = "icache",
40     [PERF_COUNT_HW_ICACHE_MISSES]           = "icache-misses",
41     [PERF_COUNT_HW_DCACHE_REFERENCES]       = "dcache",
42     [PERF_COUNT_HW_DCACHE_MISSES]           = "dcache-misses",
43     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = "branches",
44     [PERF_COUNT_HW_BRANCH_MISSES]           = "branches-misses",
45 };
46 
47 /**
48  * 1.If config event is PERF_EVENT_TYPE_HW, then map it to the real eventId first, otherwise use the configured
49  * eventId directly.
50  * 2.Find available counter for each event.
51  * 3.Decide whether this hardware pmu need prescaler (once every 64 cycle counts).
52  */
OsPerfHwConfig(VOID)53 STATIC UINT32 OsPerfHwConfig(VOID)
54 {
55     UINT32 i;
56     HwPmu *armPmu = GET_HW_PMU(g_perfHw);
57 
58     UINT32 maxCounter = OsGetPmuMaxCounter();
59     UINT32 counter = OsGetPmuCounter0();
60     UINT32 cycleCounter = OsGetPmuCycleCounter();
61     UINT32 cycleCode = armPmu->mapEvent(PERF_COUNT_HW_CPU_CYCLES, PERF_EVENT_TO_CODE);
62     if (cycleCode == PERF_HW_INVALID_EVENT_TYPE) {
63         return LOS_NOK;
64     }
65 
66     PerfEvent *events = &g_perfHw->events;
67     UINT32 eventNum = events->nr;
68 
69     for (i = 0; i < eventNum; i++) {
70         Event *event = &(events->per[i]);
71 
72         if (!VALID_PERIOD(event->period)) {
73             PRINT_ERR("Config period: 0x%x invalid, should be in (%#x, %#x)\n", event->period,
74                 PERIOD_CALC(CCNT_PERIOD_UPPER_BOUND), PERIOD_CALC(CCNT_PERIOD_LOWER_BOUND));
75             return LOS_NOK;
76         }
77 
78         if (g_perfHw->type == PERF_EVENT_TYPE_HW) { /* do map */
79             UINT32 eventId = armPmu->mapEvent(event->eventId, PERF_EVENT_TO_CODE);
80             if (eventId == PERF_HW_INVALID_EVENT_TYPE) {
81                 return LOS_NOK;
82             }
83             event->eventId = eventId;
84         }
85 
86         if (event->eventId == cycleCode) {
87             event->counter = cycleCounter;
88         } else {
89             event->counter = counter;
90             counter++;
91         }
92 
93         if (counter >= maxCounter) {
94             PRINT_ERR("max events: %u excluding cycle event\n", maxCounter - 1);
95             return LOS_NOK;
96         }
97 
98         PRINT_DEBUG("Perf Config %u eventId = 0x%x, counter = 0x%x, period = 0x%x\n", i, event->eventId, event->counter,
99             event->period);
100     }
101 
102     armPmu->cntDivided = events->cntDivided & armPmu->canDivided;
103     return LOS_OK;
104 }
105 
OsPerfHwStart(VOID)106 STATIC UINT32 OsPerfHwStart(VOID)
107 {
108     UINT32 i;
109     UINT32 cpuid = ArchCurrCpuid();
110     HwPmu *armPmu = GET_HW_PMU(g_perfHw);
111 
112     PerfEvent *events = &g_perfHw->events;
113     UINT32 eventNum = events->nr;
114 
115     armPmu->clear();
116 
117     for (i = 0; i < eventNum; i++) {
118         Event *event = &(events->per[i]);
119         armPmu->setPeriod(event);
120         armPmu->enable(event);
121         event->count[cpuid] = 0;
122     }
123 
124     armPmu->start();
125     return LOS_OK;
126 }
127 
OsPerfHwStop(VOID)128 STATIC UINT32 OsPerfHwStop(VOID)
129 {
130     UINT32 i;
131     UINT32 cpuid = ArchCurrCpuid();
132     HwPmu *armPmu = GET_HW_PMU(g_perfHw);
133 
134     PerfEvent *events = &g_perfHw->events;
135     UINT32 eventNum = events->nr;
136 
137     armPmu->stop();
138 
139     for (i = 0; i < eventNum; i++) {
140         Event *event = &(events->per[i]);
141         UINTPTR value = armPmu->readCnt(event);
142         PRINT_DEBUG("perf stop readCnt value = 0x%x\n", value);
143         event->count[cpuid] += value;
144 
145         /* multiplier of cycle counter */
146         UINT32 eventId = armPmu->mapEvent(event->eventId, PERF_CODE_TO_EVENT);
147         if ((eventId == PERF_COUNT_HW_CPU_CYCLES) && (armPmu->cntDivided != 0)) {
148             PRINT_DEBUG("perf stop is cycle\n");
149             event->count[cpuid] = event->count[cpuid] << 6; /* CCNT counts every 64th cpu cycle */
150         }
151         PRINT_DEBUG("perf stop eventCount[0x%x] : [%s] = %llu\n", event->eventId, g_eventName[eventId],
152             event->count[cpuid]);
153     }
154     return LOS_OK;
155 }
156 
OsPerfGetEventName(Event * event)157 STATIC CHAR *OsPerfGetEventName(Event *event)
158 {
159     UINT32 eventId;
160     HwPmu *armPmu = GET_HW_PMU(g_perfHw);
161     eventId = armPmu->mapEvent(event->eventId, PERF_CODE_TO_EVENT);
162     if (eventId < PERF_COUNT_HW_MAX) {
163         return g_eventName[eventId];
164     } else {
165         return "unknown";
166     }
167 }
168 
OsPerfHwInit(HwPmu * hwPmu)169 UINT32 OsPerfHwInit(HwPmu *hwPmu)
170 {
171     UINT32 ret;
172     if (hwPmu == NULL) {
173         return LOS_NOK;
174     }
175 
176     hwPmu->pmu.type    = PERF_EVENT_TYPE_HW;
177     hwPmu->pmu.config  = OsPerfHwConfig;
178     hwPmu->pmu.start   = OsPerfHwStart;
179     hwPmu->pmu.stop    = OsPerfHwStop;
180     hwPmu->pmu.getName = OsPerfGetEventName;
181 
182     (VOID)memset_s(&hwPmu->pmu.events, sizeof(PerfEvent), 0, sizeof(PerfEvent));
183     ret = OsPerfPmuRegister(&hwPmu->pmu);
184 
185     g_perfHw = OsPerfPmuGet(PERF_EVENT_TYPE_HW);
186     return ret;
187 }
188