// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/oprofile/op_model_7450.c
 *
 * Freescale 745x/744x oprofile support, based on fsl_booke support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

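/*
 * Per-counter reload values and cached MMCR register images: filled in by
 * fsl7450_reg_setup() and written to each CPU by fsl7450_cpu_setup().
 */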
static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;

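/* Bit offsets of the PMCnSEL event-select fields in MMCR0/MMCR1 */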
#define MMCR0_PMC1_SHIFT	6
#define MMCR0_PMC2_SHIFT	0
#define MMCR1_PMC3_SHIFT	27
#define MMCR1_PMC4_SHIFT	22
#define MMCR1_PMC5_SHIFT	17
#define MMCR1_PMC6_SHIFT	11

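/*
 * Shift an event number into the select field for the given counter and
 * mask it to the width of that field.
 */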
#define mmcr0_event1(event) \
	((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
#define mmcr0_event2(event) \
	((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)

#define mmcr1_event3(event) \
	((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
#define mmcr1_event4(event) \
	((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
#define mmcr1_event5(event) \
	((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
#define mmcr1_event6(event) \
	((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)

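/*
 * Initial MMCR0 image: all freeze bits set, so nothing counts until
 * fsl7450_reg_setup() and pmc_start_ctrs() clear the relevant ones.
 */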
#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)

/* Unfreezes the counters on this CPU, enables the interrupt,
 * enables the counters to trigger the interrupt, and sets the
 * counters to only count when the mark bit is not set.
 */
static void pmc_start_ctrs(void)
{
	u32 mmcr0 = mfspr(SPRN_MMCR0);

	mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
	mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

	mtspr(SPRN_MMCR0, mmcr0);
}

/* Disables the counters on this CPU, and freezes them */
static void pmc_stop_ctrs(void)
{
	u32 mmcr0 = mfspr(SPRN_MMCR0);

	mmcr0 |= MMCR0_FC;
	mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

	mtspr(SPRN_MMCR0, mmcr0);
}

/* Configures the counters on this CPU based on the global
 * settings */
static int fsl7450_cpu_setup(struct op_counter_config *ctr)
{
	/* freeze all counters */
	pmc_stop_ctrs();

	mtspr(SPRN_MMCR0, mmcr0_val);
	mtspr(SPRN_MMCR1, mmcr1_val);
	if (num_pmcs > 4)
		mtspr(SPRN_MMCR2, mmcr2_val);

	return 0;
}

/* Configures the global settings for the counters on all CPUs. */
static int fsl7450_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	num_pmcs = num_ctrs;
	/* Our counters count up, and "count" refers to how many events
	 * occur before the next interrupt, and we interrupt on overflow.
	 * So we calculate the starting value which will give us "count"
	 * until overflow.
	 * Then we set the events on the enabled counters */
	for (i = 0; i < num_ctrs; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	/* Set events for Counters 1 & 2 */
	mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
		| mmcr0_event2(ctr[1].event);

	/* Setup user/kernel bits */
	if (sys->enable_kernel)
		mmcr0_val &= ~(MMCR0_FCS);

	if (sys->enable_user)
		mmcr0_val &= ~(MMCR0_FCP);

	/* Set events for Counters 3-6 */
	mmcr1_val = mmcr1_event3(ctr[2].event)
		| mmcr1_event4(ctr[3].event);
	if (num_ctrs > 4)
		mmcr1_val |= mmcr1_event5(ctr[4].event)
			| mmcr1_event6(ctr[5].event);

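	/* MMCR2 is unused here; fsl7450_cpu_setup() only writes it back
	 * when the CPU has more than four PMCs */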
	mmcr2_val = 0;

	return 0;
}

/* Sets the counters on this CPU to the chosen values, and starts them */
static int fsl7450_start(struct op_counter_config *ctr)
{
	int i;

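	/* FCM1 stays set in MMCR0 (see MMCR0_INIT), so the counters remain
	 * frozen while MSR[PMM] is set */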
	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_pmcs; ++i) {
		if (ctr[i].enabled)
			classic_ctr_write(i, reset_value[i]);
		else
			classic_ctr_write(i, 0);
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs();

	oprofile_running = 1;

	return 0;
}

/* Stop the counters on this CPU */
static void fsl7450_stop(void)
{
	/* freeze counters */
	pmc_stop_ctrs();

	oprofile_running = 0;

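	/* Make sure the store to oprofile_running is visible before any
	 * remaining performance monitor exceptions inspect it */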
	mb();
}


/* Handle the interrupt on this CPU, and log a sample for each
 * event that triggered the interrupt */
static void fsl7450_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	/* set the PMM bit (see comment below) */
	mtmsr(mfmsr() | MSR_PMM);

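	/* SIAR holds the address of the instruction being executed when
	 * the performance monitor exception was taken */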
	pc = mfspr(SPRN_SIAR);
	is_kernel = is_kernel_addr(pc);

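	/* A counter that reads back negative has its MSB set, i.e. it has
	 * overflowed and contributed to this exception */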
	for (i = 0; i < num_pmcs; ++i) {
		val = classic_ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				classic_ctr_write(i, reset_value[i]);
			} else {
				classic_ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs();
}

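/* Model hooks registered with the common powerpc oprofile code for
 * 7450-family (G4) CPUs */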
struct op_powerpc_model op_model_7450 = {
	.reg_setup		= fsl7450_reg_setup,
	.cpu_setup		= fsl7450_cpu_setup,
	.start			= fsl7450_start,
	.stop			= fsl7450_stop,
	.handle_interrupt	= fsl7450_handle_interrupt,
};