/*
 * OPAL IMC interface detection driver
 * Supported on POWERNV platform
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *           (C) 2017 Anju T Sudhakar, IBM Corporation.
 *           (C) 2017 Hemant K Shaw, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/crash_dump.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/debugfs.h>

static struct dentry *imc_debugfs_parent;

/* Helpers to export imc command and mode via debugfs */
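/*
 * The control-block values are kept in big-endian form in memory, hence
 * the byte swaps on both the read and the write path below.  Note that
 * cpu_to_be64() is its own inverse on a 64-bit value, which is why the
 * same conversion is used in both directions.
 */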
static int imc_mem_get(void *data, u64 *val)
{
        *val = cpu_to_be64(*(u64 *)data);
        return 0;
}

static int imc_mem_set(void *data, u64 val)
{
        *(u64 *)data = cpu_to_be64(val);
        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");

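/*
 * Thin wrapper around debugfs_create_file_unsafe(): expose one 64-bit
 * control-block field through the simple attribute fops defined above.
 */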
static struct dentry *imc_debugfs_create_x64(const char *name, umode_t mode,
                                             struct dentry *parent, u64 *value)
{
        return debugfs_create_file_unsafe(name, mode, parent,
                                          value, &fops_imc_x64);
}

/*
 * export_imc_mode_and_cmd: Create a debugfs interface for imc_cmd and
 * imc_mode for each node in the system.  imc_mode and imc_cmd can be
 * changed by echoing into this interface.
 */
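/*
 * For example, assuming debugfs is mounted at /sys/kernel/debug, a nest
 * unit on chip 0 would show up roughly as:
 *
 *      /sys/kernel/debug/powerpc/imc/imc_mode_0
 *      /sys/kernel/debug/powerpc/imc/imc_cmd_0
 *
 * (illustrative paths; the numeric suffix is the chip id from the
 * device tree).
 */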
static void export_imc_mode_and_cmd(struct device_node *node,
                                    struct imc_pmu *pmu_ptr)
{
        static u64 loc, *imc_mode_addr, *imc_cmd_addr;
        char mode[16], cmd[16];
        u32 cb_offset;
        struct imc_mem_info *ptr = pmu_ptr->mem_info;

        imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);

        if (!imc_debugfs_parent)
                return;

        if (of_property_read_u32(node, "cb_offset", &cb_offset))
                cb_offset = IMC_CNTL_BLK_OFFSET;

        while (ptr->vbase != NULL) {
                loc = (u64)(ptr->vbase) + cb_offset;
                imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
                sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
                if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
                                            imc_mode_addr))
                        goto err;

                imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
                sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
                if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
                                            imc_cmd_addr))
                        goto err;
                ptr++;
        }
        return;

err:
        debugfs_remove_recursive(imc_debugfs_parent);
}

/*
 * imc_get_mem_addr_nest: Function to get nest counter memory region
 * for each chip
 */
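/*
 * The "chip-id" and "base-addr" properties are expected to be parallel
 * arrays with one entry per chip.  A purely hypothetical two-chip unit
 * might look like this in the device tree (values are made up; the real
 * layout comes from firmware):
 *
 *      chip-id   = <0x0 0x8>;
 *      base-addr = <0x0 0x20000000  0x0 0x20000000>;   // two u64 values
 *
 * One extra zeroed element is allocated at the end of pmu_ptr->mem_info
 * so that a NULL vbase acts as the end-of-array sentinel (see the loop
 * in export_imc_mode_and_cmd() above).
 */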
static int imc_get_mem_addr_nest(struct device_node *node,
                                 struct imc_pmu *pmu_ptr,
                                 u32 offset)
{
        int nr_chips = 0, i;
        u64 *base_addr_arr, baddr;
        u32 *chipid_arr;

        nr_chips = of_property_count_u32_elems(node, "chip-id");
        if (nr_chips <= 0)
                return -ENODEV;

        base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL);
        if (!base_addr_arr)
                return -ENOMEM;

        chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL);
        if (!chipid_arr) {
                kfree(base_addr_arr);
                return -ENOMEM;
        }

        if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
                goto error;

        if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
                                       nr_chips))
                goto error;

        pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
                                    GFP_KERNEL);
        if (!pmu_ptr->mem_info)
                goto error;

        for (i = 0; i < nr_chips; i++) {
                pmu_ptr->mem_info[i].id = chipid_arr[i];
                baddr = base_addr_arr[i] + offset;
                pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr);
        }

        pmu_ptr->imc_counter_mmaped = true;
        kfree(base_addr_arr);
        kfree(chipid_arr);
        return 0;

error:
        kfree(base_addr_arr);
        kfree(chipid_arr);
        return -1;
}

/*
 * imc_pmu_create: Takes the parent device (the pmu unit node), the
 * pmu_index and the domain as inputs.
 * Allocates memory for the struct imc_pmu and sets up its domain, size
 * and offsets.
 */
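/*
 * When the unit carries an "offset" property (the nest units do), the
 * per-chip counter memory is resolved via imc_get_mem_addr_nest() before
 * the PMU is registered with init_imc_pmu().
 */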
static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
{
        int ret = 0;
        struct imc_pmu *pmu_ptr;
        u32 offset;

        /* Return for unknown domain */
        if (domain < 0)
                return NULL;

        /* memory for pmu */
        pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
        if (!pmu_ptr)
                return NULL;

        /* Set the domain */
        pmu_ptr->domain = domain;

        ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
        if (ret)
                goto free_pmu;

        if (!of_property_read_u32(parent, "offset", &offset)) {
                if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
                        goto free_pmu;
        }

        /* Function to register IMC pmu */
        ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
        if (ret) {
                pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
                kfree(pmu_ptr->pmu.name);
                if (pmu_ptr->domain == IMC_DOMAIN_NEST)
                        kfree(pmu_ptr->mem_info);
                kfree(pmu_ptr);
                return NULL;
        }

        return pmu_ptr;

free_pmu:
        kfree(pmu_ptr);
        return NULL;
}

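/*
 * Stop the nest IMC engines: for each online NUMA node, ask OPAL to stop
 * the nest counters using the hardware id of one online CPU on that node.
 */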
static void disable_nest_pmu_counters(void)
{
        int nid, cpu;
        const struct cpumask *l_cpumask;

        get_online_cpus();
        for_each_node_with_cpus(nid) {
                l_cpumask = cpumask_of_node(nid);
                cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
                if (cpu >= nr_cpu_ids)
                        continue;
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                       get_hard_smp_processor_id(cpu));
        }
        put_online_cpus();
}

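/*
 * Stop the core IMC engines: issue an OPAL stop call for the first thread
 * of every online core and log any failure.
 */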
static void disable_core_pmu_counters(void)
{
        cpumask_t cores_map;
        int cpu, rc;

        get_online_cpus();
        /* Disable the IMC Core functions */
        cores_map = cpu_online_cores_map();
        for_each_cpu(cpu, &cores_map) {
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                            get_hard_smp_processor_id(cpu));
                if (rc)
                        pr_err("%s: Failed to stop Core (cpu = %d)\n",
                               __func__, cpu);
        }
        put_online_cpus();
}

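/*
 * get_max_nest_dev: Return the number of nest (chip scope) IMC units
 * described in the device tree.
 */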
int get_max_nest_dev(void)
{
        struct device_node *node;
        u32 pmu_units = 0, type;

        for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
                if (of_property_read_u32(node, "type", &type))
                        continue;

                if (type == IMC_TYPE_CHIP)
                        pmu_units++;
        }

        return pmu_units;
}

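/*
 * Probe flow: on a kdump kernel only stop the engines and bail out.
 * Otherwise walk every IMC counter unit in the device tree, map its
 * "type" property to a domain (nest/core/thread), create the PMU, and
 * export the control block via debugfs for the first nest unit.  If a
 * thread-imc PMU registered but a core-imc PMU did not, the thread-imc
 * PMU is unregistered at the end.
 */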
static int opal_imc_counters_probe(struct platform_device *pdev)
{
        struct device_node *imc_dev = pdev->dev.of_node;
        struct imc_pmu *pmu;
        int pmu_count = 0, domain;
        bool core_imc_reg = false, thread_imc_reg = false;
        u32 type;

        /*
         * Check whether this is a kdump kernel. If yes, force the engines
         * to stop and return.
         */
        if (is_kdump_kernel()) {
                disable_nest_pmu_counters();
                disable_core_pmu_counters();
                return -ENODEV;
        }

        for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
                pmu = NULL;
                if (of_property_read_u32(imc_dev, "type", &type)) {
                        pr_warn("IMC Device without type property\n");
                        continue;
                }

                switch (type) {
                case IMC_TYPE_CHIP:
                        domain = IMC_DOMAIN_NEST;
                        break;
                case IMC_TYPE_CORE:
                        domain = IMC_DOMAIN_CORE;
                        break;
                case IMC_TYPE_THREAD:
                        domain = IMC_DOMAIN_THREAD;
                        break;
                default:
                        pr_warn("IMC Unknown Device type\n");
                        domain = -1;
                        break;
                }

                pmu = imc_pmu_create(imc_dev, pmu_count, domain);
                if (pmu != NULL) {
                        if (domain == IMC_DOMAIN_NEST) {
                                if (!imc_debugfs_parent)
                                        export_imc_mode_and_cmd(imc_dev, pmu);
                                pmu_count++;
                        }
                        if (domain == IMC_DOMAIN_CORE)
                                core_imc_reg = true;
                        if (domain == IMC_DOMAIN_THREAD)
                                thread_imc_reg = true;
                }
        }

        /* If core imc is not registered, unregister thread-imc */
        if (!core_imc_reg && thread_imc_reg)
                unregister_thread_imc();

        return 0;
}

static void opal_imc_counters_shutdown(struct platform_device *pdev)
{
        /*
         * This function only stops the engines, which is the bare minimum.
         * TODO: Need to handle proper memory cleanup and pmu unregister.
         */
        disable_nest_pmu_counters();
        disable_core_pmu_counters();
}

static const struct of_device_id opal_imc_match[] = {
        { .compatible = IMC_DTB_COMPAT },
        {},
};
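/*
 * The match table binds this driver to the top-level OPAL in-memory
 * counters device-tree node (IMC_DTB_COMPAT); the individual counter
 * units underneath it are enumerated in the probe routine by their own
 * compatible string (IMC_DTB_UNIT_COMPAT).
 */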

static struct platform_driver opal_imc_driver = {
        .driver = {
                .name = "opal-imc-counters",
                .of_match_table = opal_imc_match,
        },
        .probe = opal_imc_counters_probe,
        .shutdown = opal_imc_counters_shutdown,
};

builtin_platform_driver(opal_imc_driver);