1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * AMD Platform Management Framework Driver
4 *
5 * Copyright (c) 2022, Advanced Micro Devices, Inc.
6 * All Rights Reserved.
7 *
8 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
9 */
10
11 #include <asm/amd_nb.h>
12 #include <linux/debugfs.h>
13 #include <linux/iopoll.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/platform_device.h>
17 #include <linux/power_supply.h>
18 #include "pmf.h"
19
/* PMF-SMU communication registers (offsets into the mapped mailbox window) */
#define AMD_PMF_REGISTER_MESSAGE 0xA18
#define AMD_PMF_REGISTER_RESPONSE 0xA78
#define AMD_PMF_REGISTER_ARGUMENT 0xA58

/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMF_MAPPING_SIZE 0x01000
#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
#define AMD_PMF_BASE_ADDR_HI 0x13B102EC
#define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0)
#define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20)

/* SMU Response Codes (read back from AMD_PMF_REGISTER_RESPONSE) */
#define AMD_PMF_RESULT_OK 0x01
#define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC
#define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD
#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
#define AMD_PMF_RESULT_FAILED 0xFF

/* Polling interval (us) and iteration bound while waiting for an SMU response */
#define PMF_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000

/* usleep_range() bounds used when PMFW needs extra time to post result data */
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000

/* override Metrics Table sample size time (in ms) */
static int metrics_table_loop_ms = 1000;
module_param(metrics_table_loop_ms, int, 0644);
MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");

/* Force load on supported older platforms */
static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
55
amd_pmf_pwr_src_notify_call(struct notifier_block * nb,unsigned long event,void * data)56 static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
57 {
58 struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
59
60 if (event != PSY_EVENT_PROP_CHANGED)
61 return NOTIFY_OK;
62
63 if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
64 is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
65 is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
66 if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
67 return NOTIFY_DONE;
68 }
69
70 if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
71 amd_pmf_set_sps_power_limits(pmf);
72
73 if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
74 amd_pmf_power_slider_update_event(pmf);
75
76 return NOTIFY_OK;
77 }
78
current_power_limits_show(struct seq_file * seq,void * unused)79 static int current_power_limits_show(struct seq_file *seq, void *unused)
80 {
81 struct amd_pmf_dev *dev = seq->private;
82 struct amd_pmf_static_slider_granular table;
83 int mode, src = 0;
84
85 mode = amd_pmf_get_pprof_modes(dev);
86 if (mode < 0)
87 return mode;
88
89 src = amd_pmf_get_power_source();
90 amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
91 seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
92 table.prop[src][mode].spl,
93 table.prop[src][mode].fppt,
94 table.prop[src][mode].sppt,
95 table.prop[src][mode].sppt_apu_only,
96 table.prop[src][mode].stt_min,
97 table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
98 table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
99 return 0;
100 }
101 DEFINE_SHOW_ATTRIBUTE(current_power_limits);
102
/* Tear down the "amd_pmf" debugfs directory created at probe time. */
static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
107
amd_pmf_dbgfs_register(struct amd_pmf_dev * dev)108 static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
109 {
110 dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
111 if (dev->pmf_if_version == PMF_IF_V1)
112 debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
113 ¤t_power_limits_fops);
114 }
115
amd_pmf_get_power_source(void)116 int amd_pmf_get_power_source(void)
117 {
118 if (power_supply_is_system_supplied() > 0)
119 return POWER_SOURCE_AC;
120 else
121 return POWER_SOURCE_DC;
122 }
123
/*
 * Periodic worker: refreshes the metrics table from the PMFW, feeds the
 * measured SoC power into the Auto Mode / CnQF transition engines, and
 * re-arms itself every metrics_table_loop_ms milliseconds.
 */
static void amd_pmf_get_metrics(struct work_struct *work)
{
	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
	ktime_t time_elapsed_ms;
	int socket_power;

	mutex_lock(&dev->update_mutex);
	/* Transfer table contents: PMFW fills dev->buf (shared DRAM buffer).
	 * The argument 7 appears to select the metrics table — TODO confirm
	 * against the PMFW transfer-table IDs.
	 */
	memset(dev->buf, 0, sizeof(dev->m_table));
	amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
	memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

	/* Time since the previous sample, used by the transition engines */
	time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
	/* Calculate the avg SoC power consumption (APU + dGPU) */
	socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

	if (dev->amt_enabled) {
		/* Apply the Auto Mode transition */
		amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
	}

	if (dev->cnqf_enabled) {
		/* Apply the CnQF transition */
		amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
	}

	/* Re-arm the sampling loop */
	dev->start_time = ktime_to_ms(ktime_get());
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
	mutex_unlock(&dev->update_mutex);
}
154
/* Read a 32-bit mailbox register at the given offset into the mapped window. */
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}
159
/* Write a 32-bit value to a mailbox register at the given offset. */
static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}
164
amd_pmf_dump_registers(struct amd_pmf_dev * dev)165 static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
166 {
167 u32 value;
168
169 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
170 dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
171
172 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
173 dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
174
175 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
176 dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
177 }
178
179 /**
180 * fixp_q88_fromint: Convert integer to Q8.8
181 * @val: input value
182 *
183 * Converts an integer into binary fixed point format where 8 bits
184 * are used for integer and 8 bits are used for the decimal.
185 *
186 * Return: unsigned integer converted to Q8.8 format
187 */
fixp_q88_fromint(u32 val)188 u32 fixp_q88_fromint(u32 val)
189 {
190 return val << 8;
191 }
192
/*
 * amd_pmf_send_cmd - issue a single mailbox command to the PMFW (SMU)
 * @dev: PMF driver data
 * @message: command ID to write into the MESSAGE register
 * @get: when true, read the result back from the ARGUMENT register into @data
 * @arg: argument value for the command
 * @data: out parameter for the result (only written when @get is true)
 *
 * Serialized by dev->lock. Protocol: wait for the previous transaction to
 * complete (RESPONSE != 0), clear RESPONSE, write ARGUMENT, then write
 * MESSAGE (which triggers execution), and poll RESPONSE again for the
 * status code.
 *
 * Return: 0 on success, -ETIMEDOUT/-EBUSY/-EINVAL/-EIO on failure.
 */
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
	int rc;
	u32 val;

	mutex_lock(&dev->lock);

	/* Wait until we get a valid response (previous command fully done) */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

	/* Write argument into argument register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

	/* Write message ID to message ID register; this starts execution */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	/* Translate the SMU status code into an errno */
	switch (val) {
	case AMD_PMF_RESULT_OK:
		if (get) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
		}
		break;
	case AMD_PMF_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMF_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

	/* Success path falls through here with rc == 0 */
out_unlock:
	mutex_unlock(&dev->lock);
	amd_pmf_dump_registers(dev);
	return rc;
}
256
/* Root-complex PCI IDs of the SoCs supported by this driver */
static const struct pci_device_id pmf_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
	{ }
};
264
/*
 * amd_pmf_set_dram_addr - program the PMFW with the DRAM buffer address
 * @dev: PMF driver data
 * @alloc_buffer: when true, size and allocate the shared metrics buffer first
 *
 * Fix: the default case of the CPU-id switch only logged an error and fell
 * through, leaving mtable_size at 0 — kzalloc(0) then yields a zero-size
 * allocation whose address would be handed to the SMU. Return -EINVAL for
 * unknown CPU IDs instead.
 *
 * Return: 0 on success, -EINVAL for an unknown CPU id, -ENOMEM on
 * allocation failure.
 */
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
{
	u64 phys_addr;
	u32 hi, low;

	/* Get Metrics Table Address */
	if (alloc_buffer) {
		switch (dev->cpu_id) {
		case AMD_CPU_ID_PS:
		case AMD_CPU_ID_RMB:
			dev->mtable_size = sizeof(dev->m_table);
			break;
		case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
		case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
			dev->mtable_size = sizeof(dev->m_table_v2);
			break;
		default:
			dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id);
			return -EINVAL;
		}

		dev->buf = kzalloc(dev->mtable_size, GFP_KERNEL);
		if (!dev->buf)
			return -ENOMEM;
	}

	/* Split the buffer's physical address and send both halves to PMFW */
	phys_addr = virt_to_phys(dev->buf);
	hi = phys_addr >> 32;
	low = phys_addr & GENMASK(31, 0);

	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);

	return 0;
}
299
/*
 * Set up the shared metrics buffer and start the periodic sampling worker.
 *
 * Return: 0 on success, or the error from amd_pmf_set_dram_addr().
 */
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
	int ret;

	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

	ret = amd_pmf_set_dram_addr(dev, true);
	if (ret)
		return ret;

	/*
	 * Start collecting the metrics data after a small delay
	 * or else, we might end up getting stale values from PMFW.
	 */
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

	return 0;
}
318
amd_pmf_suspend_handler(struct device * dev)319 static int amd_pmf_suspend_handler(struct device *dev)
320 {
321 struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
322
323 if (pdev->smart_pc_enabled)
324 cancel_delayed_work_sync(&pdev->pb_work);
325
326 if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
327 amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);
328
329 return 0;
330 }
331
amd_pmf_resume_handler(struct device * dev)332 static int amd_pmf_resume_handler(struct device *dev)
333 {
334 struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
335 int ret;
336
337 if (pdev->buf) {
338 ret = amd_pmf_set_dram_addr(pdev, false);
339 if (ret)
340 return ret;
341 }
342
343 if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
344 amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);
345
346 if (pdev->smart_pc_enabled)
347 schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
348
349 return 0;
350 }
351
352 static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, amd_pmf_suspend_handler, amd_pmf_resume_handler);
353
amd_pmf_init_features(struct amd_pmf_dev * dev)354 static void amd_pmf_init_features(struct amd_pmf_dev *dev)
355 {
356 int ret;
357
358 /* Enable Static Slider */
359 if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
360 is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
361 amd_pmf_init_sps(dev);
362 dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
363 power_supply_reg_notifier(&dev->pwr_src_notifier);
364 dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
365 }
366
367 amd_pmf_init_smart_pc(dev);
368 if (dev->smart_pc_enabled) {
369 dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
370 /* If Smart PC is enabled, no need to check for other features */
371 return;
372 }
373
374 if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
375 amd_pmf_init_auto_mode(dev);
376 dev_dbg(dev->dev, "Auto Mode Init done\n");
377 } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
378 is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
379 ret = amd_pmf_init_cnqf(dev);
380 if (ret)
381 dev_warn(dev->dev, "CnQF Init failed\n");
382 }
383 }
384
amd_pmf_deinit_features(struct amd_pmf_dev * dev)385 static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
386 {
387 if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
388 is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
389 power_supply_unreg_notifier(&dev->pwr_src_notifier);
390 amd_pmf_deinit_sps(dev);
391 }
392
393 if (dev->smart_pc_enabled) {
394 amd_pmf_deinit_smart_pc(dev);
395 } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
396 amd_pmf_deinit_auto_mode(dev);
397 } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
398 is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
399 amd_pmf_deinit_cnqf(dev);
400 }
401 }
402
/*
 * ACPI IDs this driver binds to. driver_data 0x100 marks older platforms
 * that are only bound when the force_load module parameter is set (see
 * the check in amd_pmf_probe()).
 */
static const struct acpi_device_id amd_pmf_acpi_ids[] = {
	{"AMDI0100", 0x100},
	{"AMDI0102", 0},
	{"AMDI0103", 0},
	{"AMDI0105", 0},
	{"AMDI0107", 0},
	{"AMDI0108", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
413
/*
 * Probe: identify the SoC via the PCI root complex, locate and map the
 * SMU mailbox MMIO window via SMN reads, then bring up ACPI glue,
 * debugfs, and the feature set.
 */
static int amd_pmf_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;
	struct amd_pmf_dev *dev;
	struct pci_dev *rdev;
	u32 base_addr_lo;
	u32 base_addr_hi;
	u64 base_addr;
	u32 val;
	int err;

	id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* Older platforms (driver_data 0x100) only bind when force_load is set */
	if (id->driver_data == 0x100 && !force_load)
		return -ENODEV;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = &pdev->dev;

	/* Identify the SoC from the root complex at 0000:00:00.0 */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
		pci_dev_put(rdev);
		return -ENODEV;
	}

	dev->cpu_id = rdev->device;

	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
	if (err) {
		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	/*
	 * NOTE(review): the LO register is masked with the *_HI_MASK and the
	 * HI register with the *_LO_MASK below — presumably the mask names
	 * refer to the bit positions they keep, not the register; confirm
	 * against the SMU register spec before touching.
	 */
	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
	if (err) {
		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
	pci_dev_put(rdev);
	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

	/* Map the mailbox window at a fixed offset from the SMU base */
	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
				    AMD_PMF_MAPPING_SIZE);
	if (!dev->regbase)
		return -ENOMEM;

	mutex_init(&dev->lock);		/* serializes SMU mailbox transactions */
	mutex_init(&dev->update_mutex);	/* serializes metrics-table refreshes */

	amd_pmf_quirks_init(dev);
	apmf_acpi_init(dev);
	platform_set_drvdata(pdev, dev);
	amd_pmf_dbgfs_register(dev);
	amd_pmf_init_features(dev);
	apmf_install_handler(dev);
	if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
		amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);

	dev_info(dev->dev, "registered PMF device successfully\n");

	return 0;
}
487
/*
 * Remove: undo probe in reverse order — features first, then the unload
 * heartbeat, ACPI glue, debugfs, locks, and finally the metrics buffer.
 */
static void amd_pmf_remove(struct platform_device *pdev)
{
	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

	amd_pmf_deinit_features(dev);
	if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
		amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
	apmf_acpi_deinit(dev);
	amd_pmf_dbgfs_unregister(dev);
	mutex_destroy(&dev->lock);
	mutex_destroy(&dev->update_mutex);
	/* Buffer allocated by amd_pmf_set_dram_addr(dev, true) */
	kfree(dev->buf);
}
501
/* sysfs attribute groups attached to every bound device (CnQF controls) */
static const struct attribute_group *amd_pmf_driver_groups[] = {
	&cnqf_feature_attribute_group,
	NULL,
};
506
/* Platform driver registration and module metadata. */
static struct platform_driver amd_pmf_driver = {
	.driver = {
		.name = "amd-pmf",
		.acpi_match_table = amd_pmf_acpi_ids,
		.dev_groups = amd_pmf_driver_groups,
		.pm = pm_sleep_ptr(&amd_pmf_pm),
	},
	.probe = amd_pmf_probe,
	.remove_new = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
/* amdtee must be loaded first so Smart PC can reach the TEE */
MODULE_SOFTDEP("pre: amdtee");
522