// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Platform Management Framework (PMF) Driver
 *
 * Copyright (c) 2022, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 */

#include "pmf.h"

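/*
 * Cached copies of the static slider and APTS tables reported by the
 * platform firmware. They are filled once at init time and then used to
 * program the power limits whenever the platform profile or the power
 * source changes.
 */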
static struct amd_pmf_static_slider_granular_v2 config_store_v2;
static struct amd_pmf_static_slider_granular config_store;
static struct amd_pmf_apts_granular apts_config_store;

#ifdef CONFIG_AMD_PMF_DEBUG
static const char *slider_v2_as_str(unsigned int state)
{
	switch (state) {
	case POWER_MODE_BEST_PERFORMANCE:
		return "Best Performance";
	case POWER_MODE_BALANCED:
		return "Balanced";
	case POWER_MODE_BEST_POWER_EFFICIENCY:
		return "Best Power Efficiency";
	case POWER_MODE_ENERGY_SAVE:
		return "Energy Save";
	default:
		return "Unknown Power Mode";
	}
}

static const char *slider_as_str(unsigned int state)
{
	switch (state) {
	case POWER_MODE_PERFORMANCE:
		return "PERFORMANCE";
	case POWER_MODE_BALANCED_POWER:
		return "BALANCED_POWER";
	case POWER_MODE_POWER_SAVER:
		return "POWER_SAVER";
	default:
		return "Unknown Slider State";
	}
}

const char *amd_pmf_source_as_str(unsigned int state)
{
	switch (state) {
	case POWER_SOURCE_AC:
		return "AC";
	case POWER_SOURCE_DC:
		return "DC";
	default:
		return "Unknown Power State";
	}
}

static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data)
{
	int i, j;

	pr_debug("Static Slider Data - BEGIN\n");

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			pr_debug("--- Source:%s Mode:%s ---\n", amd_pmf_source_as_str(i),
				 slider_as_str(j));
			pr_debug("SPL: %u mW\n", data->prop[i][j].spl);
			pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt);
			pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only);
			pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt);
			pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min);
			pr_debug("STT_SkinTempLimit_APU: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_APU]);
			pr_debug("STT_SkinTempLimit_HS2: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]);
		}
	}

	pr_debug("Static Slider Data - END\n");
}

static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data)
{
	unsigned int i, j;

	pr_debug("Static Slider APTS state index data - BEGIN\n");
	pr_debug("size: %u\n", data->size);

	for (i = 0; i < POWER_SOURCE_MAX; i++)
		for (j = 0; j < POWER_MODE_V2_MAX; j++)
			pr_debug("%s %s: %u\n", amd_pmf_source_as_str(i), slider_v2_as_str(j),
				 data->sps_idx.power_states[i][j]);

	pr_debug("Static Slider APTS state index data - END\n");
}

static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info)
{
	int i;

	pr_debug("Static Slider APTS index default values data - BEGIN\n");

	for (i = 0; i < APTS_MAX_STATES; i++) {
		pr_debug("Table Version[%d] = %u\n", i, info->val[i].table_version);
		pr_debug("Fan Index[%d] = %u\n", i, info->val[i].fan_table_idx);
		pr_debug("PPT[%d] = %u\n", i, info->val[i].pmf_ppt);
		pr_debug("PPT APU[%d] = %u\n", i, info->val[i].ppt_pmf_apu_only);
		pr_debug("STT Min[%d] = %u\n", i, info->val[i].stt_min_limit);
		pr_debug("STT APU[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_apu);
		pr_debug("STT HS2[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_hs2);
	}

	pr_debug("Static Slider APTS index default values data - END\n");
}
#else
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data) {}
static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info) {}
#endif

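/*
 * Query the APTS table from the platform firmware for every supported
 * state index and cache the per-state power and skin temperature limits
 * in apts_config_store.
 */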
static void amd_pmf_load_apts_defaults_sps_v2(struct amd_pmf_dev *pdev)
{
	struct amd_pmf_apts_granular_output output;
	struct amd_pmf_apts_output *ps;
	int i;

	memset(&apts_config_store, 0, sizeof(apts_config_store));

	ps = apts_config_store.val;

	for (i = 0; i < APTS_MAX_STATES; i++) {
		apts_get_static_slider_granular_v2(pdev, &output, i);
		ps[i].table_version = output.val.table_version;
		ps[i].fan_table_idx = output.val.fan_table_idx;
		ps[i].pmf_ppt = output.val.pmf_ppt;
		ps[i].ppt_pmf_apu_only = output.val.ppt_pmf_apu_only;
		ps[i].stt_min_limit = output.val.stt_min_limit;
		ps[i].stt_skin_temp_limit_apu = output.val.stt_skin_temp_limit_apu;
		ps[i].stt_skin_temp_limit_hs2 = output.val.stt_skin_temp_limit_hs2;
	}

	amd_pmf_dump_apts_sps_defaults(&apts_config_store);
}

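/*
 * Read the v2 static slider table and cache the per power-source mapping
 * from power modes to APTS state indices in config_store_v2.
 */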
static void amd_pmf_load_defaults_sps_v2(struct amd_pmf_dev *dev)
{
	struct apmf_static_slider_granular_output_v2 output;
	unsigned int i, j;

	memset(&config_store_v2, 0, sizeof(config_store_v2));
	apmf_get_static_slider_granular_v2(dev, &output);

	config_store_v2.size = output.size;

	for (i = 0; i < POWER_SOURCE_MAX; i++)
		for (j = 0; j < POWER_MODE_V2_MAX; j++)
			config_store_v2.sps_idx.power_states[i][j] =
					output.sps_idx.power_states[i][j];

	amd_pmf_dump_sps_defaults_v2(&config_store_v2);
}

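/*
 * Read the legacy static slider table and cache the power and skin
 * temperature limits for every (power source, power mode) pair in
 * config_store. The firmware reports the entries as a flat array, hence
 * the running idx counter.
 */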
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
	struct apmf_static_slider_granular_output output;
	int i, j, idx = 0;

	memset(&config_store, 0, sizeof(config_store));
	apmf_get_static_slider_granular(dev, &output);

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			config_store.prop[i][j].spl = output.prop[idx].spl;
			config_store.prop[i][j].sppt = output.prop[idx].sppt;
			config_store.prop[i][j].sppt_apu_only =
					output.prop[idx].sppt_apu_only;
			config_store.prop[i][j].fppt = output.prop[idx].fppt;
			config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
					output.prop[idx].stt_skin_temp[STT_TEMP_APU];
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
					output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
			config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
			idx++;
		}
	}
	amd_pmf_dump_sps_defaults(&config_store);
}

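/*
 * Program the PPT and STT limits cached for the given APTS state index.
 * Skin temperature limits are converted to Q8.8 fixed point via
 * fixp_q88_fromint() before being sent.
 */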
static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
{
	amd_pmf_send_cmd(dev, SET_PMF_PPT, false, apts_config_store.val[idx].pmf_ppt, NULL);
	amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false,
			 apts_config_store.val[idx].ppt_pmf_apu_only, NULL);
	amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
			 apts_config_store.val[idx].stt_min_limit, NULL);
	amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
			 fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu),
			 NULL);
	amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
			 fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2),
			 NULL);
}

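/*
 * amd_pmf_update_slider - set or query the slider power limits
 *
 * With SLIDER_OP_SET, programs the cached limits for the current power
 * source and slider index @idx into the firmware. With SLIDER_OP_GET,
 * reads the current limits back from the firmware into @table.
 */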
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
			   struct amd_pmf_static_slider_granular *table)
{
	int src = amd_pmf_get_power_source();

	if (op == SLIDER_OP_SET) {
		amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
		amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
				 config_store.prop[src][idx].sppt_apu_only, NULL);
		amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
				 config_store.prop[src][idx].stt_min, NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
				 fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]),
				 NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
				 fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]),
				 NULL);
	} else if (op == SLIDER_OP_GET) {
		amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
		amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
		amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
		amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
				 &table->prop[src][idx].sppt_apu_only);
		amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
				 &table->prop[src][idx].stt_min);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
	}
}

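/*
 * Map the requested power mode to the APTS state index advertised for
 * the current power source and program the corresponding limits.
 */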
static int amd_pmf_update_sps_power_limits_v2(struct amd_pmf_dev *pdev, int pwr_mode)
{
	int src, index;

	src = amd_pmf_get_power_source();

	switch (pwr_mode) {
	case POWER_MODE_PERFORMANCE:
		index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_PERFORMANCE];
		amd_pmf_update_slider_v2(pdev, index);
		break;
	case POWER_MODE_BALANCED_POWER:
		index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BALANCED];
		amd_pmf_update_slider_v2(pdev, index);
		break;
	case POWER_MODE_POWER_SAVER:
		index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_POWER_EFFICIENCY];
		amd_pmf_update_slider_v2(pdev, index);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

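/*
 * Apply the power limits that match the currently selected platform
 * profile, using the APTS path on PMF interface v2 and the legacy static
 * slider path otherwise.
 */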
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
	int mode;

	mode = amd_pmf_get_pprof_modes(pmf);
	if (mode < 0)
		return mode;

	if (pmf->pmf_if_version == PMF_IF_V2)
		return amd_pmf_update_sps_power_limits_v2(pmf, mode);

	amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);

	return 0;
}

bool is_pprof_balanced(struct amd_pmf_dev *pmf)
{
	return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
}

static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
			       enum platform_profile_option *profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);

	*profile = pmf->current_profile;
	return 0;
}

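/*
 * Translate the currently selected ACPI platform profile into the PMF
 * power mode used to index the static slider tables.
 */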
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
{
	int mode;

	switch (pmf->current_profile) {
	case PLATFORM_PROFILE_PERFORMANCE:
		mode = POWER_MODE_PERFORMANCE;
		break;
	case PLATFORM_PROFILE_BALANCED:
		mode = POWER_MODE_BALANCED_POWER;
		break;
	case PLATFORM_PROFILE_LOW_POWER:
		mode = POWER_MODE_POWER_SAVER;
		break;
	default:
		dev_err(pmf->dev, "Unknown Platform Profile.\n");
		return -EOPNOTSUPP;
	}

	return mode;
}

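/*
 * Notify the platform firmware (and through it the EC) about the current
 * OS power slider position. The flag bitmask encodes both the power
 * source (AC/DC) and the selected performance level.
 */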
int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
{
	u8 flag = 0;
	int mode;
	int src;

	mode = amd_pmf_get_pprof_modes(dev);
	if (mode < 0)
		return mode;

	src = amd_pmf_get_power_source();

	if (src == POWER_SOURCE_AC) {
		switch (mode) {
		case POWER_MODE_PERFORMANCE:
			flag |= BIT(AC_BEST_PERF);
			break;
		case POWER_MODE_BALANCED_POWER:
			flag |= BIT(AC_BETTER_PERF);
			break;
		case POWER_MODE_POWER_SAVER:
			flag |= BIT(AC_BETTER_BATTERY);
			break;
		default:
			dev_err(dev->dev, "unsupported platform profile\n");
			return -EOPNOTSUPP;
		}

	} else if (src == POWER_SOURCE_DC) {
		switch (mode) {
		case POWER_MODE_PERFORMANCE:
			flag |= BIT(DC_BEST_PERF);
			break;
		case POWER_MODE_BALANCED_POWER:
			flag |= BIT(DC_BETTER_PERF);
			break;
		case POWER_MODE_POWER_SAVER:
			flag |= BIT(DC_BATTERY_SAVER);
			break;
		default:
			dev_err(dev->dev, "unsupported platform profile\n");
			return -EOPNOTSUPP;
		}
	}

	apmf_os_power_slider_update(dev, flag);

	return 0;
}

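/*
 * platform_profile "set" callback: record the new profile, notify the EC
 * about the slider change when supported, and reprogram the static
 * slider power limits.
 */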
static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
			       enum platform_profile_option profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
	int ret = 0;

	pmf->current_profile = profile;

	/* Notify EC about the slider position change */
	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
		ret = amd_pmf_power_slider_update_event(pmf);
		if (ret)
			return ret;
	}

	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
		ret = amd_pmf_set_sps_power_limits(pmf);
		if (ret)
			return ret;
	}

	return 0;
}

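/*
 * Initialize static power slider (SPS) support: cache the firmware
 * defaults, apply the balanced profile limits and register the driver as
 * a platform_profile handler.
 */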
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
{
	int err;

	dev->current_profile = PLATFORM_PROFILE_BALANCED;

	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
		if (dev->pmf_if_version == PMF_IF_V2) {
			amd_pmf_load_defaults_sps_v2(dev);
			amd_pmf_load_apts_defaults_sps_v2(dev);
		} else {
			amd_pmf_load_defaults_sps(dev);
		}

		/* update SPS balanced power mode thermals */
		amd_pmf_set_sps_power_limits(dev);
	}

	dev->pprof.profile_get = amd_pmf_profile_get;
	dev->pprof.profile_set = amd_pmf_profile_set;

	/* Setup supported modes */
	set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);

	/* Create platform_profile structure and register */
	err = platform_profile_register(&dev->pprof);
	if (err)
		dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
			err);

	return err;
}

void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
{
	platform_profile_remove();
}