// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Platform Management Framework (PMF) Driver
 *
 * Copyright (c) 2022, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 */

#include "pmf.h"

static struct amd_pmf_static_slider_granular config_store;

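/**
 * amd_pmf_load_defaults_sps - cache the BIOS static slider defaults
 * @dev: PMF device structure
 *
 * Query the static slider granular table from the BIOS via APMF and store
 * one set of limits in config_store for every power source (AC/DC) and
 * power mode combination, so later slider updates can be served from the
 * cached copy.
 */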
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
        struct apmf_static_slider_granular_output output;
        int i, j, idx = 0;

        memset(&config_store, 0, sizeof(config_store));
        apmf_get_static_slider_granular(dev, &output);

        for (i = 0; i < POWER_SOURCE_MAX; i++) {
                for (j = 0; j < POWER_MODE_MAX; j++) {
                        config_store.prop[i][j].spl = output.prop[idx].spl;
                        config_store.prop[i][j].sppt = output.prop[idx].sppt;
                        config_store.prop[i][j].sppt_apu_only =
                                output.prop[idx].sppt_apu_only;
                        config_store.prop[i][j].fppt = output.prop[idx].fppt;
                        config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
                        config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
                                output.prop[idx].stt_skin_temp[STT_TEMP_APU];
                        config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
                                output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
                        config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
                        idx++;
                }
        }
}

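/**
 * amd_pmf_update_slider - program or read back the slider power limits
 * @dev: PMF device structure
 * @op: SLIDER_OP_SET to program the limits, SLIDER_OP_GET to read them back
 * @idx: power mode index (performance/balanced/power saver)
 * @table: destination for SLIDER_OP_GET; unused for SLIDER_OP_SET
 *
 * For the current power source, either push the cached SPL/FPPT/SPPT and
 * skin temperature tracking (STT) limits to the platform firmware, or read
 * the currently programmed values back into @table.
 */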
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
                           struct amd_pmf_static_slider_granular *table)
{
        int src = amd_pmf_get_power_source();

        if (op == SLIDER_OP_SET) {
                amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
                amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
                amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
                amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
                                 config_store.prop[src][idx].sppt_apu_only, NULL);
                amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
                                 config_store.prop[src][idx].stt_min, NULL);
                amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
                                 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
                amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
                                 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
        } else if (op == SLIDER_OP_GET) {
                amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
                amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
                amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
                amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
                                 &table->prop[src][idx].sppt_apu_only);
                amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
                                 &table->prop[src][idx].stt_min);
                amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
                                 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
                amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
                                 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
        }
}

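/**
 * amd_pmf_set_sps_power_limits - apply limits for the current profile
 * @pmf: PMF device structure
 *
 * Map the currently selected platform profile to a power mode and program
 * the corresponding cached slider limits.
 *
 * Return: 0 on success, negative error code if the profile is unsupported.
 */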
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
        int mode;

        mode = amd_pmf_get_pprof_modes(pmf);
        if (mode < 0)
                return mode;

        amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);

        return 0;
}

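/* Report whether the currently selected platform profile is "balanced". */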
bool is_pprof_balanced(struct amd_pmf_dev *pmf)
{
        return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
}

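/* platform_profile ->profile_get() hook: report the cached profile. */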
static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
                               enum platform_profile_option *profile)
{
        struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);

        *profile = pmf->current_profile;
        return 0;
}

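/**
 * amd_pmf_get_pprof_modes - translate a platform profile to a power mode
 * @pmf: PMF device structure
 *
 * Return: the POWER_MODE_* index matching the current platform profile,
 * or -EOPNOTSUPP if the profile is not handled.
 */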
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
{
        int mode;

        switch (pmf->current_profile) {
        case PLATFORM_PROFILE_PERFORMANCE:
                mode = POWER_MODE_PERFORMANCE;
                break;
        case PLATFORM_PROFILE_BALANCED:
                mode = POWER_MODE_BALANCED_POWER;
                break;
        case PLATFORM_PROFILE_LOW_POWER:
                mode = POWER_MODE_POWER_SAVER;
                break;
        default:
                dev_err(pmf->dev, "Unknown Platform Profile.\n");
                return -EOPNOTSUPP;
        }

        return mode;
}

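/**
 * amd_pmf_power_slider_update_event - notify the BIOS of a slider change
 * @dev: PMF device structure
 *
 * Encode the current power mode and power source (AC/DC) into the slider
 * position bitmask and forward it to the BIOS/EC through APMF.
 *
 * Return: 0 on success, negative error code if the profile is unsupported.
 */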
int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
{
        u8 flag = 0;
        int mode;
        int src;

        mode = amd_pmf_get_pprof_modes(dev);
        if (mode < 0)
                return mode;

        src = amd_pmf_get_power_source();

        if (src == POWER_SOURCE_AC) {
                switch (mode) {
                case POWER_MODE_PERFORMANCE:
                        flag |= BIT(AC_BEST_PERF);
                        break;
                case POWER_MODE_BALANCED_POWER:
                        flag |= BIT(AC_BETTER_PERF);
                        break;
                case POWER_MODE_POWER_SAVER:
                        flag |= BIT(AC_BETTER_BATTERY);
                        break;
                default:
                        dev_err(dev->dev, "unsupported platform profile\n");
                        return -EOPNOTSUPP;
                }

        } else if (src == POWER_SOURCE_DC) {
                switch (mode) {
                case POWER_MODE_PERFORMANCE:
                        flag |= BIT(DC_BEST_PERF);
                        break;
                case POWER_MODE_BALANCED_POWER:
                        flag |= BIT(DC_BETTER_PERF);
                        break;
                case POWER_MODE_POWER_SAVER:
                        flag |= BIT(DC_BATTERY_SAVER);
                        break;
                default:
                        dev_err(dev->dev, "unsupported platform profile\n");
                        return -EOPNOTSUPP;
                }
        }

        apmf_os_power_slider_update(dev, flag);

        return 0;
}

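/*
 * platform_profile ->profile_set() hook: remember the new profile, notify
 * the EC of the slider position change when supported, and reprogram the
 * static slider power limits when supported.
 */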
static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
                               enum platform_profile_option profile)
{
        struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
        int ret = 0;

        pmf->current_profile = profile;

        /* Notify EC about the slider position change */
        if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
                ret = amd_pmf_power_slider_update_event(pmf);
                if (ret)
                        return ret;
        }

        if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
                ret = amd_pmf_set_sps_power_limits(pmf);
                if (ret)
                        return ret;
        }

        return 0;
}

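/**
 * amd_pmf_init_sps - set up static power slider (SPS) support
 * @dev: PMF device structure
 *
 * Default to the balanced profile, cache and apply the BIOS slider limits
 * when the static slider APMF function is supported, and register the
 * low-power/balanced/performance choices with the platform_profile core.
 */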
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
{
        int err;

        dev->current_profile = PLATFORM_PROFILE_BALANCED;

        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
                amd_pmf_load_defaults_sps(dev);

                /* update SPS balanced power mode thermals */
                amd_pmf_set_sps_power_limits(dev);
        }

        dev->pprof.profile_get = amd_pmf_profile_get;
        dev->pprof.profile_set = amd_pmf_profile_set;

        /* Setup supported modes */
        set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
        set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
        set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);

        /* Create platform_profile structure and register */
        err = platform_profile_register(&dev->pprof);
        if (err)
                dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
                        err);

        return err;
}

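/* Unregister the platform profile handler registered in amd_pmf_init_sps(). */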
void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
{
        platform_profile_remove();
}