1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include "polaris10_smc.h"
25 #include "smu7_dyn_defaults.h"
26
27 #include "smu7_hwmgr.h"
28 #include "hardwaremanager.h"
29 #include "ppatomctrl.h"
30 #include "pp_debug.h"
31 #include "cgs_common.h"
32 #include "atombios.h"
33 #include "polaris10_smumgr.h"
34 #include "pppcielanes.h"
35
36 #include "smu_ucode_xfer_vi.h"
37 #include "smu74_discrete.h"
38 #include "smu/smu_7_1_3_d.h"
39 #include "smu/smu_7_1_3_sh_mask.h"
40 #include "gmc/gmc_8_1_d.h"
41 #include "gmc/gmc_8_1_sh_mask.h"
42 #include "oss/oss_3_0_d.h"
43 #include "gca/gfx_8_0_d.h"
44 #include "bif/bif_5_0_d.h"
45 #include "bif/bif_5_0_sh_mask.h"
46 #include "dce/dce_10_0_d.h"
47 #include "dce/dce_10_0_sh_mask.h"
48 #include "polaris10_pwrvirus.h"
49 #include "smu7_ppsmc.h"
50 #include "smu7_smumgr.h"
51
52 #define POLARIS10_SMC_SIZE 0x20000
53 #define VOLTAGE_VID_OFFSET_SCALE1 625
54 #define VOLTAGE_VID_OFFSET_SCALE2 100
55 #define POWERTUNE_DEFAULT_SET_MAX 1
56 #define VDDC_VDDCI_DELTA 200
57 #define MC_CG_ARB_FREQ_F1 0x0b
58
59 static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
60 /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
61 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
62 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
63 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
64 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
65 };
66
67 static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
68 {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
69 {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
70 {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
71 {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
72 {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
73 {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
74 {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
75 {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
76
77 static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
78 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
79 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
80 {
81 uint32_t i;
82 uint16_t vddci;
83 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
84
85 *voltage = *mvdd = 0;
86
87 /* the clock-voltage dependency table is empty */
88 if (dep_table->count == 0)
89 return -EINVAL;
90
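	/*
	 * The returned *voltage is a packed SMU_VoltageLevel word: the VDDC
	 * value (in units of VOLTAGE_SCALE) is placed at VDDC_SHIFT, the
	 * VDDCI value at VDDCI_SHIFT, and a single phase-count bit at
	 * PHASES_SHIFT. *mvdd is returned separately, also scaled by
	 * VOLTAGE_SCALE.
	 */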
91 for (i = 0; i < dep_table->count; i++) {
92 /* find the first sclk that is not lower than the request */
93 if (dep_table->entries[i].clk >= clock) {
94 *voltage |= (dep_table->entries[i].vddc *
95 VOLTAGE_SCALE) << VDDC_SHIFT;
96 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
97 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
98 VOLTAGE_SCALE) << VDDCI_SHIFT;
99 else if (dep_table->entries[i].vddci)
100 *voltage |= (dep_table->entries[i].vddci *
101 VOLTAGE_SCALE) << VDDCI_SHIFT;
102 else {
103 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
104 (dep_table->entries[i].vddc -
105 (uint16_t)VDDC_VDDCI_DELTA));
106 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
107 }
108
109 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
110 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
111 VOLTAGE_SCALE;
112 else if (dep_table->entries[i].mvdd)
113 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
114 VOLTAGE_SCALE;
115
116 *voltage |= 1 << PHASES_SHIFT;
117 return 0;
118 }
119 }
120
121 /* the requested sclk is bigger than the max sclk in the dependency table */
122 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
123
124 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
125 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
126 VOLTAGE_SCALE) << VDDCI_SHIFT;
127 else if (dep_table->entries[i-1].vddci) {
128 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
129 (dep_table->entries[i - 1].vddc -
130 (uint16_t)VDDC_VDDCI_DELTA));
131 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
132 }
133
134 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
135 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
136 else if (dep_table->entries[i - 1].mvdd)
137 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
138
139 return 0;
140 }
141
142 static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
143 {
144 uint32_t tmp;
145 tmp = raw_setting * 4096 / 100;
146 return (uint16_t)tmp;
147 }
148
149 static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
150 {
151 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
152
153 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
154 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
155 struct phm_ppt_v1_information *table_info =
156 (struct phm_ppt_v1_information *)(hwmgr->pptable);
157 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
158 struct pp_advance_fan_control_parameters *fan_table =
159 &hwmgr->thermal_controller.advanceFanControlParameters;
160 int i, j, k;
161 const uint16_t *pdef1;
162 const uint16_t *pdef2;
163
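	/* TDP values are handed to the SMC scaled by 128 (i.e. presumably in
	 * 1/128 W units, assuming usTDP is given in watts) and byte-swapped
	 * via PP_HOST_TO_SMC_US. */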
164 table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
165 table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
166
167 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
168 "Target Operating Temp is out of Range!",
169 );
170
171 table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
172 cac_dtp_table->usTargetOperatingTemp * 256);
173 table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
174 cac_dtp_table->usTemperatureLimitHotspot * 256);
175 table->FanGainEdge = PP_HOST_TO_SMC_US(
176 scale_fan_gain_settings(fan_table->usFanGainEdge));
177 table->FanGainHotspot = PP_HOST_TO_SMC_US(
178 scale_fan_gain_settings(fan_table->usFanGainHotspot));
179
180 pdef1 = defaults->BAPMTI_R;
181 pdef2 = defaults->BAPMTI_RC;
182
183 for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
184 for (j = 0; j < SMU74_DTE_SOURCES; j++) {
185 for (k = 0; k < SMU74_DTE_SINKS; k++) {
186 table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
187 table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
188 pdef1++;
189 pdef2++;
190 }
191 }
192 }
193
194 return 0;
195 }
196
197 static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
198 {
199 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
200 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
201
202 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
203 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
204 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
205 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
206
207 return 0;
208 }
209
210 static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
211 {
212 uint16_t tdc_limit;
213 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
214 struct phm_ppt_v1_information *table_info =
215 (struct phm_ppt_v1_information *)(hwmgr->pptable);
216 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
217
218 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
219 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
220 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
221 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
222 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
223 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
224
225 return 0;
226 }
227
228 static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
229 {
230 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
231 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
232 uint32_t temp;
233
234 if (smu7_read_smc_sram_dword(hwmgr->smumgr,
235 fuse_table_offset +
236 offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
237 (uint32_t *)&temp, SMC_RAM_END))
238 PP_ASSERT_WITH_CODE(false,
239 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
240 return -EINVAL);
241 else {
242 smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
243 smu_data->power_tune_table.LPMLTemperatureMin =
244 (uint8_t)((temp >> 16) & 0xff);
245 smu_data->power_tune_table.LPMLTemperatureMax =
246 (uint8_t)((temp >> 8) & 0xff);
247 smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
248 }
249 return 0;
250 }
251
252 static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
253 {
254 int i;
255 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
256
257 /* Currently not used. Set all to zero. */
258 for (i = 0; i < 16; i++)
259 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
260
261 return 0;
262 }
263
264 static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
265 {
266 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
267
268 /* TO DO move to hwmgr */
269 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
270 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
271 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
272 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
273
274 smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
275 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
276 return 0;
277 }
278
279 static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
280 {
281 int i;
282 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
283
284 /* Currently not used. Set all to zero. */
285 for (i = 0; i < 16; i++)
286 smu_data->power_tune_table.GnbLPML[i] = 0;
287
288 return 0;
289 }
290
291 static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
292 {
293 return 0;
294 }
295
296 static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
297 {
298 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
299 struct phm_ppt_v1_information *table_info =
300 (struct phm_ppt_v1_information *)(hwmgr->pptable);
301 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
302 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
303 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
304
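	/* Scale the pptable CAC leakage values by 256/100, i.e. pack them
	 * into what is presumably an 8.8 fixed-point representation expected
	 * by the SMC. */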
305 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
306 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
307
308 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
309 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
310 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
311 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
312
313 return 0;
314 }
315
316 static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
317 {
318 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
319 uint32_t pm_fuse_table_offset;
320
321 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
322 PHM_PlatformCaps_PowerContainment)) {
323 if (smu7_read_smc_sram_dword(hwmgr->smumgr,
324 SMU7_FIRMWARE_HEADER_LOCATION +
325 offsetof(SMU74_Firmware_Header, PmFuseTable),
326 &pm_fuse_table_offset, SMC_RAM_END))
327 PP_ASSERT_WITH_CODE(false,
328 "Attempt to get pm_fuse_table_offset Failed!",
329 return -EINVAL);
330
331 if (polaris10_populate_svi_load_line(hwmgr))
332 PP_ASSERT_WITH_CODE(false,
333 "Attempt to populate SviLoadLine Failed!",
334 return -EINVAL);
335
336 if (polaris10_populate_tdc_limit(hwmgr))
337 PP_ASSERT_WITH_CODE(false,
338 "Attempt to populate TDCLimit Failed!", return -EINVAL);
339
340 if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
341 PP_ASSERT_WITH_CODE(false,
342 "Attempt to populate TdcWaterfallCtl, "
343 "LPMLTemperature Min and Max Failed!",
344 return -EINVAL);
345
346 if (0 != polaris10_populate_temperature_scaler(hwmgr))
347 PP_ASSERT_WITH_CODE(false,
348 "Attempt to populate LPMLTemperatureScaler Failed!",
349 return -EINVAL);
350
351 if (polaris10_populate_fuzzy_fan(hwmgr))
352 PP_ASSERT_WITH_CODE(false,
353 "Attempt to populate Fuzzy Fan Control parameters Failed!",
354 return -EINVAL);
355
356 if (polaris10_populate_gnb_lpml(hwmgr))
357 PP_ASSERT_WITH_CODE(false,
358 "Attempt to populate GnbLPML Failed!",
359 return -EINVAL);
360
361 if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
362 PP_ASSERT_WITH_CODE(false,
363 "Attempt to populate GnbLPML Min and Max Vid Failed!",
364 return -EINVAL);
365
366 if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
367 PP_ASSERT_WITH_CODE(false,
368 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
369 "Sidd Failed!", return -EINVAL);
370
371 if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
372 (uint8_t *)&smu_data->power_tune_table,
373 (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
374 PP_ASSERT_WITH_CODE(false,
375 "Attempt to download PmFuseTable Failed!",
376 return -EINVAL);
377 }
378 return 0;
379 }
380
381 /**
382 * Mvdd table preparation for SMC.
383 *
384 * @param hwmgr the address of the hardware manager
385 * @param table the SMC DPM table structure to be populated
386 * @return always 0
387 */
388 static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
389 SMU74_Discrete_DpmTable *table)
390 {
391 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
392 uint32_t count, level;
393
394 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
395 count = data->mvdd_voltage_table.count;
396 if (count > SMU_MAX_SMIO_LEVELS)
397 count = SMU_MAX_SMIO_LEVELS;
398 for (level = 0; level < count; level++) {
399 table->SmioTable2.Pattern[level].Voltage =
400 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
401 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
402 table->SmioTable2.Pattern[level].Smio =
403 (uint8_t) level;
404 table->Smio[level] |=
405 data->mvdd_voltage_table.entries[level].smio_low;
406 }
407 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
408
409 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
410 }
411
412 return 0;
413 }
414
415 static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
416 struct SMU74_Discrete_DpmTable *table)
417 {
418 uint32_t count, level;
419 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
420
421 count = data->vddci_voltage_table.count;
422
423 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
424 if (count > SMU_MAX_SMIO_LEVELS)
425 count = SMU_MAX_SMIO_LEVELS;
426 for (level = 0; level < count; ++level) {
427 table->SmioTable1.Pattern[level].Voltage =
428 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
429 table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
430
431 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
432 }
433 }
434
435 table->SmioMask1 = data->vddci_voltage_table.mask_low;
436
437 return 0;
438 }
439
440 /**
441 * Preparation of vddc and vddgfx CAC tables for SMC.
442 *
443 * @param hwmgr the address of the hardware manager
444 * @param table the SMC DPM table structure to be populated
445 * @return always 0
446 */
447 static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
448 struct SMU74_Discrete_DpmTable *table)
449 {
450 uint32_t count;
451 uint8_t index;
452 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
453 struct phm_ppt_v1_information *table_info =
454 (struct phm_ppt_v1_information *)(hwmgr->pptable);
455 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
456 table_info->vddc_lookup_table;
457 /* The table is already swapped, so in order to use the values from it
458 * we need to swap them back.
459 * We are populating vddc CAC data into the BapmVddc table
460 * in both split and merged modes.
461 */
462 for (count = 0; count < lookup_table->count; count++) {
463 index = phm_get_voltage_index(lookup_table,
464 data->vddc_voltage_table.entries[count].value);
465 table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
466 table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
467 table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
468 }
469
470 return 0;
471 }
472
473 /**
474 * Preparation of voltage tables for SMC.
475 *
476 * @param hwmgr the address of the hardware manager
477 * @param table the SMC DPM table structure to be populated
478 * @return always 0
479 */
480
481 static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
482 struct SMU74_Discrete_DpmTable *table)
483 {
484 polaris10_populate_smc_vddci_table(hwmgr, table);
485 polaris10_populate_smc_mvdd_table(hwmgr, table);
486 polaris10_populate_cac_table(hwmgr, table);
487
488 return 0;
489 }
490
491 static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
492 struct SMU74_Discrete_Ulv *state)
493 {
494 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
495 struct phm_ppt_v1_information *table_info =
496 (struct phm_ppt_v1_information *)(hwmgr->pptable);
497
498 state->CcPwrDynRm = 0;
499 state->CcPwrDynRm1 = 0;
500
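	/* The ULV voltage offset is converted to a VID count: scaling by
	 * VOLTAGE_VID_OFFSET_SCALE2/VOLTAGE_VID_OFFSET_SCALE1 (100/625) gives
	 * one VID step per 6.25 mV, assuming the offset is specified in mV. */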
501 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
502 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
503 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
504
505 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
506
507 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
508 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
509 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
510
511 return 0;
512 }
513
514 static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
515 struct SMU74_Discrete_DpmTable *table)
516 {
517 return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
518 }
519
520 static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
521 struct SMU74_Discrete_DpmTable *table)
522 {
523 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
524 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
525 struct smu7_dpm_table *dpm_table = &data->dpm_table;
526 int i;
527
528 /* Index (dpm_table->pcie_speed_table.count)
529 * is reserved for PCIE boot level. */
530 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
531 table->LinkLevel[i].PcieGenSpeed =
532 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
533 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
534 dpm_table->pcie_speed_table.dpm_levels[i].param1);
535 table->LinkLevel[i].EnabledForActivity = 1;
536 table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
537 table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
538 table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
539 }
540
541 smu_data->smc_state_table.LinkLevelCount =
542 (uint8_t)dpm_table->pcie_speed_table.count;
543
544 /* To Do move to hwmgr */
545 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
546 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
547
548 return 0;
549 }
550
551
552 static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
553 SMU74_Discrete_DpmTable *table)
554 {
555 struct pp_smumgr *smumgr = hwmgr->smumgr;
556 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
557 uint32_t i, ref_clk;
558
559 struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
560
561 ref_clk = smu7_get_xclk(hwmgr);
562
563 if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
564 for (i = 0; i < NUM_SCLK_RANGE; i++) {
565 table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
566 table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
567 table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
568
569 table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
570 table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
571
572 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
573 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
574 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
575 }
576 return;
577 }
578
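	/* No SCLK range table in the VBIOS: fall back to the hard-coded
	 * Range_Table and derive the transition frequencies from the
	 * reference clock and each range's post-divider. */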
579 for (i = 0; i < NUM_SCLK_RANGE; i++) {
580 smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
581 smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
582
583 table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
584 table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
585 table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
586
587 table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
588 table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
589
590 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
591 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
592 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
593 }
594 }
595
596 /**
597 * Calculates the SCLK dividers using the provided engine clock
598 *
599 * @param hwmgr the address of the hardware manager
600 * @param clock the engine clock to use to populate the structure
601 * @param sclk_setting the SMC SCLK setting structure to be populated
602 */
603 static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
604 uint32_t clock, SMU_SclkSetting *sclk_setting)
605 {
606 struct pp_smumgr *smumgr = hwmgr->smumgr;
607 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
608 const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
609 struct pp_atomctrl_clock_dividers_ai dividers;
610 uint32_t ref_clock;
611 uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
612 uint8_t i;
613 int result;
614 uint64_t temp;
615
616 sclk_setting->SclkFrequency = clock;
617 /* get the engine clock dividers for this clock value */
618 result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
619 if (result == 0) {
620 sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
621 sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
622 sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
623 sclk_setting->PllRange = dividers.ucSclkPllRange;
624 sclk_setting->Sclk_slew_rate = 0x400;
625 sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
626 sclk_setting->Pcc_down_slew_rate = 0xffff;
627 sclk_setting->SSc_En = dividers.ucSscEnable;
628 sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
629 sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
630 sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
631 return result;
632 }
633
634 ref_clock = smu7_get_xclk(hwmgr);
635
636 for (i = 0; i < NUM_SCLK_RANGE; i++) {
637 if (clock > smu_data->range_table[i].trans_lower_frequency
638 && clock <= smu_data->range_table[i].trans_upper_frequency) {
639 sclk_setting->PllRange = i;
640 break;
641 }
642 }
643
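	/* VBIOS dividers were not available: compute the PLL FCW manually.
	 * Fcw_int is floor((clock << postdiv) / ref_clock); Fcw_frac is the
	 * remaining fraction in Q16 (hence the << 0x10 before the division). */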
644 sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
645 temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
646 temp <<= 0x10;
647 do_div(temp, ref_clock);
648 sclk_setting->Fcw_frac = temp & 0xffff;
649
650 pcc_target_percent = 10; /* Hardcode 10% for now. */
651 pcc_target_freq = clock - (clock * pcc_target_percent / 100);
652 sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
653
654 ss_target_percent = 2; /* Hardcode 2% for now. */
655 sclk_setting->SSc_En = 0;
656 if (ss_target_percent) {
657 sclk_setting->SSc_En = 1;
658 ss_target_freq = clock - (clock * ss_target_percent / 100);
659 sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
660 temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
661 temp <<= 0x10;
662 do_div(temp, ref_clock);
663 sclk_setting->Fcw1_frac = temp & 0xffff;
664 }
665
666 return 0;
667 }
668
669 /**
670 * Populates single SMC SCLK structure using the provided engine clock
671 *
672 * @param hwmgr the address of the hardware manager
673 * @param clock the engine clock to use to populate the structure
674 * @param sclk_al_threshold the activity level threshold for this level
 * @param level the SMC graphics level structure to be populated
675 */
676
677 static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
678 uint32_t clock, uint16_t sclk_al_threshold,
679 struct SMU74_Discrete_GraphicsLevel *level)
680 {
681 int result;
682 /* PP_Clocks minClocks; */
683 uint32_t mvdd;
684 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
685 struct phm_ppt_v1_information *table_info =
686 (struct phm_ppt_v1_information *)(hwmgr->pptable);
687 SMU_SclkSetting curr_sclk_setting = { 0 };
688
689 result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
690
691 /* populate graphics levels */
692 result = polaris10_get_dependency_volt_by_clk(hwmgr,
693 table_info->vdd_dep_on_sclk, clock,
694 &level->MinVoltage, &mvdd);
695
696 PP_ASSERT_WITH_CODE((0 == result),
697 "can not find VDDC voltage value for "
698 "VDDC engine clock dependency table",
699 return result);
700 level->ActivityLevel = sclk_al_threshold;
701
702 level->CcPwrDynRm = 0;
703 level->CcPwrDynRm1 = 0;
704 level->EnabledForActivity = 0;
705 level->EnabledForThrottle = 1;
706 level->UpHyst = 10;
707 level->DownHyst = 0;
708 level->VoltageDownHyst = 0;
709 level->PowerThrottle = 0;
710 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
711
712 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
713 level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
714 hwmgr->display_config.min_core_set_clock_in_sr);
715
716 /* Default to slow, highest DPM level will be
717 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
718 */
719 if (data->update_up_hyst)
720 level->UpHyst = (uint8_t)data->up_hyst;
721 if (data->update_down_hyst)
722 level->DownHyst = (uint8_t)data->down_hyst;
723
724 level->SclkSetting = curr_sclk_setting;
725
726 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
727 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
728 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
729 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
730 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
731 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
732 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
733 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
734 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
735 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
736 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
737 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
738 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
739 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
740 return 0;
741 }
742
743 /**
744 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
745 *
746 * @param hwmgr the address of the hardware manager
747 */
748 int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
749 {
750 struct pp_smumgr *smumgr = hwmgr->smumgr;
751 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
752 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
753 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
754 struct phm_ppt_v1_information *table_info =
755 (struct phm_ppt_v1_information *)(hwmgr->pptable);
756 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
757 uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
758 int result = 0;
759 uint32_t array = smu_data->smu7_data.dpm_table_start +
760 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
761 uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
762 SMU74_MAX_LEVELS_GRAPHICS;
763 struct SMU74_Discrete_GraphicsLevel *levels =
764 smu_data->smc_state_table.GraphicsLevel;
765 uint32_t i, max_entry;
766 uint8_t hightest_pcie_level_enabled = 0,
767 lowest_pcie_level_enabled = 0,
768 mid_pcie_level_enabled = 0,
769 count = 0;
770
771 polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
772
773 for (i = 0; i < dpm_table->sclk_table.count; i++) {
774
775 result = polaris10_populate_single_graphic_level(hwmgr,
776 dpm_table->sclk_table.dpm_levels[i].value,
777 (uint16_t)smu_data->activity_target[i],
778 &(smu_data->smc_state_table.GraphicsLevel[i]));
779 if (result)
780 return result;
781
782 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
783 if (i > 1)
784 levels[i].DeepSleepDivId = 0;
785 }
786 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
787 PHM_PlatformCaps_SPLLShutdownSupport))
788 smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
789
790 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
791 smu_data->smc_state_table.GraphicsDpmLevelCount =
792 (uint8_t)dpm_table->sclk_table.count;
793 hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
794 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
795
796
797 if (pcie_table != NULL) {
798 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
799 "There must be 1 or more PCIE levels defined in PPTable.",
800 return -EINVAL);
801 max_entry = pcie_entry_cnt - 1;
802 for (i = 0; i < dpm_table->sclk_table.count; i++)
803 levels[i].pcieDpmLevel =
804 (uint8_t) ((i < max_entry) ? i : max_entry);
805 } else {
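		/* No PCIe table in the pptable: derive the highest, lowest and
		 * middle enabled PCIe DPM levels from pcie_dpm_enable_mask and
		 * map the SCLK levels onto them below. */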
806 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
807 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
808 (1 << (hightest_pcie_level_enabled + 1))) != 0))
809 hightest_pcie_level_enabled++;
810
811 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
812 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
813 (1 << lowest_pcie_level_enabled)) == 0))
814 lowest_pcie_level_enabled++;
815
816 while ((count < hightest_pcie_level_enabled) &&
817 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
818 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
819 count++;
820
821 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
822 hightest_pcie_level_enabled ?
823 (lowest_pcie_level_enabled + 1 + count) :
824 hightest_pcie_level_enabled;
825
826 /* set pcieDpmLevel to hightest_pcie_level_enabled */
827 for (i = 2; i < dpm_table->sclk_table.count; i++)
828 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
829
830 /* set pcieDpmLevel to lowest_pcie_level_enabled */
831 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
832
833 /* set pcieDpmLevel to mid_pcie_level_enabled */
834 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
835 }
836 /* the level count is sent to the SMC once at SMC table init and never changes */
837 result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
838 (uint32_t)array_size, SMC_RAM_END);
839
840 return result;
841 }
842
843
844 static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
845 uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
846 {
847 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
848 struct phm_ppt_v1_information *table_info =
849 (struct phm_ppt_v1_information *)(hwmgr->pptable);
850 int result = 0;
851 struct cgs_display_info info = {0, 0, NULL};
852 uint32_t mclk_stutter_mode_threshold = 40000;
853
854 cgs_get_active_displays_info(hwmgr->device, &info);
855
856 if (table_info->vdd_dep_on_mclk) {
857 result = polaris10_get_dependency_volt_by_clk(hwmgr,
858 table_info->vdd_dep_on_mclk, clock,
859 &mem_level->MinVoltage, &mem_level->MinMvdd);
860 PP_ASSERT_WITH_CODE((0 == result),
861 "can not find MinVddc voltage value from memory "
862 "VDDC voltage dependency table", return result);
863 }
864
865 mem_level->MclkFrequency = clock;
866 mem_level->EnabledForThrottle = 1;
867 mem_level->EnabledForActivity = 0;
868 mem_level->UpHyst = 0;
869 mem_level->DownHyst = 100;
870 mem_level->VoltageDownHyst = 0;
871 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
872 mem_level->StutterEnable = false;
873 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
874
875 data->display_timing.num_existing_displays = info.display_count;
876
877 if (mclk_stutter_mode_threshold &&
878 (clock <= mclk_stutter_mode_threshold) &&
879 (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
880 STUTTER_ENABLE) & 0x1))
881 mem_level->StutterEnable = true;
882
883 if (!result) {
884 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
885 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
886 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
887 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
888 }
889 return result;
890 }
891
892 /**
893 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
894 *
895 * @param hwmgr the address of the hardware manager
896 */
897 int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
898 {
899 struct pp_smumgr *smumgr = hwmgr->smumgr;
900 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
901 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
902 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
903 int result;
904 /* populate MCLK dpm table to SMU7 */
905 uint32_t array = smu_data->smu7_data.dpm_table_start +
906 offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
907 uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
908 SMU74_MAX_LEVELS_MEMORY;
909 struct SMU74_Discrete_MemoryLevel *levels =
910 smu_data->smc_state_table.MemoryLevel;
911 uint32_t i;
912
913 for (i = 0; i < dpm_table->mclk_table.count; i++) {
914 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
915 "can not populate memory level as memory clock is zero",
916 return -EINVAL);
917 result = polaris10_populate_single_memory_level(hwmgr,
918 dpm_table->mclk_table.dpm_levels[i].value,
919 &levels[i]);
920 if (i == dpm_table->mclk_table.count - 1) {
921 levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
922 levels[i].EnabledForActivity = 1;
923 }
924 if (result)
925 return result;
926 }
927
928 /* In order to prevent MC activity in stutter mode from pushing DPM up,
929 * the UVD change complements this by putting the MCLK in
930 * a higher state by default, such that we are not affected by
931 * the up threshold or MCLK DPM latency.
932 */
933 levels[0].ActivityLevel = 0x1f;
934 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
935
936 smu_data->smc_state_table.MemoryDpmLevelCount =
937 (uint8_t)dpm_table->mclk_table.count;
938 hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
939 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
940
941 /* the level count is sent to the SMC once at SMC table init and never changes */
942 result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
943 (uint32_t)array_size, SMC_RAM_END);
944
945 return result;
946 }
947
948 /**
949 * Populates the SMC MVDD structure using the provided memory clock.
950 *
951 * @param hwmgr the address of the hardware manager
952 * @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
953 * @param smio_pat the SMIO pattern structure to be populated
954 */
955 static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
956 uint32_t mclk, SMIO_Pattern *smio_pat)
957 {
958 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
959 struct phm_ppt_v1_information *table_info =
960 (struct phm_ppt_v1_information *)(hwmgr->pptable);
961 uint32_t i = 0;
962
963 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
964 /* find the first mvdd entry whose clock is not lower than the request */
965 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
966 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
967 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
968 break;
969 }
970 }
971 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
972 "MVDD Voltage is outside the supported range.",
973 return -EINVAL);
974 } else
975 return -EINVAL;
976
977 return 0;
978 }
979
980 static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
981 SMU74_Discrete_DpmTable *table)
982 {
983 int result = 0;
984 uint32_t sclk_frequency;
985 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
986 struct phm_ppt_v1_information *table_info =
987 (struct phm_ppt_v1_information *)(hwmgr->pptable);
988 SMIO_Pattern vol_level;
989 uint32_t mvdd;
990 uint16_t us_mvdd;
991
992 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
993
994 /* Get MinVoltage and Frequency from DPM0,
995 * already converted to SMC_UL */
996 sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
997 result = polaris10_get_dependency_volt_by_clk(hwmgr,
998 table_info->vdd_dep_on_sclk,
999 sclk_frequency,
1000 &table->ACPILevel.MinVoltage, &mvdd);
1001 PP_ASSERT_WITH_CODE((0 == result),
1002 "Cannot find ACPI VDDC voltage value "
1003 "in Clock Dependency Table",
1004 );
1005
1006 result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
1007 PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
1008
1009 table->ACPILevel.DeepSleepDivId = 0;
1010 table->ACPILevel.CcPwrDynRm = 0;
1011 table->ACPILevel.CcPwrDynRm1 = 0;
1012
1013 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1014 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1015 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1016 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1017
1018 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
1019 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
1020 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
1021 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
1022 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
1023 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
1024 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
1025 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
1026 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
1027 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
1028
1029
1030 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1031 table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
1032 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1033 table_info->vdd_dep_on_mclk,
1034 table->MemoryACPILevel.MclkFrequency,
1035 &table->MemoryACPILevel.MinVoltage, &mvdd);
1036 PP_ASSERT_WITH_CODE((0 == result),
1037 "Cannot find ACPI VDDCI voltage value "
1038 "in Clock Dependency Table",
1039 );
1040
1041 us_mvdd = 0;
1042 if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1043 (data->mclk_dpm_key_disabled))
1044 us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1045 else {
1046 if (!polaris10_populate_mvdd_value(hwmgr,
1047 data->dpm_table.mclk_table.dpm_levels[0].value,
1048 &vol_level))
1049 us_mvdd = vol_level.Voltage;
1050 }
1051
1052 if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
1053 table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
1054 else
1055 table->MemoryACPILevel.MinMvdd = 0;
1056
1057 table->MemoryACPILevel.StutterEnable = false;
1058
1059 table->MemoryACPILevel.EnabledForThrottle = 0;
1060 table->MemoryACPILevel.EnabledForActivity = 0;
1061 table->MemoryACPILevel.UpHyst = 0;
1062 table->MemoryACPILevel.DownHyst = 100;
1063 table->MemoryACPILevel.VoltageDownHyst = 0;
1064 table->MemoryACPILevel.ActivityLevel =
1065 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1066
1067 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1068 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1069
1070 return result;
1071 }
1072
1073 static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1074 SMU74_Discrete_DpmTable *table)
1075 {
1076 int result = -EINVAL;
1077 uint8_t count;
1078 struct pp_atomctrl_clock_dividers_vi dividers;
1079 struct phm_ppt_v1_information *table_info =
1080 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1081 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1082 table_info->mm_dep_table;
1083 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1084 uint32_t vddci;
1085
1086 table->VceLevelCount = (uint8_t)(mm_table->count);
1087 table->VceBootLevel = 0;
1088
1089 for (count = 0; count < table->VceLevelCount; count++) {
1090 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1091 table->VceLevel[count].MinVoltage = 0;
1092 table->VceLevel[count].MinVoltage |=
1093 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1094
1095 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1096 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1097 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1098 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1099 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1100 else
1101 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1102
1103
1104 table->VceLevel[count].MinVoltage |=
1105 (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1106 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1107
1108 /* retrieve divider value from VBIOS */
1109 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1110 table->VceLevel[count].Frequency, &dividers);
1111 PP_ASSERT_WITH_CODE((0 == result),
1112 "can not find divide id for VCE engine clock",
1113 return result);
1114
1115 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1116
1117 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1118 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1119 }
1120 return result;
1121 }
1122
1123
1124 static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1125 SMU74_Discrete_DpmTable *table)
1126 {
1127 int result = -EINVAL;
1128 uint8_t count;
1129 struct pp_atomctrl_clock_dividers_vi dividers;
1130 struct phm_ppt_v1_information *table_info =
1131 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1132 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1133 table_info->mm_dep_table;
1134 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1135 uint32_t vddci;
1136
1137 table->SamuBootLevel = 0;
1138 table->SamuLevelCount = (uint8_t)(mm_table->count);
1139
1140 for (count = 0; count < table->SamuLevelCount; count++) {
1141 /* not sure whether we need evclk or not */
1142 table->SamuLevel[count].MinVoltage = 0;
1143 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1144 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1145 VOLTAGE_SCALE) << VDDC_SHIFT;
1146
1147 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1148 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1149 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1150 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1151 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1152 else
1153 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1154
1155 table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1156 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1157
1158 /* retrieve divider value from VBIOS */
1159 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1160 table->SamuLevel[count].Frequency, &dividers);
1161 PP_ASSERT_WITH_CODE((0 == result),
1162 "can not find divide id for samu clock", return result);
1163
1164 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1165
1166 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1167 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1168 }
1169 return result;
1170 }
1171
1172 static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1173 int32_t eng_clock, int32_t mem_clock,
1174 SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1175 {
1176 uint32_t dram_timing;
1177 uint32_t dram_timing2;
1178 uint32_t burst_time;
1179 int result;
1180
1181 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1182 eng_clock, mem_clock);
1183 PP_ASSERT_WITH_CODE(result == 0,
1184 "Error calling VBIOS to set DRAM_TIMING.", return result);
1185
1186 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1187 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1188 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1189
1190
1191 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1192 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1193 arb_regs->McArbBurstTime = (uint8_t)burst_time;
1194
1195 return 0;
1196 }
1197
1198 static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1199 {
1200 struct pp_smumgr *smumgr = hwmgr->smumgr;
1201 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1202 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1203 struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
1204 uint32_t i, j;
1205 int result = 0;
1206
1207 for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
1208 for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
1209 result = polaris10_populate_memory_timing_parameters(hwmgr,
1210 hw_data->dpm_table.sclk_table.dpm_levels[i].value,
1211 hw_data->dpm_table.mclk_table.dpm_levels[j].value,
1212 &arb_regs.entries[i][j]);
1213 if (result == 0)
1214 result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
1215 if (result != 0)
1216 return result;
1217 }
1218 }
1219
1220 result = smu7_copy_bytes_to_smc(
1221 hwmgr->smumgr,
1222 smu_data->smu7_data.arb_table_start,
1223 (uint8_t *)&arb_regs,
1224 sizeof(SMU74_Discrete_MCArbDramTimingTable),
1225 SMC_RAM_END);
1226 return result;
1227 }
1228
1229 static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1230 struct SMU74_Discrete_DpmTable *table)
1231 {
1232 int result = -EINVAL;
1233 uint8_t count;
1234 struct pp_atomctrl_clock_dividers_vi dividers;
1235 struct phm_ppt_v1_information *table_info =
1236 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1237 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1238 table_info->mm_dep_table;
1239 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1240 uint32_t vddci;
1241
1242 table->UvdLevelCount = (uint8_t)(mm_table->count);
1243 table->UvdBootLevel = 0;
1244
1245 for (count = 0; count < table->UvdLevelCount; count++) {
1246 table->UvdLevel[count].MinVoltage = 0;
1247 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1248 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1249 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1250 VOLTAGE_SCALE) << VDDC_SHIFT;
1251
1252 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1253 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1254 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1255 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1256 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1257 else
1258 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1259
1260 table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1261 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1262
1263 /* retrieve divider value from VBIOS */
1264 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1265 table->UvdLevel[count].VclkFrequency, &dividers);
1266 PP_ASSERT_WITH_CODE((0 == result),
1267 "can not find divide id for Vclk clock", return result);
1268
1269 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1270
1271 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1272 table->UvdLevel[count].DclkFrequency, &dividers);
1273 PP_ASSERT_WITH_CODE((0 == result),
1274 "can not find divide id for Dclk clock", return result);
1275
1276 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1277
1278 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1279 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1280 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1281 }
1282
1283 return result;
1284 }
1285
1286 static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1287 struct SMU74_Discrete_DpmTable *table)
1288 {
1289 int result = 0;
1290 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1291
1292 table->GraphicsBootLevel = 0;
1293 table->MemoryBootLevel = 0;
1294
1295 /* find boot level from dpm table */
1296 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1297 data->vbios_boot_state.sclk_bootup_value,
1298 (uint32_t *)&(table->GraphicsBootLevel));
1299
1300 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1301 data->vbios_boot_state.mclk_bootup_value,
1302 (uint32_t *)&(table->MemoryBootLevel));
1303
1304 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
1305 VOLTAGE_SCALE;
1306 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1307 VOLTAGE_SCALE;
1308 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
1309 VOLTAGE_SCALE;
1310
1311 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1312 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1313 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1314
1315 return 0;
1316 }
1317
1318 static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1319 {
1320 struct pp_smumgr *smumgr = hwmgr->smumgr;
1321 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1322 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1323 struct phm_ppt_v1_information *table_info =
1324 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1325 uint8_t count, level;
1326
1327 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1328
1329 for (level = 0; level < count; level++) {
1330 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1331 hw_data->vbios_boot_state.sclk_bootup_value) {
1332 smu_data->smc_state_table.GraphicsBootLevel = level;
1333 break;
1334 }
1335 }
1336
1337 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1338 for (level = 0; level < count; level++) {
1339 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1340 hw_data->vbios_boot_state.mclk_bootup_value) {
1341 smu_data->smc_state_table.MemoryBootLevel = level;
1342 break;
1343 }
1344 }
1345
1346 return 0;
1347 }
1348
1349
1350 static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1351 {
1352 uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
1353 struct pp_smumgr *smumgr = hwmgr->smumgr;
1354 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1355
1356 uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
1357 struct phm_ppt_v1_information *table_info =
1358 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1359 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1360 table_info->vdd_dep_on_sclk;
1361
1362 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1363
1364 /* Read SMU_EFUSE to calculate RO and determine
1365 * whether the part is SS or FF; if RO >= 1660MHz, the part is FF.
1366 */
1367 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1368 ixSMU_EFUSE_0 + (67 * 4));
1369 efuse &= 0xFF000000;
1370 efuse = efuse >> 24;
1371
1372 if (hwmgr->chip_id == CHIP_POLARIS10) {
1373 min = 1000;
1374 max = 2300;
1375 } else {
1376 min = 1100;
1377 max = 2100;
1378 }
1379
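	/* Map the 8-bit fuse value linearly onto the chip-specific
	 * [min, max] RO range (values in MHz). */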
1380 ro = efuse * (max - min) / 255 + min;
1381
1382 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1383 for (i = 0; i < sclk_table->count; i++) {
1384 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1385 sclk_table->entries[i].cks_enable << i;
1386 if (hwmgr->chip_id == CHIP_POLARIS10) {
1387 volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
1388 (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
1389 volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
1390 (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
1391 } else {
1392 volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
1393 (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
1394 volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
1395 (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
1396 }
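		/* Arithmetic note: (delta * 100 + 624) / 625 below is a
		 * divide-by-6.25 with round-up; e.g. a (hypothetical) delta of
		 * 13 yields (1300 + 624) / 625 = 3.
		 */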
1397
1398 if (volt_without_cks >= volt_with_cks)
1399 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1400 sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
1401
1402 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1403 }
1404
1405 smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
1406 /* Populate CKS Lookup Table */
1407 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1408 stretch_amount2 = 0;
1409 else if (stretch_amount == 3 || stretch_amount == 4)
1410 stretch_amount2 = 1;
1411 else {
1412 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1413 PHM_PlatformCaps_ClockStretcher);
1414 PP_ASSERT_WITH_CODE(false,
1415 "Stretch Amount in PPTable not supported\n",
1416 return -EINVAL);
1417 }
1418
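	/* The read-modify-write below only clears the least-significant bit of
	 * PWR_CKS_CNTL and leaves every other field of the register untouched
	 * (descriptive note on the masking; the meaning of the bit itself is
	 * not spelled out here).
	 */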
1419 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1420 value &= 0xFFFFFFFE;
1421 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1422
1423 return 0;
1424 }
1425
1426 /**
1427 * Populates the SMC VRConfig field in the DPM table.
1428 *
1429 * @param hwmgr the address of the hardware manager
1430 * @param table the SMC DPM table structure to be populated
1431 * @return always 0
1432 */
1433 static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
1434 struct SMU74_Discrete_DpmTable *table)
1435 {
1436 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1437 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
1438 uint16_t config;
1439
1440 config = VR_MERGED_WITH_VDDC;
1441 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1442
1443 /* Set Vddc Voltage Controller */
1444 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1445 config = VR_SVI2_PLANE_1;
1446 table->VRConfig |= config;
1447 } else {
1448 PP_ASSERT_WITH_CODE(false,
1449 "VDDC should be on SVI2 control in merged mode!",
1450 );
1451 }
1452 /* Set Vddci Voltage Controller */
1453 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1454 config = VR_SVI2_PLANE_2; /* only in merged mode */
1455 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1456 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1457 config = VR_SMIO_PATTERN_1;
1458 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1459 } else {
1460 config = VR_STATIC_VOLTAGE;
1461 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1462 }
1463 /* Set Mvdd Voltage Controller */
1464 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1465 config = VR_SVI2_PLANE_2;
1466 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1467 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
1468 offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
1469 } else {
1470 config = VR_STATIC_VOLTAGE;
1471 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1472 }
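	/* Descriptive summary of the branches above: in the typical all-SVI2
	 * configuration the packed value ends up as
	 *   (VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT) | VR_SVI2_PLANE_1 |
	 *   (VR_SVI2_PLANE_2 << VRCONF_VDDCI_SHIFT) |
	 *   (VR_SVI2_PLANE_2 << VRCONF_MVDD_SHIFT)
	 * No additional configuration is implied.
	 */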
1473
1474 return 0;
1475 }
1476
1477
1478 static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1479 {
1480 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1481 struct pp_smumgr *smumgr = hwmgr->smumgr;
1482 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1483
1484 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1485 int result = 0;
1486 struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
1487 AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
1488 AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
1489 uint32_t tmp, i;
1490
1491 struct phm_ppt_v1_information *table_info =
1492 (struct phm_ppt_v1_information *)hwmgr->pptable;
1493 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1494 table_info->vdd_dep_on_sclk;
1495
1496
1497 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1498 return result;
1499
1500 result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1501
1502 if (0 == result) {
1503 table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
1504 table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
1505 table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
1506 table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
1507 table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
1508 table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
1509 table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
1510 table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
1511 table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
1512 table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
1513 table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
1514 table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1515 table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1516 table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1517 table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
1518 table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
1519 table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
1520 AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
1521 AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
1522 AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
1523 AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
1524 AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
1525 AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
1526 AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
1527
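		/* The loop below converts each level's cks_voffset into
		 * 6.25 mV VID steps (x * 100 / 625, truncating) and divides
		 * sclk_offset by 100 (driver clocks are kept in 10 kHz units,
		 * so this presumably yields the MHz-style value the SMC wants).
		 */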
1528 for (i = 0; i < NUM_VFT_COLUMNS; i++) {
1529 AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
1530 AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
1531 }
1532
1533 result = smu7_read_smc_sram_dword(smumgr,
1534 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
1535 &tmp, SMC_RAM_END);
1536
1537 smu7_copy_bytes_to_smc(smumgr,
1538 tmp,
1539 (uint8_t *)&AVFS_meanNsigma,
1540 sizeof(AVFS_meanNsigma_t),
1541 SMC_RAM_END);
1542
1543 result = smu7_read_smc_sram_dword(smumgr,
1544 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
1545 &tmp, SMC_RAM_END);
1546 smu7_copy_bytes_to_smc(smumgr,
1547 tmp,
1548 (uint8_t *)&AVFS_SclkOffset,
1549 sizeof(AVFS_Sclk_Offset_t),
1550 SMC_RAM_END);
1551
1552 data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
1553 (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
1554 (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
1555 (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
1556 data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
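		/* avfs_vdroop_override_setting packs the four enable flags into
		 * one dword via the *_Vdroop_Enable_SHIFT positions; it is later
		 * handed to the SMC through PPSMC_MSG_SetGBDroopSettings in
		 * polaris10_thermal_avfs_enable() below.
		 */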
1557 }
1558 return result;
1559 }
1560
1561
1562 /**
1563 * Initialize the ARB DRAM timing table's index field.
1564 *
1565 * @param smumgr the address of the SMU manager.
1566 * @return 0 on success, otherwise an SMC SRAM read/write error code.
1567 */
1568 static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
1569 {
1570 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1571 uint32_t tmp;
1572 int result;
1573
1574 /* This is a read-modify-write on the first byte of the ARB table.
1575 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
1576 * is the field 'current'.
1577 * This solution is ugly, but we never write the whole table, only
1578 * individual fields in it.
1579 * In reality this field should not be in that structure
1580 * but in a soft register.
1581 */
1582 result = smu7_read_smc_sram_dword(smumgr,
1583 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1584
1585 if (result)
1586 return result;
1587
1588 tmp &= 0x00FFFFFF;
1589 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
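	/* Illustrative example (hypothetical readback): if the dword read back
	 * is 0x00051413 and MC_CG_ARB_FREQ_F1 is 0x0b, the value written below
	 * becomes 0x0B051413: only bits 31:24 change while the remaining ARB
	 * fields are preserved.
	 */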
1590
1591 return smu7_write_smc_sram_dword(smumgr,
1592 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1593 }
1594
1595 static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
1596 {
1597 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
1598 struct phm_ppt_v1_information *table_info =
1599 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1600
1601 if (table_info &&
1602 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
1603 table_info->cac_dtp_table->usPowerTuneDataSetID)
1604 smu_data->power_tune_defaults =
1605 &polaris10_power_tune_data_set_array
1606 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
1607 else
1608 smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
1609
1610 }
1611
1612 /**
1613 * Initializes the SMC table and uploads it to the SMC.
1614 *
1615 * @param hwmgr the address of the powerplay hardware manager.
1616 * @return 0 on success, otherwise an error code.
1617 */
1618 int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
1619 {
1620 int result;
1621 struct pp_smumgr *smumgr = hwmgr->smumgr;
1622 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1623 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1624 struct phm_ppt_v1_information *table_info =
1625 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1626 struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1627 uint8_t i;
1628 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1629 pp_atomctrl_clock_dividers_vi dividers;
1630
1631 polaris10_initialize_power_tune_defaults(hwmgr);
1632
1633 if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
1634 polaris10_populate_smc_voltage_tables(hwmgr, table);
1635
1636 table->SystemFlags = 0;
1637 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1638 PHM_PlatformCaps_AutomaticDCTransition))
1639 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1640
1641 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1642 PHM_PlatformCaps_StepVddc))
1643 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1644
1645 if (hw_data->is_memory_gddr5)
1646 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1647
1648 if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
1649 result = polaris10_populate_ulv_state(hwmgr, table);
1650 PP_ASSERT_WITH_CODE(0 == result,
1651 "Failed to initialize ULV state!", return result);
1652 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1653 ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
1654 }
1655
1656 result = polaris10_populate_smc_link_level(hwmgr, table);
1657 PP_ASSERT_WITH_CODE(0 == result,
1658 "Failed to initialize Link Level!", return result);
1659
1660 result = polaris10_populate_all_graphic_levels(hwmgr);
1661 PP_ASSERT_WITH_CODE(0 == result,
1662 "Failed to initialize Graphics Level!", return result);
1663
1664 result = polaris10_populate_all_memory_levels(hwmgr);
1665 PP_ASSERT_WITH_CODE(0 == result,
1666 "Failed to initialize Memory Level!", return result);
1667
1668 result = polaris10_populate_smc_acpi_level(hwmgr, table);
1669 PP_ASSERT_WITH_CODE(0 == result,
1670 "Failed to initialize ACPI Level!", return result);
1671
1672 result = polaris10_populate_smc_vce_level(hwmgr, table);
1673 PP_ASSERT_WITH_CODE(0 == result,
1674 "Failed to initialize VCE Level!", return result);
1675
1676 result = polaris10_populate_smc_samu_level(hwmgr, table);
1677 PP_ASSERT_WITH_CODE(0 == result,
1678 "Failed to initialize SAMU Level!", return result);
1679
1680 /* Since only the initial state is completely set up at this point
1681 * (the other states are just copies of the boot state) we only
1682 * need to populate the ARB settings for the initial state.
1683 */
1684 result = polaris10_program_memory_timing_parameters(hwmgr);
1685 PP_ASSERT_WITH_CODE(0 == result,
1686 "Failed to Write ARB settings for the initial state.", return result);
1687
1688 result = polaris10_populate_smc_uvd_level(hwmgr, table);
1689 PP_ASSERT_WITH_CODE(0 == result,
1690 "Failed to initialize UVD Level!", return result);
1691
1692 result = polaris10_populate_smc_boot_level(hwmgr, table);
1693 PP_ASSERT_WITH_CODE(0 == result,
1694 "Failed to initialize Boot Level!", return result);
1695
1696 result = polaris10_populate_smc_initailial_state(hwmgr);
1697 PP_ASSERT_WITH_CODE(0 == result,
1698 "Failed to initialize Boot State!", return result);
1699
1700 result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
1701 PP_ASSERT_WITH_CODE(0 == result,
1702 "Failed to populate BAPM Parameters!", return result);
1703
1704 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1705 PHM_PlatformCaps_ClockStretcher)) {
1706 result = polaris10_populate_clock_stretcher_data_table(hwmgr);
1707 PP_ASSERT_WITH_CODE(0 == result,
1708 "Failed to populate Clock Stretcher Data Table!",
1709 return result);
1710 }
1711
1712 result = polaris10_populate_avfs_parameters(hwmgr);
1713 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
1714
1715 table->CurrSclkPllRange = 0xff;
1716 table->GraphicsVoltageChangeEnable = 1;
1717 table->GraphicsThermThrottleEnable = 1;
1718 table->GraphicsInterval = 1;
1719 table->VoltageInterval = 1;
1720 table->ThermalInterval = 1;
1721 table->TemperatureLimitHigh =
1722 table_info->cac_dtp_table->usTargetOperatingTemp *
1723 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1724 table->TemperatureLimitLow =
1725 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
1726 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1727 table->MemoryVoltageChangeEnable = 1;
1728 table->MemoryInterval = 1;
1729 table->VoltageResponseTime = 0;
1730 table->PhaseResponseTime = 0;
1731 table->MemoryThermThrottleEnable = 1;
1732 table->PCIeBootLinkLevel = 0;
1733 table->PCIeGenInterval = 1;
1734 table->VRConfig = 0;
1735
1736 result = polaris10_populate_vr_config(hwmgr, table);
1737 PP_ASSERT_WITH_CODE(0 == result,
1738 "Failed to populate VRConfig setting!", return result);
1739
1740 table->ThermGpio = 17;
1741 table->SclkStepSize = 0x4000;
1742
1743 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
1744 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
1745 } else {
1746 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
1747 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1748 PHM_PlatformCaps_RegulatorHot);
1749 }
1750
1751 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
1752 &gpio_pin)) {
1753 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
1754 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1755 PHM_PlatformCaps_AutomaticDCTransition);
1756 } else {
1757 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
1758 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1759 PHM_PlatformCaps_AutomaticDCTransition);
1760 }
1761
1762 /* Thermal Output GPIO */
1763 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
1764 &gpio_pin)) {
1765 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1766 PHM_PlatformCaps_ThermalOutGPIO);
1767
1768 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
1769
1770 		/* For polarity, read GPIOPAD_A for the assigned GPIO pin:
1771 		 * the VBIOS programs this register to the 'inactive state',
1772 		 * so the driver can derive the 'active state' from it and
1773 		 * program the SMU with the correct polarity.
1774 */
1775 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
1776 & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
1777 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
1778
1779 /* if required, combine VRHot/PCC with thermal out GPIO */
1780 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
1781 && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
1782 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
1783 } else {
1784 table->ThermOutGpio = 17;
1785 table->ThermOutPolarity = 1;
1786 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
1787 }
1788
1789 /* Populate BIF_SCLK levels into SMC DPM table */
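	/* The loop runs pcie_speed_table.count + 1 times: divider 0 feeds the
	 * ULV state and dividers 1..count feed LinkLevel[0..count-1], which is
	 * why the loop bound below is inclusive.
	 */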
1790 for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
1791 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
1792 PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
1793
1794 if (i == 0)
1795 table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
1796 else
1797 table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
1798 }
1799
1800 for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
1801 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
1802
1803 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
1804 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
1805 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
1806 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
1807 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
1808 CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
1809 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
1810 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
1811 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
1812 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
1813
1814 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
1815 result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
1816 smu_data->smu7_data.dpm_table_start +
1817 offsetof(SMU74_Discrete_DpmTable, SystemFlags),
1818 (uint8_t *)&(table->SystemFlags),
1819 sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
1820 SMC_RAM_END);
1821 PP_ASSERT_WITH_CODE(0 == result,
1822 "Failed to upload dpm data to SMC memory!", return result);
1823
1824 result = polaris10_init_arb_table_index(hwmgr->smumgr);
1825 PP_ASSERT_WITH_CODE(0 == result,
1826 "Failed to upload arb data to SMC memory!", return result);
1827
1828 result = polaris10_populate_pm_fuses(hwmgr);
1829 PP_ASSERT_WITH_CODE(0 == result,
1830 "Failed to populate PM fuses to SMC memory!", return result);
1831 return 0;
1832 }
1833
1834 static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
1835 {
1836 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1837
1838 	if (data->need_update_smu7_dpm_table &
1839 			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
1840 return polaris10_program_memory_timing_parameters(hwmgr);
1841
1842 return 0;
1843 }
1844
1845 int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
1846 {
1847 int ret;
1848 struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
1849 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1850 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1851
1852 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1853 return 0;
1854
1855 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1856 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
1857
1858 ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
1859 0 : -1;
1860
1861 if (!ret)
1862 /* If this param is not changed, this function could fire unnecessarily */
1863 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
1864
1865 return ret;
1866 }
1867
1868 /**
1869 * Set up the fan table to control the fan using the SMC.
1870 * @param hwmgr the address of the powerplay hardware manager.
1875 * @return always 0
1876 */
1877 int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
1878 {
1879 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
1880 SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1881 uint32_t duty100;
1882 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1883 uint16_t fdo_min, slope1, slope2;
1884 uint32_t reference_clock;
1885 int res;
1886 uint64_t tmp64;
1887
1888 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
1889 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1890 PHM_PlatformCaps_MicrocodeFanControl);
1891 return 0;
1892 }
1893
1894 if (smu_data->smu7_data.fan_table_start == 0) {
1895 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1896 PHM_PlatformCaps_MicrocodeFanControl);
1897 return 0;
1898 }
1899
1900 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1901 CG_FDO_CTRL1, FMAX_DUTY100);
1902
1903 if (duty100 == 0) {
1904 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1905 PHM_PlatformCaps_MicrocodeFanControl);
1906 return 0;
1907 }
1908
1909 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
1910 usPWMMin * duty100;
1911 do_div(tmp64, 10000);
1912 fdo_min = (uint16_t)tmp64;
1913
1914 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
1915 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
1916 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
1917 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
1918
1919 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
1920 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
1921 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
1922 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
1923
1924 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1925 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
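	/* Descriptive note on the arithmetic above and below: the thermal
	 * controller temperatures and PWM limits are stored in hundredths, so
	 * the "+ 50 ... / 100" pattern rounds them to whole units for the SMC,
	 * while the factor of 16 keeps extra fractional resolution in the
	 * slopes (the exact fixed-point format is defined by the SMU74 fan
	 * table, not restated here).
	 */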
1926
1927 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
1928 thermal_controller.advanceFanControlParameters.usTMin) / 100);
1929 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
1930 thermal_controller.advanceFanControlParameters.usTMed) / 100);
1931 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
1932 thermal_controller.advanceFanControlParameters.usTMax) / 100);
1933
1934 fan_table.Slope1 = cpu_to_be16(slope1);
1935 fan_table.Slope2 = cpu_to_be16(slope2);
1936
1937 fan_table.FdoMin = cpu_to_be16(fdo_min);
1938
1939 fan_table.HystDown = cpu_to_be16(hwmgr->
1940 thermal_controller.advanceFanControlParameters.ucTHyst);
1941
1942 fan_table.HystUp = cpu_to_be16(1);
1943
1944 fan_table.HystSlope = cpu_to_be16(1);
1945
1946 fan_table.TempRespLim = cpu_to_be16(5);
1947
1948 reference_clock = smu7_get_xclk(hwmgr);
1949
1950 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
1951 thermal_controller.advanceFanControlParameters.ulCycleDelay *
1952 reference_clock) / 1600);
1953
1954 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
1955
1956 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
1957 hwmgr->device, CGS_IND_REG__SMC,
1958 CG_MULT_THERMAL_CTRL, TEMP_SEL);
1959
1960 res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
1961 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
1962 SMC_RAM_END);
1963
1964 if (!res && hwmgr->thermal_controller.
1965 advanceFanControlParameters.ucMinimumPWMLimit)
1966 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1967 PPSMC_MSG_SetFanMinPwm,
1968 hwmgr->thermal_controller.
1969 advanceFanControlParameters.ucMinimumPWMLimit);
1970
1971 if (!res && hwmgr->thermal_controller.
1972 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
1973 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1974 PPSMC_MSG_SetFanSclkTarget,
1975 hwmgr->thermal_controller.
1976 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
1977
1978 if (res)
1979 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1980 PHM_PlatformCaps_MicrocodeFanControl);
1981
1982 return 0;
1983 }
1984
1985 static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
1986 {
1987 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
1988 uint32_t mm_boot_level_offset, mm_boot_level_value;
1989 struct phm_ppt_v1_information *table_info =
1990 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1991
1992 smu_data->smc_state_table.UvdBootLevel = 0;
1993 if (table_info->mm_dep_table->count > 0)
1994 smu_data->smc_state_table.UvdBootLevel =
1995 (uint8_t) (table_info->mm_dep_table->count - 1);
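	/* UvdBootLevel is a single byte inside the DPM table in SMC memory:
	 * round its offset down to the containing dword, then patch it with an
	 * indirect read-modify-write. The mask and shift below select the byte
	 * lane that UvdBootLevel occupies within that dword.
	 */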
1996 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
1997 UvdBootLevel);
1998 mm_boot_level_offset /= 4;
1999 mm_boot_level_offset *= 4;
2000 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2001 CGS_IND_REG__SMC, mm_boot_level_offset);
2002 mm_boot_level_value &= 0x00FFFFFF;
2003 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2004 cgs_write_ind_register(hwmgr->device,
2005 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2006
2007 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2008 PHM_PlatformCaps_UVDDPM) ||
2009 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2010 PHM_PlatformCaps_StablePState))
2011 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2012 PPSMC_MSG_UVDDPM_SetEnabledMask,
2013 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2014 return 0;
2015 }
2016
2017 static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2018 {
2019 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2020 uint32_t mm_boot_level_offset, mm_boot_level_value;
2021 struct phm_ppt_v1_information *table_info =
2022 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2023
2024 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2025 PHM_PlatformCaps_StablePState))
2026 smu_data->smc_state_table.VceBootLevel =
2027 (uint8_t) (table_info->mm_dep_table->count - 1);
2028 else
2029 smu_data->smc_state_table.VceBootLevel = 0;
2030
2031 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2032 offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2033 mm_boot_level_offset /= 4;
2034 mm_boot_level_offset *= 4;
2035 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2036 CGS_IND_REG__SMC, mm_boot_level_offset);
2037 mm_boot_level_value &= 0xFF00FFFF;
2038 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2039 cgs_write_ind_register(hwmgr->device,
2040 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2041
2042 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2043 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2044 PPSMC_MSG_VCEDPM_SetEnabledMask,
2045 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2046 return 0;
2047 }
2048
2049 static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2050 {
2051 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2052 uint32_t mm_boot_level_offset, mm_boot_level_value;
2053
2054
2055 smu_data->smc_state_table.SamuBootLevel = 0;
2056 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2057 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2058
2059 mm_boot_level_offset /= 4;
2060 mm_boot_level_offset *= 4;
2061 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2062 CGS_IND_REG__SMC, mm_boot_level_offset);
2063 mm_boot_level_value &= 0xFFFFFF00;
2064 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2065 cgs_write_ind_register(hwmgr->device,
2066 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2067
2068 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2069 PHM_PlatformCaps_StablePState))
2070 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2071 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2072 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2073 return 0;
2074 }
2075
2076
2077 static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
2078 {
2079 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2080 struct phm_ppt_v1_information *table_info =
2081 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2082 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
2083 int max_entry, i;
2084
2085 max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
2086 SMU74_MAX_LEVELS_LINK :
2087 pcie_table->count;
2088 /* Setup BIF_SCLK levels */
2089 for (i = 0; i < max_entry; i++)
2090 smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
2091 return 0;
2092 }
2093
2094 int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2095 {
2096 switch (type) {
2097 case SMU_UVD_TABLE:
2098 polaris10_update_uvd_smc_table(hwmgr);
2099 break;
2100 case SMU_VCE_TABLE:
2101 polaris10_update_vce_smc_table(hwmgr);
2102 break;
2103 case SMU_SAMU_TABLE:
2104 polaris10_update_samu_smc_table(hwmgr);
2105 break;
2106 case SMU_BIF_TABLE:
2107 		polaris10_update_bif_smc_table(hwmgr);
		break;
2108 default:
2109 break;
2110 }
2111 return 0;
2112 }
2113
2114 int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2115 {
2116 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2117 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2118
2119 int result = 0;
2120 uint32_t low_sclk_interrupt_threshold = 0;
2121
2122 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2123 PHM_PlatformCaps_SclkThrottleLowNotification)
2124 && (hwmgr->gfx_arbiter.sclk_threshold !=
2125 data->low_sclk_interrupt_threshold)) {
2126 data->low_sclk_interrupt_threshold =
2127 hwmgr->gfx_arbiter.sclk_threshold;
2128 low_sclk_interrupt_threshold =
2129 data->low_sclk_interrupt_threshold;
2130
2131 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2132
2133 result = smu7_copy_bytes_to_smc(
2134 hwmgr->smumgr,
2135 smu_data->smu7_data.dpm_table_start +
2136 offsetof(SMU74_Discrete_DpmTable,
2137 LowSclkInterruptThreshold),
2138 (uint8_t *)&low_sclk_interrupt_threshold,
2139 sizeof(uint32_t),
2140 SMC_RAM_END);
2141 }
2142 PP_ASSERT_WITH_CODE((result == 0),
2143 "Failed to update SCLK threshold!", return result);
2144
2145 result = polaris10_program_mem_timing_parameters(hwmgr);
2146 PP_ASSERT_WITH_CODE((result == 0),
2147 "Failed to program memory timing parameters!",
2148 );
2149
2150 return result;
2151 }
2152
2153 uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
2154 {
2155 switch (type) {
2156 case SMU_SoftRegisters:
2157 switch (member) {
2158 case HandshakeDisables:
2159 return offsetof(SMU74_SoftRegisters, HandshakeDisables);
2160 case VoltageChangeTimeout:
2161 return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
2162 case AverageGraphicsActivity:
2163 return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
2164 case PreVBlankGap:
2165 return offsetof(SMU74_SoftRegisters, PreVBlankGap);
2166 case VBlankTimeout:
2167 return offsetof(SMU74_SoftRegisters, VBlankTimeout);
2168 case UcodeLoadStatus:
2169 return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
2170 		}
		break;
2171 case SMU_Discrete_DpmTable:
2172 switch (member) {
2173 case UvdBootLevel:
2174 return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
2175 case VceBootLevel:
2176 return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2177 case SamuBootLevel:
2178 return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2179 case LowSclkInterruptThreshold:
2180 return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
2181 }
2182 }
2183 printk("cant't get the offset of type %x member %x \n", type, member);
2184 return 0;
2185 }
2186
2187 uint32_t polaris10_get_mac_definition(uint32_t value)
2188 {
2189 switch (value) {
2190 case SMU_MAX_LEVELS_GRAPHICS:
2191 return SMU74_MAX_LEVELS_GRAPHICS;
2192 case SMU_MAX_LEVELS_MEMORY:
2193 return SMU74_MAX_LEVELS_MEMORY;
2194 case SMU_MAX_LEVELS_LINK:
2195 return SMU74_MAX_LEVELS_LINK;
2196 case SMU_MAX_ENTRIES_SMIO:
2197 return SMU74_MAX_ENTRIES_SMIO;
2198 case SMU_MAX_LEVELS_VDDC:
2199 return SMU74_MAX_LEVELS_VDDC;
2200 case SMU_MAX_LEVELS_VDDGFX:
2201 return SMU74_MAX_LEVELS_VDDGFX;
2202 case SMU_MAX_LEVELS_VDDCI:
2203 return SMU74_MAX_LEVELS_VDDCI;
2204 case SMU_MAX_LEVELS_MVDD:
2205 return SMU74_MAX_LEVELS_MVDD;
2206 case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
2207 return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
2208 }
2209
2210 printk("cant't get the mac of %x \n", value);
2211 return 0;
2212 }
2213
2214 /**
2215 * Get the location of various tables inside the FW image.
2216 *
2217 * @param hwmgr the address of the powerplay hardware manager.
2218 * @return 0 on success, -1 if any required header field could not be read.
2219 */
2220 int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
2221 {
2222 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2223 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2224 uint32_t tmp;
2225 int result;
2226 bool error = false;
2227
2228 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2229 SMU7_FIRMWARE_HEADER_LOCATION +
2230 offsetof(SMU74_Firmware_Header, DpmTable),
2231 &tmp, SMC_RAM_END);
2232
2233 if (0 == result)
2234 smu_data->smu7_data.dpm_table_start = tmp;
2235
2236 error |= (0 != result);
2237
2238 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2239 SMU7_FIRMWARE_HEADER_LOCATION +
2240 offsetof(SMU74_Firmware_Header, SoftRegisters),
2241 &tmp, SMC_RAM_END);
2242
2243 if (!result) {
2244 data->soft_regs_start = tmp;
2245 smu_data->smu7_data.soft_regs_start = tmp;
2246 }
2247
2248 error |= (0 != result);
2249
2250 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2251 SMU7_FIRMWARE_HEADER_LOCATION +
2252 offsetof(SMU74_Firmware_Header, mcRegisterTable),
2253 &tmp, SMC_RAM_END);
2254
2255 if (!result)
2256 smu_data->smu7_data.mc_reg_table_start = tmp;
2257
2258 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2259 SMU7_FIRMWARE_HEADER_LOCATION +
2260 offsetof(SMU74_Firmware_Header, FanTable),
2261 &tmp, SMC_RAM_END);
2262
2263 if (!result)
2264 smu_data->smu7_data.fan_table_start = tmp;
2265
2266 error |= (0 != result);
2267
2268 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2269 SMU7_FIRMWARE_HEADER_LOCATION +
2270 offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
2271 &tmp, SMC_RAM_END);
2272
2273 if (!result)
2274 smu_data->smu7_data.arb_table_start = tmp;
2275
2276 error |= (0 != result);
2277
2278 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2279 SMU7_FIRMWARE_HEADER_LOCATION +
2280 offsetof(SMU74_Firmware_Header, Version),
2281 &tmp, SMC_RAM_END);
2282
2283 if (!result)
2284 hwmgr->microcode_version_info.SMC = tmp;
2285
2286 error |= (0 != result);
2287
2288 return error ? -1 : 0;
2289 }
2290
2291 bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
2292 {
2293 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2294 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2295 ? true : false;
2296 }
2297