1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25 #include <linux/delay.h>
26 #include "dm_services.h"
27 #include "dcn20/dcn20_hubbub.h"
28 #include "dcn21_hubbub.h"
29 #include "reg_helper.h"
30
/* Register access helpers for the reg_helper macros; all of them expect a
 * local `hubbub1` (struct dcn20_hubbub *) to be in scope.
 * Note: the original file defined REG/CTX/FN twice back to back; the
 * redundant second copy has been removed.
 */
#define REG(reg)\
	hubbub1->regs->reg

#define DC_LOGGER \
	hubbub1->base.ctx->logger

#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name
51
/*
 * Convert a watermark given in nanoseconds into DLG refclk cycles and
 * clamp the result to the capacity of the destination register field.
 *
 * wm_ns:       watermark in nanoseconds
 * refclk_mhz:  DLG reference clock in MHz
 * clamp_value: maximum value the register field can hold
 */
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t refclk_cycles = (wm_ns * refclk_mhz) / 1000;

	return (refclk_cycles > clamp_value) ? clamp_value : refclk_cycles;
}
66
/*
 * Bring up the DC host VM (DCHVM) block and wait for the rIOMMU to become
 * active.  On success (or if the prefetch had already completed earlier)
 * hubbub->riommu_active is set; otherwise it is left untouched.
 */
void dcn21_dchvm_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t riommu_active, prefetch_done;
	int i;

	REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);

	/* Prefetch already done from a previous init: nothing more to do. */
	if (prefetch_done) {
		hubbub->riommu_active = true;
		return;
	}
	//Init DCHVM block
	REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

	//Poll until RIOMMU_ACTIVE = 1 (up to 100 tries, 5 us apart)
	for (i = 0; i < 100; i++) {
		REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

		if (riommu_active)
			break;
		else
			udelay(5);
	}

	/* Only continue bring-up if the rIOMMU actually came alive. */
	if (riommu_active) {
		//Reflect the power status of DCHUBBUB
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

		//Start rIOMMU prefetching
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

		// Enable dynamic clock gating
		REG_UPDATE_4(DCHVM_CLK_CTRL,
				HVM_DISPCLK_R_GATE_DIS, 0,
				HVM_DISPCLK_G_GATE_DIS, 0,
				HVM_DCFCLK_R_GATE_DIS, 0,
				HVM_DCFCLK_G_GATE_DIS, 0);

		//Poll until HOSTVM_PREFETCH_DONE = 1 (5 us interval, 100 tries)
		REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);

		hubbub->riommu_active = true;
	}
}
112
/*
 * Program the DCHUB system apertures (frame buffer and AGP ranges) from the
 * physical address config, set up VMID 0 from the GART range when one is
 * provided, then initialize the DCHVM block.
 *
 * Returns the number of VMIDs available on this hubbub instance.
 */
int hubbub21_init_dchub(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	/* Aperture registers take bits [47:24] of the physical address. */
	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	/* A zero-length GART range (start == end) means no page table. */
	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		/* Page-table addresses are programmed in 4KB units. */
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return hubbub1->num_vmid;
}
146
/*
 * Program the urgent, fractional-urgent-bandwidth and urgent-latency
 * watermarks for clock states A through D.
 *
 * Each watermark is written only when it is safe: always when safe_to_lower
 * is set, otherwise only when the new value raises the cached one.  A lower
 * value that cannot be applied yet is reported via the return value so the
 * caller can retry once lowering becomes safe.
 *
 * Fix vs. original: the clock-state B, C and D frac_urg_bw_flip/nom
 * sections compared and cached the clock-state A fields (copy-paste
 * error) while writing the B/C/D registers; they now use the matching
 * b/c/d watermark fields.
 *
 * Returns true if at least one watermark is still pending.
 */
bool hubbub21_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub1->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub1->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
		hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub1->watermarks.b.frac_urg_bw_flip) {
		hubbub1->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub1->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub1->watermarks.b.frac_urg_bw_nom) {
		hubbub1->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub1->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
		hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub1->watermarks.c.frac_urg_bw_flip) {
		hubbub1->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub1->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub1->watermarks.c.frac_urg_bw_nom) {
		hubbub1->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub1->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
		hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub1->watermarks.d.frac_urg_bw_flip) {
		hubbub1->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub1->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub1->watermarks.d.frac_urg_bw_nom) {
		hubbub1->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub1->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
		hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}
340
/*
 * Program the self-refresh (stutter) enter and exit watermarks for clock
 * states A through D, following the same safe_to_lower policy as the
 * urgent watermarks.
 *
 * Fix vs. original: the B, C and D SR-exit writes programmed the
 * DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A field (copy-paste error);
 * they now program the matching _B/_C/_D fields.
 *
 * Returns true if at least one watermark is still pending.
 */
bool hubbub21_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}
493
/*
 * Program the DRAM-clock-change (p-state) watermarks for clock states A
 * through D, with the same safe_to_lower policy as the other watermark
 * programmers.
 *
 * Fix vs. original: the clock-state B else-branch assigned
 * `wm_pending = false`, silently clearing nothing but also failing to
 * report a pending lower value like every other branch does; it now sets
 * `wm_pending = true`.
 *
 * Returns true if at least one watermark is still pending.
 */
bool hubbub21_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}
579
/*
 * Program all watermark groups (urgent, stutter, p-state) and then apply
 * the fixed arbiter configuration.  Returns true when any watermark could
 * not be lowered yet and remains pending.
 */
bool hubbub21_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* Accumulate the pending status across all three watermark groups. */
	wm_pending |= hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF)
	 * to turn off it for now.
	 */
	REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
			DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	return wm_pending;
}
623
/*
 * Read back the currently programmed watermarks for sets A-D into *wm.
 * The output struct is zeroed first, so any field not read here is 0.
 */
void hubbub21_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	/* NOTE(review): "dram_clk_chanage" is a pre-existing typo in the
	 * struct field name declared elsewhere; it cannot be renamed here. */
	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);

	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
688
/*
 * Hardware workaround for erratum DEDCN21-147: read the urgency watermark A
 * register and write the same value straight back.  The read-modify-nothing
 * write itself is the workaround — do not "simplify" it away.
 */
void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
697
/* Hubbub vtable for DCN 2.1: reuses DCN 2.0 implementations where no
 * 2.1-specific behavior is needed. */
static const struct hubbub_funcs hubbub21_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub21_init_dchub,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub21_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
	.hubbub_read_state = hubbub2_read_state,
};
712
/*
 * Initialize a DCN 2.1 hubbub instance: wire up the context, the 2.1
 * function table, and the register/shift/mask tables supplied by the
 * resource layer.
 */
void hubbub21_construct(struct dcn20_hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	hubbub->base.ctx = ctx;

	hubbub->base.funcs = &hubbub21_funcs;

	hubbub->regs = hubbub_regs;
	hubbub->shifts = hubbub_shift;
	hubbub->masks = hubbub_mask;

	/* Debug index used when reading back p-state test status. */
	hubbub->debug_test_index_pstate = 0xB;
	hubbub->detile_buf_size = 164 * 1024; /* 164KB; value carried over from DCN2.0 */
}
730