/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for the numerator
 * @den_min: minimum value for the denominator
 *
 * Find the greatest common divisor and divide both numerator and
 * denominator by it, but make sure the numerator and denominator stay
 * at least as large as their minimum values.
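 *
 * Illustrative example (values chosen for readability, not taken from any
 * real PLL limit): 300/200 reduces to 3/2 via gcd 100; with nom_min = 10
 * the ratio is then scaled up by DIV_ROUND_UP(10, 3) = 4, giving 12/8.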
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure the numerator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}

/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @adev: amdgpu device structure
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate the feedback and reference divider for a given post divider.
 * Makes sure we stay within the limits.
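 *
 * Illustrative example (made-up values, assuming fb_div_max is large
 * enough): for nom/den = 4/1, post_div = 2 and ref_div_max = 64 this picks
 * ref_div = 1 and fb_div = 8, since fb_div is rounded from
 * nom * ref_div * post_div / den.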
 */
static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
				      unsigned int den, unsigned int post_div,
				      unsigned int fb_div_max, unsigned int ref_div_max,
				      unsigned int *fb_div, unsigned int *ref_div)
{
	/* limit reference * post divider to a maximum */
	if (adev->family == AMDGPU_FAMILY_SI)
		ref_div_max = min(100 / post_div, ref_div_max);
	else
		ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @adev: amdgpu device structure
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
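 *
 * Worked example with illustrative numbers (not tied to any particular
 * ASIC): ref_freq = 2700, feedback_div = 80, ref_div = 2 and post_div = 4
 * give dot_clock = 2700 * 80 / (2 * 4) = 27000, in the same units as
 * ref_freq.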
 */
void amdgpu_pll_compute(struct amdgpu_device *adev,
			struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;
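	/*
	 * Note: with AMDGPU_PLL_USE_FRAC_FB_DIV the whole calculation runs
	 * at ten times the resolution of pll->reference_freq, so that the
	 * last decimal digit of the feedback divider can be split off as the
	 * fractional part at the end; otherwise the requested frequency is
	 * scaled down to reference clock units right away.
	 */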

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;

		amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
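	/*
	 * Illustrative reading of the formula below: the smaller the
	 * fractional digit, the larger the integer feedback divider has to
	 * be; a .1 fraction requires fb_div >= 22.0 while a .9 fraction only
	 * requires fb_div >= 6.0 (assuming min_feedback_div does not already
	 * demand more).
	 */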
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

/**
 * amdgpu_pll_get_use_mask - look up a mask of which PPLLs are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
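 * Each set bit in the returned mask, (1 << pll_id), marks a PPLL that is
 * already assigned to another crtc.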
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}

285 /**
286 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
287 *
288 * @crtc: drm crtc
289 *
290 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
291 * also in DP mode. For DP, a single PPLL can be used for all DP
292 * crtcs/encoders.
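 * Returns ATOM_PPLL_INVALID if no other DP crtc with an assigned PPLL is
 * found.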
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
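 * A PPLL can only be shared when the other crtc drives the same connector
 * or runs at the same (adjusted) clock with the same spread spectrum
 * setting; otherwise, or when the adjusted clock of @crtc is not yet known,
 * ATOM_PPLL_INVALID is returned.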
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}