/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define gk104_clk(p) container_of((p), struct gk104_clk, base)
#include "priv.h"
#include "pll.h"

#include <subdev/timer.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>

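/*
 * Per-domain state computed by the calc() hook and written out by the
 * prog() hook: the achievable frequency in kHz (freq), the PLL-select
 * bit (ssel), the final divider (mdiv), the bypass source/divider pair
 * (dsrc/ddiv) and the PLL coefficients (coef).  Field roles here are
 * inferred from calc_clk() and the gk104_clk_prog_*() stages below.
 */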
struct gk104_clk_info {
	u32 freq;
	u32 ssel;
	u32 mdiv;
	u32 dsrc;
	u32 ddiv;
	u32 coef;
};

struct gk104_clk {
	struct nvkm_clk base;
	struct gk104_clk_info eng[16];
};

static u32 read_div(struct gk104_clk *, int, u32, u32);
static u32 read_pll(struct gk104_clk *, u32);

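/*
 * Bit 8 of the divider's source register appears to choose between the
 * two reference VCOs; report whichever currently feeds this divider.
 */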
static u32
read_vco(struct gk104_clk *clk, u32 dsrc)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc);
	if (!(ssrc & 0x00000100))
		return read_pll(clk, 0x00e800);
	return read_pll(clk, 0x00e820);
}

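/*
 * Read back a PLL's output frequency in kHz.  With N/M/P decoded from
 * the coefficient register, the result works out to
 * refclk * (N + (fN + 4096) / 8192) / (M * P); fN defaults to 0xf000,
 * which makes the fractional term vanish for PLLs that lack one.
 */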
static u32
read_pll(struct gk104_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk;
	u16 fN = 0xf000;

	if (!(ctrl & 0x00000001))
		return 0;

	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		sclk = read_pll(clk, 0x132020);
		P = (coef & 0x10000000) ? 2 : 1;
		break;
	case 0x132020:
		sclk = read_div(clk, 0, 0x137320, 0x137330);
		fN   = nvkm_rd32(device, pll + 0x10) >> 16;
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	if (P == 0)
		P = 1;

	sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
	return sclk / (M * P);
}

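/*
 * Decode a divider's input: source 0 is the crystal (or a fixed 108MHz
 * when bits 16-17 are both set), source 2 is the 100MHz reference, and
 * source 3 is a VCO, divided in half-steps when bit 31 of the control
 * register is set (output = vco * 2 / (div + 2)).
 */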
static u32
read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;
	case 3:
		if (sctl & 0x80000000) {
			u32 sclk = read_vco(clk, dsrc + (doff * 4));
			u32 sdiv = (sctl & 0x0000003f) + 2;
			return (sclk * 2) / sdiv;
		}

		return read_vco(clk, dsrc + (doff * 4));
	default:
		return 0;
	}
}

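/* The low bits of 0x1373f4 appear to select which PLL drives MCLK. */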
static u32
read_mem(struct gk104_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
	case 1: return read_pll(clk, 0x132020);
	case 2: return read_pll(clk, 0x132000);
	default:
		return 0;
	}
}

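/*
 * Read back one of the engine clock domains.  Domains 0-6 each have a
 * dedicated PLL selected per-domain via 0x137100; the higher domains
 * can only borrow the 0x1370e0 PLL.  In either case a final half-step
 * divider ((sclk * 2) / div) may still be applied via 0x137250.
 */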
static u32
read_clk(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 sclk, sdiv;

	if (idx < 7) {
		u32 ssel = nvkm_rd32(device, 0x137100);
		if (ssel & (1 << idx)) {
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
			sdiv = 1;
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;
		}
	} else {
		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
		if ((ssrc & 0x00000003) == 0x00000003) {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			if (ssrc & 0x00000100) {
				if (ssrc & 0x40000000)
					sclk = read_pll(clk, 0x1370e0);
				sdiv = 1;
			} else {
				sdiv = 0;
			}
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;
		}
	}

	if (sctl & 0x80000000) {
		if (sdiv)
			sdiv = ((sctl & 0x00003f00) >> 8) + 2;
		else
			sdiv = ((sctl & 0x0000003f) >> 0) + 2;
		return (sclk * 2) / sdiv;
	}

	return sclk;
}

static int
gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_mem:
		return read_mem(clk);
	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_daemon:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

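/*
 * Pick a divider (2-65) for the target frequency.  The hardware
 * divides by div/2, so the result is (ref * 2) / div; the divider is
 * rounded down, leaving the output at or above the requested rate,
 * and *ddiv receives the raw field value (div - 2).
 */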
static u32
calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = min((ref * 2) / freq, (u32)65);
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}

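/*
 * Choose a bypass source for the target frequency: use one of the
 * fixed references (27MHz/108MHz crystal paths, 100MHz) when it hits
 * the target exactly, otherwise fall back to dividing the VCO.
 */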
static u32
calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}

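/*
 * Compute PLL coefficients for the target frequency within the limits
 * the VBIOS advertises for this domain's PLL.  Returns the achievable
 * frequency in kHz, or 0 on failure.
 */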
static u32
calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}

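/*
 * Work out how to reach the target frequency for one domain: try a
 * divider-only path first, then (where the hardware has one) a PLL
 * path, and keep whichever lands closer to the target.
 */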
static int
calc_clk(struct gk104_clk *clk,
	 struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << idx);
		info->dsrc = 0x40000100;
		info->freq = clk1;
	}

	return 0;
}

static int
gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk104_clk *clk = gk104_clk(base);
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
		return ret;

	return 0;
}

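/*
 * The helpers below are the individual stages of gk104_clk_prog(), in
 * execution order: program the bypass divider, switch the domain onto
 * it, reprogram the PLL (waiting for what is presumably a lock bit),
 * set the final divider, then switch back to the PLL.  The exact
 * hardware sequencing requirements are inferred from the code rather
 * than documented.
 */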
static void
gk104_clk_prog_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (!info->ssel) {
		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
	}
}

static void
gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}

static void
gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
}

static void
gk104_clk_prog_2(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);
	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
	if (info->coef) {
		nvkm_wr32(device, addr + 0x04, info->coef);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
				break;
		);
		nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
	}
}

static void
gk104_clk_prog_3(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel)
		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
	else
		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
}

static void
gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}

static void
gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
	}
}

static int
gk104_clk_prog(struct nvkm_clk *base)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct {
		u32 mask;
		void (*exec)(struct gk104_clk *, int);
	} stage[] = {
		{ 0x007f, gk104_clk_prog_0   }, /* div programming */
		{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
		{ 0xff80, gk104_clk_prog_1_1 },
		{ 0x00ff, gk104_clk_prog_2   }, /* (maybe) program pll */
		{ 0xff80, gk104_clk_prog_3   }, /* final divider */
		{ 0x007f, gk104_clk_prog_4_0 }, /* (maybe) select pll mode */
		{ 0xff80, gk104_clk_prog_4_1 },
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(stage); i++) {
		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
			if (!(stage[i].mask & (1 << j)))
				continue;
			if (!clk->eng[j].freq)
				continue;
			stage[i].exec(clk, j);
		}
	}

	return 0;
}

static void
gk104_clk_tidy(struct nvkm_clk *base)
{
	struct gk104_clk *clk = gk104_clk(base);
	memset(clk->eng, 0x00, sizeof(clk->eng));
}

static const struct nvkm_clk_func
gk104_clk = {
	.read = gk104_clk_read,
	.calc = gk104_clk_calc,
	.prog = gk104_clk_prog,
	.tidy = gk104_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
		{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_hubk01 , 0x05 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_daemon , 0x07 },
		{ nv_clk_src_max }
	}
};

int
gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gk104_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
}