/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "ctxgf100.h"
#include "fuc/os.h"

#include <core/client.h>
#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/pmu.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * Zero Bandwidth Clear
 ******************************************************************************/

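/*
 * ZBC ("zero bandwidth clear") avoids touching surface data for clears to
 * common values: the L2 cache and render backends instead reference a small
 * table of (format, clear value) entries.  As far as the hardware is
 * understood, PGRAPH keeps its own view of that table, so the helpers below
 * mirror every allocation into both PGRAPH (via the 0x4058xx registers) and
 * LTC, and keep a software copy in gf100_gr so identical requests from
 * different clients can share a slot.
 */
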
static void
gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	if (gr->zbc_color[zbc].format) {
		nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]);
		nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]);
		nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]);
		nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]);
	}
	nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format);
	nvkm_wr32(device, 0x405820, zbc);
	nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
}

static int
gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
		       const u32 ds[4], const u32 l2[4])
{
	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
	int zbc = -ENOSPC, i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		if (gr->zbc_color[i].format) {
			if (gr->zbc_color[i].format != format)
				continue;
			if (memcmp(gr->zbc_color[i].ds, ds, sizeof(
				   gr->zbc_color[i].ds)))
				continue;
			if (memcmp(gr->zbc_color[i].l2, l2, sizeof(
				   gr->zbc_color[i].l2))) {
				WARN_ON(1);
				return -EINVAL;
			}
			return i;
		} else {
			zbc = (zbc < 0) ? i : zbc;
		}
	}

	if (zbc < 0)
		return zbc;

	memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds));
	memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
	gr->zbc_color[zbc].format = format;
	nvkm_ltc_zbc_color_get(ltc, zbc, l2);
	gf100_gr_zbc_clear_color(gr, zbc);
	return zbc;
}

static void
gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	if (gr->zbc_depth[zbc].format)
		nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
	nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
	nvkm_wr32(device, 0x405820, zbc);
	nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
}

static int
gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
		       const u32 ds, const u32 l2)
{
	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
	int zbc = -ENOSPC, i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		if (gr->zbc_depth[i].format) {
			if (gr->zbc_depth[i].format != format)
				continue;
			if (gr->zbc_depth[i].ds != ds)
				continue;
			if (gr->zbc_depth[i].l2 != l2) {
				WARN_ON(1);
				return -EINVAL;
			}
			return i;
		} else {
			zbc = (zbc < 0) ? i : zbc;
		}
	}

	if (zbc < 0)
		return zbc;

	gr->zbc_depth[zbc].format = format;
	gr->zbc_depth[zbc].ds = ds;
	gr->zbc_depth[zbc].l2 = l2;
	nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
	gf100_gr_zbc_clear_depth(gr, zbc);
	return zbc;
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

static int
gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
{
	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
	union {
		struct fermi_a_zbc_color_v0 v0;
	} *args = data;
	int ret;

	if (nvif_unpack(args->v0, 0, 0, false)) {
		switch (args->v0.format) {
		case FERMI_A_ZBC_COLOR_V0_FMT_ZERO:
		case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE:
		case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32:
		case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10:
		case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
		case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
			ret = gf100_gr_zbc_color_get(gr, args->v0.format,
						     args->v0.ds,
						     args->v0.l2);
			if (ret >= 0) {
				args->v0.index = ret;
				return 0;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int
gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
{
	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
	union {
		struct fermi_a_zbc_depth_v0 v0;
	} *args = data;
	int ret;

	if (nvif_unpack(args->v0, 0, 0, false)) {
		switch (args->v0.format) {
		case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
			ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
						     args->v0.ds,
						     args->v0.l2);
			return (ret >= 0) ? 0 : -ENOSPC;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int
gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	switch (mthd) {
	case FERMI_A_ZBC_COLOR:
		return gf100_fermi_mthd_zbc_color(object, data, size);
	case FERMI_A_ZBC_DEPTH:
		return gf100_fermi_mthd_zbc_depth(object, data, size);
	default:
		break;
	}
	return -EINVAL;
}

const struct nvkm_object_func
gf100_fermi = {
	.mthd = gf100_fermi_mthd,
};
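
/*
 * Illustrative only: a client that owns a FERMI_A object could request a
 * ZBC slot roughly like this (field names per fermi_a_zbc_color_v0; a
 * sketch, not a tested call sequence):
 *
 *	struct fermi_a_zbc_color_v0 args = {
 *		.format = FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32,
 *		.ds = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 },
 *		.l2 = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 },
 *	};
 *	ret = nvif_mthd(&object, FERMI_A_ZBC_COLOR, &args, sizeof(args));
 *
 * On success, args.index names the table slot that was assigned.
 */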

static void
gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data)
{
	nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000);
	nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000);
}

static bool
gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
{
	switch (class & 0x00ff) {
	case 0x97:
	case 0xc0:
		switch (mthd) {
		case 0x1528:
			gf100_gr_mthd_set_shader_exceptions(device, data);
			return true;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return false;
}

static int
gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
{
	struct gf100_gr *gr = gf100_gr(base);
	int c = 0;

	while (gr->func->sclass[c].oclass) {
		if (c++ == index) {
			*sclass = gr->func->sclass[index];
			return index;
		}
	}

	return c;
}

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

static int
gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		   int align, struct nvkm_gpuobj **pgpuobj)
{
	struct gf100_gr_chan *chan = gf100_gr_chan(object);
	struct gf100_gr *gr = chan->gr;
	int ret, i;

	ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
			      align, false, parent, pgpuobj);
	if (ret)
		return ret;

	nvkm_kmap(*pgpuobj);
	for (i = 0; i < gr->size; i += 4)
		nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);

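	/*
	 * The first words of the context image form a small header that the
	 * ctxsw ucode parses on first load.  Which layout applies depends on
	 * whether NVIDIA's external firmware or nouveau's own fuc is in use;
	 * the offsets below follow what the respective ucode expects
	 * (mmio-list entry count and address, plus, for external firmware, a
	 * few extra fields whose meaning is only partially understood).
	 */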
	if (!gr->firmware) {
		nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
	} else {
		nvkm_wo32(*pgpuobj, 0xf4, 0);
		nvkm_wo32(*pgpuobj, 0xf8, 0);
		nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
		nvkm_wo32(*pgpuobj, 0x1c, 1);
		nvkm_wo32(*pgpuobj, 0x20, 0);
		nvkm_wo32(*pgpuobj, 0x28, 0);
		nvkm_wo32(*pgpuobj, 0x2c, 0);
	}
	nvkm_done(*pgpuobj);
	return 0;
}

static void *
gf100_gr_chan_dtor(struct nvkm_object *object)
{
	struct gf100_gr_chan *chan = gf100_gr_chan(object);
	int i;

	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
		if (chan->data[i].vma.node) {
			nvkm_vm_unmap(&chan->data[i].vma);
			nvkm_vm_put(&chan->data[i].vma);
		}
		nvkm_memory_del(&chan->data[i].mem);
	}

	if (chan->mmio_vma.node) {
		nvkm_vm_unmap(&chan->mmio_vma);
		nvkm_vm_put(&chan->mmio_vma);
	}
	nvkm_memory_del(&chan->mmio);
	return chan;
}

static const struct nvkm_object_func
gf100_gr_chan = {
	.dtor = gf100_gr_chan_dtor,
	.bind = gf100_gr_chan_bind,
};

static int
gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		  const struct nvkm_oclass *oclass,
		  struct nvkm_object **pobject)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct gf100_gr_data *data = gr->mmio_data;
	struct gf100_gr_mmio *mmio = gr->mmio_list;
	struct gf100_gr_chan *chan;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	*pobject = &chan->object;

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
			      false, &chan->mmio);
	if (ret)
		return ret;

	ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
			  NV_MEM_ACCESS_SYS, &chan->mmio_vma);
	if (ret)
		return ret;

	nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);

	/* allocate buffers referenced by mmio list */
	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      data->size, data->align, false,
				      &chan->data[i].mem);
		if (ret)
			return ret;

		ret = nvkm_vm_get(fifoch->vm,
				  nvkm_memory_size(chan->data[i].mem), 12,
				  data->access, &chan->data[i].vma);
		if (ret)
			return ret;

		nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
		data++;
	}

	/* finally, fill in the mmio list and point the context at it */
	nvkm_kmap(chan->mmio);
	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
		u32 addr = mmio->addr;
		u32 data = mmio->data;

		if (mmio->buffer >= 0) {
			u64 info = chan->data[mmio->buffer].vma.offset;
			data |= info >> mmio->shift;
		}

		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
		mmio++;
	}
	nvkm_done(chan->mmio);
	return 0;
}

/*******************************************************************************
 * PGRAPH register lists
 ******************************************************************************/

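/*
 * Each gf100_gr_init entry is { addr, count, pitch, data }: "data" is
 * written to "count" registers starting at "addr", stepping by "pitch"
 * bytes.  For example, { 0x418814, 3, 0x04, 0x00000000 } zeroes 0x418814,
 * 0x418818 and 0x41881c.  A gf100_gr_pack groups these lists so that
 * gf100_gr_mmio()/gf100_gr_icmd() below can replay them in one pass.
 */
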
const struct gf100_gr_init
gf100_gr_init_main_0[] = {
	{ 0x400080, 1, 0x04, 0x003083c2 },
	{ 0x400088, 1, 0x04, 0x00006fe7 },
	{ 0x40008c, 1, 0x04, 0x00000000 },
	{ 0x400090, 1, 0x04, 0x00000030 },
	{ 0x40013c, 1, 0x04, 0x013901f7 },
	{ 0x400140, 1, 0x04, 0x00000100 },
	{ 0x400144, 1, 0x04, 0x00000000 },
	{ 0x400148, 1, 0x04, 0x00000110 },
	{ 0x400138, 1, 0x04, 0x00000000 },
	{ 0x400130, 2, 0x04, 0x00000000 },
	{ 0x400124, 1, 0x04, 0x00000002 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_fe_0[] = {
	{ 0x40415c, 1, 0x04, 0x00000000 },
	{ 0x404170, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pri_0[] = {
	{ 0x404488, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_rstr2d_0[] = {
	{ 0x407808, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pd_0[] = {
	{ 0x406024, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_ds_0[] = {
	{ 0x405844, 1, 0x04, 0x00ffffff },
	{ 0x405850, 1, 0x04, 0x00000000 },
	{ 0x405908, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_scc_0[] = {
	{ 0x40803c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_prop_0[] = {
	{ 0x4184a0, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpc_unk_0[] = {
	{ 0x418604, 1, 0x04, 0x00000000 },
	{ 0x418680, 1, 0x04, 0x00000000 },
	{ 0x418714, 1, 0x04, 0x80000000 },
	{ 0x418384, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_setup_0[] = {
	{ 0x418814, 3, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_crstr_0[] = {
	{ 0x418b04, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_setup_1[] = {
	{ 0x4188c8, 1, 0x04, 0x80000000 },
	{ 0x4188cc, 1, 0x04, 0x00000000 },
	{ 0x4188d0, 1, 0x04, 0x00010000 },
	{ 0x4188d4, 1, 0x04, 0x00000001 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_zcull_0[] = {
	{ 0x418910, 1, 0x04, 0x00010001 },
	{ 0x418914, 1, 0x04, 0x00000301 },
	{ 0x418918, 1, 0x04, 0x00800000 },
	{ 0x418980, 1, 0x04, 0x77777770 },
	{ 0x418984, 3, 0x04, 0x77777777 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpm_0[] = {
	{ 0x418c04, 1, 0x04, 0x00000000 },
	{ 0x418c88, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpc_unk_1[] = {
	{ 0x418d00, 1, 0x04, 0x00000000 },
	{ 0x418f08, 1, 0x04, 0x00000000 },
	{ 0x418e00, 1, 0x04, 0x00000050 },
	{ 0x418e08, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gcc_0[] = {
	{ 0x41900c, 1, 0x04, 0x00000000 },
	{ 0x419018, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tpccs_0[] = {
	{ 0x419d08, 2, 0x04, 0x00000000 },
	{ 0x419d10, 1, 0x04, 0x00000014 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tex_0[] = {
	{ 0x419ab0, 1, 0x04, 0x00000000 },
	{ 0x419ab8, 1, 0x04, 0x000000e7 },
	{ 0x419abc, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pe_0[] = {
	{ 0x41980c, 3, 0x04, 0x00000000 },
	{ 0x419844, 1, 0x04, 0x00000000 },
	{ 0x41984c, 1, 0x04, 0x00005bc5 },
	{ 0x419850, 4, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_l1c_0[] = {
	{ 0x419c98, 1, 0x04, 0x00000000 },
	{ 0x419ca8, 1, 0x04, 0x80000000 },
	{ 0x419cb4, 1, 0x04, 0x00000000 },
	{ 0x419cb8, 1, 0x04, 0x00008bf4 },
	{ 0x419cbc, 1, 0x04, 0x28137606 },
	{ 0x419cc0, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_wwdx_0[] = {
	{ 0x419bd4, 1, 0x04, 0x00800000 },
	{ 0x419bdc, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tpccs_1[] = {
	{ 0x419d2c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_mpc_0[] = {
	{ 0x419c0c, 1, 0x04, 0x00000000 },
	{}
};

static const struct gf100_gr_init
gf100_gr_init_sm_0[] = {
	{ 0x419e00, 1, 0x04, 0x00000000 },
	{ 0x419ea0, 1, 0x04, 0x00000000 },
	{ 0x419ea4, 1, 0x04, 0x00000100 },
	{ 0x419ea8, 1, 0x04, 0x00001100 },
	{ 0x419eac, 1, 0x04, 0x11100702 },
	{ 0x419eb0, 1, 0x04, 0x00000003 },
	{ 0x419eb4, 4, 0x04, 0x00000000 },
	{ 0x419ec8, 1, 0x04, 0x06060618 },
	{ 0x419ed0, 1, 0x04, 0x0eff0e38 },
	{ 0x419ed4, 1, 0x04, 0x011104f1 },
	{ 0x419edc, 1, 0x04, 0x00000000 },
	{ 0x419f00, 1, 0x04, 0x00000000 },
	{ 0x419f2c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_be_0[] = {
	{ 0x40880c, 1, 0x04, 0x00000000 },
	{ 0x408910, 9, 0x04, 0x00000000 },
	{ 0x408950, 1, 0x04, 0x00000000 },
	{ 0x408954, 1, 0x04, 0x0000ffff },
	{ 0x408984, 1, 0x04, 0x00000000 },
	{ 0x408988, 1, 0x04, 0x08040201 },
	{ 0x40898c, 1, 0x04, 0x80402010 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_fe_1[] = {
	{ 0x4040f0, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pe_1[] = {
	{ 0x419880, 1, 0x04, 0x00000002 },
	{}
};

static const struct gf100_gr_pack
gf100_gr_pack_mmio[] = {
	{ gf100_gr_init_main_0 },
	{ gf100_gr_init_fe_0 },
	{ gf100_gr_init_pri_0 },
	{ gf100_gr_init_rstr2d_0 },
	{ gf100_gr_init_pd_0 },
	{ gf100_gr_init_ds_0 },
	{ gf100_gr_init_scc_0 },
	{ gf100_gr_init_prop_0 },
	{ gf100_gr_init_gpc_unk_0 },
	{ gf100_gr_init_setup_0 },
	{ gf100_gr_init_crstr_0 },
	{ gf100_gr_init_setup_1 },
	{ gf100_gr_init_zcull_0 },
	{ gf100_gr_init_gpm_0 },
	{ gf100_gr_init_gpc_unk_1 },
	{ gf100_gr_init_gcc_0 },
	{ gf100_gr_init_tpccs_0 },
	{ gf100_gr_init_tex_0 },
	{ gf100_gr_init_pe_0 },
	{ gf100_gr_init_l1c_0 },
	{ gf100_gr_init_wwdx_0 },
	{ gf100_gr_init_tpccs_1 },
	{ gf100_gr_init_mpc_0 },
	{ gf100_gr_init_sm_0 },
	{ gf100_gr_init_be_0 },
	{ gf100_gr_init_fe_1 },
	{ gf100_gr_init_pe_1 },
	{}
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

void
gf100_gr_zbc_init(struct gf100_gr *gr)
{
	const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
			     0x00000000, 0x00000000, 0x00000000, 0x00000000 };
	const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
			    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
	const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
	int index;

	if (!gr->zbc_color[0].format) {
		gf100_gr_zbc_color_get(gr, 1, &zero[0], &zero[4]);
		gf100_gr_zbc_color_get(gr, 2, &one[0], &one[4]);
		gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]);
		gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]);
		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
	}

	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
		gf100_gr_zbc_clear_color(gr, index);
	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
		gf100_gr_zbc_clear_depth(gr, index);
}

/**
 * Wait until GR goes idle. GR is considered idle if it is disabled by the
 * MC (0x200) register, or GR is not busy and a context switch is not in
 * progress.
 */
int
gf100_gr_wait_idle(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
	bool gr_enabled, ctxsw_active, gr_busy;

	do {
		/*
		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
		 * up-to-date
		 */
		nvkm_rd32(device, 0x400700);

		gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
		ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
		gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;

		if (!gr_enabled || (!gr_busy && !ctxsw_active))
			return 0;
	} while (time_before(jiffies, end_jiffies));

	nvkm_error(subdev,
		   "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
		   gr_enabled, ctxsw_active, gr_busy);
	return -EAGAIN;
}

void
gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;

	pack_for_each_init(init, pack, p) {
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;
		while (addr < next) {
			nvkm_wr32(device, addr, init->data);
			addr += init->pitch;
		}
	}
}

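/*
 * Replay a register list through the FE's ICMD interface rather than with
 * direct MMIO: 0x400208 appears to gate access, 0x400204 holds the data
 * (rewritten only when it changes), and writing the method address to
 * 0x400200 triggers the transfer.  These register roles come from reverse
 * engineering, so treat them as educated guesses rather than documentation.
 */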
void
gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;
	u32 data = 0;

	nvkm_wr32(device, 0x400208, 0x80000000);

	pack_for_each_init(init, pack, p) {
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;

		if ((pack == p && init == p->init) || data != init->data) {
			nvkm_wr32(device, 0x400204, init->data);
			data = init->data;
		}

		while (addr < next) {
			nvkm_wr32(device, 0x400200, addr);
			/*
			 * Wait for GR to go idle after submitting a
			 * GO_IDLE bundle
			 */
			if ((addr & 0xffff) == 0xe100)
				gf100_gr_wait_idle(gr);
			nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
					break;
			);
			addr += init->pitch;
		}
	}

	nvkm_wr32(device, 0x400208, 0x00000000);
}

void
gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;
	u32 data = 0;

	pack_for_each_init(init, pack, p) {
		u32 ctrl = 0x80000000 | pack->type;
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;

		if ((pack == p && init == p->init) || data != init->data) {
			nvkm_wr32(device, 0x40448c, init->data);
			data = init->data;
		}

		while (addr < next) {
			nvkm_wr32(device, 0x404488, ctrl | (addr << 14));
			addr += init->pitch;
		}
	}
}

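/*
 * Pack unit counts for the units query: GPC count in the low byte, total
 * TPC count shifted in above it, and ROP count in the upper 32 bits.  The
 * layout is a nouveau ABI convention, not a hardware register format.
 */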
u64
gf100_gr_units(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	u64 cfg;

	cfg  = (u32)gr->gpc_nr;
	cfg |= (u32)gr->tpc_total << 8;
	cfg |= (u64)gr->rop_nr << 32;

	return cfg;
}

static const struct nvkm_bitfield gk104_sked_error[] = {
	{ 0x00000080, "CONSTANT_BUFFER_SIZE" },
	{ 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
	{ 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
	{ 0x00000800, "WARP_CSTACK_SIZE" },
	{ 0x00001000, "TOTAL_TEMP_SIZE" },
	{ 0x00002000, "REGISTER_COUNT" },
	{ 0x00040000, "TOTAL_THREADS" },
	{ 0x00100000, "PROGRAM_OFFSET" },
	{ 0x00200000, "SHARED_MEMORY_SIZE" },
	{ 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
	{ 0x04000000, "TOTAL_REGISTER_COUNT" },
	{}
};

static const struct nvkm_bitfield gf100_gpc_rop_error[] = {
	{ 0x00000002, "RT_PITCH_OVERRUN" },
	{ 0x00000010, "RT_WIDTH_OVERRUN" },
	{ 0x00000020, "RT_HEIGHT_OVERRUN" },
	{ 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000100, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_LINEAR_MISMATCH" },
	{}
};

static void
gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	char error[128];
	u32 trap[4];

	trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff;
	trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434));
	trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438));
	trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c));

	nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]);

	nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, "
			   "format = %x, storage type = %x\n",
		   gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16,
		   (trap[2] >> 8) & 0x3f, trap[3] & 0xff);
	nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
}

static const struct nvkm_enum gf100_mp_warp_error[] = {
	{ 0x01, "STACK_ERROR" },
	{ 0x02, "API_STACK_ERROR" },
	{ 0x03, "RET_EMPTY_STACK_ERROR" },
	{ 0x04, "PC_WRAP" },
	{ 0x05, "MISALIGNED_PC" },
	{ 0x06, "PC_OVERFLOW" },
	{ 0x07, "MISALIGNED_IMMC_ADDR" },
	{ 0x08, "MISALIGNED_REG" },
	{ 0x09, "ILLEGAL_INSTR_ENCODING" },
	{ 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
	{ 0x0b, "ILLEGAL_INSTR_PARAM" },
	{ 0x0c, "INVALID_CONST_ADDR" },
	{ 0x0d, "OOR_REG" },
	{ 0x0e, "OOR_ADDR" },
	{ 0x0f, "MISALIGNED_ADDR" },
	{ 0x10, "INVALID_ADDR_SPACE" },
	{ 0x11, "ILLEGAL_INSTR_PARAM2" },
	{ 0x12, "INVALID_CONST_ADDR_LDC" },
	{ 0x13, "GEOMETRY_SM_ERROR" },
	{ 0x14, "DIVERGENT" },
	{ 0x15, "WARP_EXIT" },
	{}
};

static const struct nvkm_bitfield gf100_mp_global_error[] = {
	{ 0x00000001, "SM_TO_SM_FAULT" },
	{ 0x00000002, "L1_ERROR" },
	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
	{ 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
	{ 0x00000010, "BPT_INT" },
	{ 0x00000020, "BPT_PAUSE" },
	{ 0x00000040, "SINGLE_STEP_COMPLETE" },
	{ 0x20000000, "ECC_SEC_ERROR" },
	{ 0x40000000, "ECC_DED_ERROR" },
	{ 0x80000000, "TIMEOUT" },
	{}
};

static void
gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648));
	u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650));
	const struct nvkm_enum *warp;
	char glob[128];

	nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
	warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);

	nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
			   "global %08x [%s] warp %04x [%s]\n",
		   gpc, tpc, gerr, glob, werr, warp ? warp->name : "");

	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr);
}

static void
gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508));

	if (stat & 0x00000001) {
		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224));
		nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap);
		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000002) {
		gf100_gr_trap_mp(gr, gpc, tpc);
		stat &= ~0x00000002;
	}

	if (stat & 0x00000004) {
		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084));
		nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap);
		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
		stat &= ~0x00000004;
	}

	if (stat & 0x00000008) {
		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c));
		nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap);
		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
		stat &= ~0x00000008;
	}

	if (stat) {
		nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
	}
}

static void
gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90));
	int tpc;

	if (stat & 0x00000001) {
		gf100_gr_trap_gpc_rop(gr, gpc);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000002) {
		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900));
		nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		stat &= ~0x00000002;
	}

	if (stat & 0x00000004) {
		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028));
		nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		stat &= ~0x00000004;
	}

	if (stat & 0x00000008) {
		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824));
		nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		stat &= ~0x00000008;
	}

	for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
		u32 mask = 0x00010000 << tpc;
		if (stat & mask) {
			gf100_gr_trap_tpc(gr, gpc, tpc);
			nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask);
			stat &= ~mask;
		}
	}

	if (stat) {
		nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat);
	}
}

static void
gf100_gr_trap_intr(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 trap = nvkm_rd32(device, 0x400108);
	int rop, gpc;

	if (trap & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x404000);
		nvkm_error(subdev, "DISPATCH %08x\n", stat);
		nvkm_wr32(device, 0x404000, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000001);
		trap &= ~0x00000001;
	}

	if (trap & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x404600);
		nvkm_error(subdev, "M2MF %08x\n", stat);
		nvkm_wr32(device, 0x404600, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000002);
		trap &= ~0x00000002;
	}

	if (trap & 0x00000008) {
		u32 stat = nvkm_rd32(device, 0x408030);
		nvkm_error(subdev, "CCACHE %08x\n", stat);
		nvkm_wr32(device, 0x408030, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000008);
		trap &= ~0x00000008;
	}

	if (trap & 0x00000010) {
		u32 stat = nvkm_rd32(device, 0x405840);
		nvkm_error(subdev, "SHADER %08x\n", stat);
		nvkm_wr32(device, 0x405840, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000010);
		trap &= ~0x00000010;
	}

	if (trap & 0x00000040) {
		u32 stat = nvkm_rd32(device, 0x40601c);
		nvkm_error(subdev, "UNK6 %08x\n", stat);
		nvkm_wr32(device, 0x40601c, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000040);
		trap &= ~0x00000040;
	}

	if (trap & 0x00000080) {
		u32 stat = nvkm_rd32(device, 0x404490);
		nvkm_error(subdev, "MACRO %08x\n", stat);
		nvkm_wr32(device, 0x404490, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000080);
		trap &= ~0x00000080;
	}

	if (trap & 0x00000100) {
		u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;
		char sked[128];

		nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat);
		nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked);

		if (stat)
			nvkm_wr32(device, 0x407020, 0x40000000);
		nvkm_wr32(device, 0x400108, 0x00000100);
		trap &= ~0x00000100;
	}

	if (trap & 0x01000000) {
		u32 stat = nvkm_rd32(device, 0x400118);
		for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) {
			u32 mask = 0x00000001 << gpc;
			if (stat & mask) {
				gf100_gr_trap_gpc(gr, gpc);
				nvkm_wr32(device, 0x400118, mask);
				stat &= ~mask;
			}
		}
		nvkm_wr32(device, 0x400108, 0x01000000);
		trap &= ~0x01000000;
	}

	if (trap & 0x02000000) {
		for (rop = 0; rop < gr->rop_nr; rop++) {
			u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070));
			u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144));
			nvkm_error(subdev, "ROP%d %08x %08x\n",
				   rop, statz, statc);
			nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
			nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
		}
		nvkm_wr32(device, 0x400108, 0x02000000);
		trap &= ~0x02000000;
	}

	if (trap) {
		nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap);
		nvkm_wr32(device, 0x400108, trap);
	}
}

static void
gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	nvkm_error(subdev, "%06x - done %08x\n", base,
		   nvkm_rd32(device, base + 0x400));
	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
		   nvkm_rd32(device, base + 0x800),
		   nvkm_rd32(device, base + 0x804),
		   nvkm_rd32(device, base + 0x808),
		   nvkm_rd32(device, base + 0x80c));
	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
		   nvkm_rd32(device, base + 0x810),
		   nvkm_rd32(device, base + 0x814),
		   nvkm_rd32(device, base + 0x818),
		   nvkm_rd32(device, base + 0x81c));
}

void
gf100_gr_ctxctl_debug(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff;
	u32 gpc;

	gf100_gr_ctxctl_debug_unit(gr, 0x409000);
	for (gpc = 0; gpc < gpcnr; gpc++)
		gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000));
}

static void
gf100_gr_ctxctl_isr(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x409c18);

	if (stat & 0x00000001) {
		u32 code = nvkm_rd32(device, 0x409814);
		if (code == E_BAD_FWMTHD) {
			u32 class = nvkm_rd32(device, 0x409808);
			u32 addr = nvkm_rd32(device, 0x40980c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00003ffc);
			u32 data = nvkm_rd32(device, 0x409810);

			nvkm_error(subdev, "FECS MTHD subc %d class %04x "
					   "mthd %04x data %08x\n",
				   subc, class, mthd, data);

			nvkm_wr32(device, 0x409c20, 0x00000001);
			stat &= ~0x00000001;
		} else {
			nvkm_error(subdev, "FECS ucode error %d\n", code);
		}
	}

	if (stat & 0x00080000) {
		nvkm_error(subdev, "FECS watchdog timeout\n");
		gf100_gr_ctxctl_debug(gr);
		nvkm_wr32(device, 0x409c20, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nvkm_error(subdev, "FECS %08x\n", stat);
		gf100_gr_ctxctl_debug(gr);
		nvkm_wr32(device, 0x409c20, stat);
	}
}

static void
gf100_gr_intr(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nvkm_rd32(device, 0x400708);
	u32 code = nvkm_rd32(device, 0x400110);
	u32 class;
	const char *name = "unknown";
	int chid = -1;

	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
	if (chan) {
		name = chan->object.client->name;
		chid = chan->chid;
	}

	if (device->card_type < NV_E0 || subc < 4)
		class = nvkm_rd32(device, 0x404200 + (subc * 4));
	else
		class = 0x0000;

	if (stat & 0x00000001) {
		/*
		 * notifier interrupt, only needed for cyclestats;
		 * can be safely ignored
		 */
		nvkm_wr32(device, 0x400100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
			nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
					   "subc %d class %04x mthd %04x data %08x\n",
				   chid, inst << 12, name, subc,
				   class, mthd, data);
		}
		nvkm_wr32(device, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
				   "subc %d class %04x mthd %04x data %08x\n",
			   chid, inst << 12, name, subc, class, mthd, data);
		nvkm_wr32(device, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		const struct nvkm_enum *en =
			nvkm_enum_find(nv50_data_error_names, code);
		nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
				   "subc %d class %04x mthd %04x data %08x\n",
			   code, en ? en->name : "", chid, inst << 12,
			   name, subc, class, mthd, data);
		nvkm_wr32(device, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
			   chid, inst << 12, name);
		gf100_gr_trap_intr(gr);
		nvkm_wr32(device, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		gf100_gr_ctxctl_isr(gr);
		nvkm_wr32(device, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nvkm_error(subdev, "intr %08x\n", stat);
		nvkm_wr32(device, 0x400100, stat);
	}

	nvkm_wr32(device, 0x400500, 0x00010001);
	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}

void
gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int i;

	nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < data->size / 4; i++)
		nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);

	nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < code->size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
		nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nvkm_wr32(device, fuc_base + 0x0184, 0);
}

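/*
 * Build a falcon's context-save/restore transfer list from a register pack.
 * Runs of consecutive registers (4 bytes apart) are coalesced into single
 * list entries encoded as ((count - 1) << 26 | addr), which is exactly what
 * the flush in the loop below emits; "starstar" selects which of the
 * ucode's list pointers gets patched afterwards.  As with most ctxctl
 * details, this encoding is inferred from the fuc sources rather than from
 * documentation.
 */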
static void
gf100_gr_init_csdata(struct gf100_gr *gr,
		     const struct gf100_gr_pack *pack,
		     u32 falcon, u32 starstar, u32 base)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *iter;
	const struct gf100_gr_init *init;
	u32 addr = ~0, prev = ~0, xfer = 0;
	u32 star, temp;

	nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar);
	star = nvkm_rd32(device, falcon + 0x01c4);
	temp = nvkm_rd32(device, falcon + 0x01c4);
	if (temp > star)
		star = temp;
	nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star);

	pack_for_each_init(init, iter, pack) {
		u32 head = init->addr - base;
		u32 tail = head + init->count * init->pitch;
		while (head < tail) {
			if (head != prev + 4 || xfer >= 32) {
				if (xfer) {
					u32 data = ((--xfer << 26) | addr);
					nvkm_wr32(device, falcon + 0x01c4, data);
					star += 4;
				}
				addr = head;
				xfer = 0;
			}
			prev = head;
			xfer = xfer + 1;
			head = head + init->pitch;
		}
	}

	nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
	nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
	nvkm_wr32(device, falcon + 0x01c4, star + 4);
}

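/*
 * Bring up the FECS/GPCCS ctxctl ucode.  The "methods" issued below use a
 * simple mailbox: 0x409500 takes an argument, writing a method id to
 * 0x409504 kicks the falcon, and 0x409800 reports completion/result (with
 * 0x409840 clearing status beforehand).  What the individual method ids
 * mean (0x10 appears to query the context image size; the others are only
 * partially understood) is reverse-engineered, not documented.
 */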
int
gf100_gr_init_ctxctl(struct gf100_gr *gr)
{
	const struct gf100_grctx_func *grctx = gr->func->grctx;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	if (gr->firmware) {
		/* load fuc microcode */
		nvkm_mc_unk260(device->mc, 0);
		gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
		gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
		nvkm_mc_unk260(device->mc, 1);

		/* start both of them running */
		nvkm_wr32(device, 0x409840, 0xffffffff);
		nvkm_wr32(device, 0x41a10c, 0x00000000);
		nvkm_wr32(device, 0x40910c, 0x00000000);
		nvkm_wr32(device, 0x41a100, 0x00000002);
		nvkm_wr32(device, 0x409100, 0x00000002);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800) & 0x00000001)
				break;
		) < 0)
			return -EBUSY;

		nvkm_wr32(device, 0x409840, 0xffffffff);
		nvkm_wr32(device, 0x409500, 0x7fffffff);
		nvkm_wr32(device, 0x409504, 0x00000021);

		nvkm_wr32(device, 0x409840, 0xffffffff);
		nvkm_wr32(device, 0x409500, 0x00000000);
		nvkm_wr32(device, 0x409504, 0x00000010);
		if (nvkm_msec(device, 2000,
			if ((gr->size = nvkm_rd32(device, 0x409800)))
				break;
		) < 0)
			return -EBUSY;

		nvkm_wr32(device, 0x409840, 0xffffffff);
		nvkm_wr32(device, 0x409500, 0x00000000);
		nvkm_wr32(device, 0x409504, 0x00000016);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800))
				break;
		) < 0)
			return -EBUSY;

		nvkm_wr32(device, 0x409840, 0xffffffff);
		nvkm_wr32(device, 0x409500, 0x00000000);
		nvkm_wr32(device, 0x409504, 0x00000025);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800))
				break;
		) < 0)
			return -EBUSY;

		if (device->chipset >= 0xe0) {
			nvkm_wr32(device, 0x409800, 0x00000000);
			nvkm_wr32(device, 0x409500, 0x00000001);
			nvkm_wr32(device, 0x409504, 0x00000030);
			if (nvkm_msec(device, 2000,
				if (nvkm_rd32(device, 0x409800))
					break;
			) < 0)
				return -EBUSY;

			nvkm_wr32(device, 0x409810, 0xb00095c8);
			nvkm_wr32(device, 0x409800, 0x00000000);
			nvkm_wr32(device, 0x409500, 0x00000001);
			nvkm_wr32(device, 0x409504, 0x00000031);
			if (nvkm_msec(device, 2000,
				if (nvkm_rd32(device, 0x409800))
					break;
			) < 0)
				return -EBUSY;

			nvkm_wr32(device, 0x409810, 0x00080420);
			nvkm_wr32(device, 0x409800, 0x00000000);
			nvkm_wr32(device, 0x409500, 0x00000001);
			nvkm_wr32(device, 0x409504, 0x00000032);
			if (nvkm_msec(device, 2000,
				if (nvkm_rd32(device, 0x409800))
					break;
			) < 0)
				return -EBUSY;

			nvkm_wr32(device, 0x409614, 0x00000070);
			nvkm_wr32(device, 0x409614, 0x00000770);
			nvkm_wr32(device, 0x40802c, 0x00000001);
		}

		if (gr->data == NULL) {
			int ret = gf100_grctx_generate(gr);
			if (ret) {
				nvkm_error(subdev, "failed to construct context\n");
				return ret;
			}
		}

		return 0;
	} else
	if (!gr->func->fecs.ucode) {
		return -ENOSYS;
	}

	/* load HUB microcode */
	nvkm_mc_unk260(device->mc, 0);
	nvkm_wr32(device, 0x4091c0, 0x01000000);
	for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
		nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);

	nvkm_wr32(device, 0x409180, 0x01000000);
	for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x409188, i >> 6);
		nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
	}

	/* load GPC microcode */
	nvkm_wr32(device, 0x41a1c0, 0x01000000);
	for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
		nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);

	nvkm_wr32(device, 0x41a180, 0x01000000);
	for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x41a188, i >> 6);
		nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
	}
	nvkm_mc_unk260(device->mc, 1);

	/* load register lists */
	gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
	gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
	gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
	gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);

	/* start HUB ucode running, it'll init the GPCs */
	nvkm_wr32(device, 0x40910c, 0x00000000);
	nvkm_wr32(device, 0x409100, 0x00000002);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x409800) & 0x80000000)
			break;
	) < 0) {
		gf100_gr_ctxctl_debug(gr);
		return -EBUSY;
	}

	gr->size = nvkm_rd32(device, 0x409804);
	if (gr->data == NULL) {
		int ret = gf100_grctx_generate(gr);
		if (ret) {
			nvkm_error(subdev, "failed to construct context\n");
			return ret;
		}
	}

	return 0;
}

static int
gf100_gr_oneinit(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, i, j;

	nvkm_pmu_pgob(device->pmu, false);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
			      &gr->unk4188b4);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
			      &gr->unk4188b8);
	if (ret)
		return ret;

	nvkm_kmap(gr->unk4188b4);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(gr->unk4188b4, i, 0x00000010);
	nvkm_done(gr->unk4188b4);

	nvkm_kmap(gr->unk4188b8);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(gr->unk4188b8, i, 0x00000010);
	nvkm_done(gr->unk4188b8);

	gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
	gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
	for (i = 0; i < gr->gpc_nr; i++) {
		gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
		gr->tpc_total += gr->tpc_nr[i];
		gr->ppc_nr[i] = gr->func->ppc_nr;
		for (j = 0; j < gr->ppc_nr[i]; j++) {
			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
			if (mask)
				gr->ppc_mask[i] |= (1 << j);
			gr->ppc_tpc_nr[i][j] = hweight8(mask);
		}
	}

	/*XXX: these need figuring out... though it might not even matter */
	switch (device->chipset) {
	case 0xc0:
		if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
			gr->magic_not_rop_nr = 0x07;
		} else
		if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
			gr->magic_not_rop_nr = 0x05;
		} else
		if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
			gr->magic_not_rop_nr = 0x06;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		gr->magic_not_rop_nr = 0x03;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		gr->magic_not_rop_nr = 0x01;
		break;
	case 0xc1: /* 2/0/0/0, 1 */
		gr->magic_not_rop_nr = 0x01;
		break;
	case 0xc8: /* 4/4/3/4, 5 */
		gr->magic_not_rop_nr = 0x06;
		break;
	case 0xce: /* 4/4/0/0, 4 */
		gr->magic_not_rop_nr = 0x03;
		break;
	case 0xcf: /* 4/0/0/0, 3 */
		gr->magic_not_rop_nr = 0x03;
		break;
	case 0xd7:
	case 0xd9: /* 1/0/0/0, 1 */
	case 0xea: /* gk20a */
	case 0x12b: /* gm20b */
		gr->magic_not_rop_nr = 0x01;
		break;
	}

	return 0;
}

int
gf100_gr_init_(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
	return gr->func->init(gr);
}

void
gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
{
	kfree(fuc->data);
	fuc->data = NULL;
}

void *
gf100_gr_dtor(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);

	if (gr->func->dtor)
		gr->func->dtor(gr);
	kfree(gr->data);

	gf100_gr_dtor_fw(&gr->fuc409c);
	gf100_gr_dtor_fw(&gr->fuc409d);
	gf100_gr_dtor_fw(&gr->fuc41ac);
	gf100_gr_dtor_fw(&gr->fuc41ad);

	nvkm_memory_del(&gr->unk4188b8);
	nvkm_memory_del(&gr->unk4188b4);
	return gr;
}

static const struct nvkm_gr_func
gf100_gr_ = {
	.dtor = gf100_gr_dtor,
	.oneinit = gf100_gr_oneinit,
	.init = gf100_gr_init_,
	.intr = gf100_gr_intr,
	.units = gf100_gr_units,
	.chan_new = gf100_gr_chan_new,
	.object_get = gf100_gr_object_get,
};

int
gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
		 struct gf100_gr_fuc *fuc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct firmware *fw;
	char f[64];
	char cname[16];
	int ret;
	int i;

	/* Convert device name to lowercase */
	strncpy(cname, device->chip->name, sizeof(cname));
	cname[sizeof(cname) - 1] = '\0';
	i = strlen(cname);
	while (i) {
		--i;
		cname[i] = tolower(cname[i]);
	}

	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
	ret = request_firmware(&fw, f, device->dev);
	if (ret) {
		nvkm_error(subdev, "failed to load %s\n", fwname);
		return ret;
	}

	fuc->size = fw->size;
	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
	release_firmware(fw);
	return (fuc->data != NULL) ? 0 : -ENOMEM;
}

int
gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct gf100_gr *gr)
{
	int ret;

	gr->func = func;
	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
				    func->fecs.ucode == NULL);

	ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
			   gr->firmware || func->fecs.ucode != NULL,
			   &gr->base);
	if (ret)
		return ret;

	if (gr->firmware) {
		nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
			return -ENODEV;
	}

	return 0;
}

int
gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct nvkm_gr **pgr)
{
	struct gf100_gr *gr;
	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;
	return gf100_gr_ctor(func, device, index, gr);
}

int
gf100_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	u32 data[TPC_MAX / 8] = {};
	u8 tpcnr[GPC_MAX];
	int gpc, tpc, rop;
	int i;

	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);

	gf100_gr_mmio(gr, gr->func->mmio);

	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);

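	/*
	 * Distribute TPC indices across GPCs round-robin, packing one 4-bit
	 * value per TPC into the 0x0980..0x098c broadcast registers below;
	 * tpcnr[] tracks how many TPCs each GPC still has to hand out.
	 * E.g. with a 3/4/4/0 layout, TPC 0 of each populated GPC is
	 * assigned first, then TPC 1, and so on.
	 */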
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);

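	/*
	 * Per-GPC setup: 0x0914 takes magic_not_rop_nr and the GPC's TPC
	 * count, 0x0910 the total TPC count, and 0x0918 "magicgpc918" =
	 * round_up(2^23 / tpc_total), presumably a fixed-point reciprocal
	 * used to map screen tiles to TPCs without a divide.  The names are
	 * nouveau's; the exact semantics are guessed.
	 */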
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			  gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
			  gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	if (device->chipset != 0xd7)
		nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
	else
		nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);

	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));

	nvkm_wr32(device, 0x400500, 0x00010001);

	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);

	nvkm_wr32(device, 0x409c24, 0x000f0000);
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);
	nvkm_wr32(device, 0x408030, 0xc0000000);
	nvkm_wr32(device, 0x40601c, 0xc0000000);
	nvkm_wr32(device, 0x404490, 0xc0000000);
	nvkm_wr32(device, 0x406018, 0xc0000000);
	nvkm_wr32(device, 0x405840, 0xc0000000);
	nvkm_wr32(device, 0x405844, 0x00ffffff);
	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
		}
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}

	for (rop = 0; rop < gr->rop_nr; rop++) {
		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	nvkm_wr32(device, 0x400054, 0x34ce3464);

	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}

#include "fuc/hubgf100.fuc3.h"

struct gf100_gr_ucode
gf100_gr_fecs_ucode = {
	.code.data = gf100_grhub_code,
	.code.size = sizeof(gf100_grhub_code),
	.data.data = gf100_grhub_data,
	.data.size = sizeof(gf100_grhub_data),
};

#include "fuc/gpcgf100.fuc3.h"

struct gf100_gr_ucode
gf100_gr_gpccs_ucode = {
	.code.data = gf100_grgpc_code,
	.code.size = sizeof(gf100_grgpc_code),
	.data.data = gf100_grgpc_data,
	.data.size = sizeof(gf100_grgpc_data),
};

static const struct gf100_gr_func
gf100_gr = {
	.init = gf100_gr_init,
	.mmio = gf100_gr_pack_mmio,
	.fecs.ucode = &gf100_gr_fecs_ucode,
	.gpccs.ucode = &gf100_gr_gpccs_ucode,
	.grctx = &gf100_grctx,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
		{ -1, -1, FERMI_A, &gf100_fermi },
		{ -1, -1, FERMI_COMPUTE_A },
		{}
	}
};

int
gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(&gf100_gr, device, index, pgr);
}