/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

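/*
 * Kick off a clear of the compression tag backing (CBC) entries in the
 * range [start, limit].  Going by how the registers are used here,
 * 0x17e8cc/0x17e8d0 appear to hold the range and 0x17e8c8 the trigger;
 * completion is polled separately by gf100_ltc_cbc_wait().
 */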
void
gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
	struct nvkm_device *device = ltc->subdev.device;
	nvkm_wr32(device, 0x17e8cc, start);
	nvkm_wr32(device, 0x17e8d0, limit);
	nvkm_wr32(device, 0x17e8c8, 0x00000004);
}

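/*
 * Poll every LTC/LTS pair (for up to 2ms each) until its CBC status
 * register reads back zero, i.e. until the clear kicked off by
 * gf100_ltc_cbc_clear() has presumably completed.
 */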
void
gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	int c, s;
	for (c = 0; c < ltc->ltc_nr; c++) {
		for (s = 0; s < ltc->lts_nr; s++) {
			const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
			nvkm_msec(device, 2000,
				if (!nvkm_rd32(device, addr))
					break;
			);
		}
	}
}

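/*
 * Program the four 32-bit components of ZBC clear-colour table entry 'i';
 * 0x17ea44 appears to select the table index, 0x17ea48..0x17ea54 the value.
 */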
void
gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
	struct nvkm_device *device = ltc->subdev.device;
	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
	nvkm_wr32(device, 0x17ea48, color[0]);
	nvkm_wr32(device, 0x17ea4c, color[1]);
	nvkm_wr32(device, 0x17ea50, color[2]);
	nvkm_wr32(device, 0x17ea54, color[3]);
}

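/* Program ZBC clear-depth table entry 'i', using the same index select. */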
void
gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
	struct nvkm_device *device = ltc->subdev.device;
	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
	nvkm_wr32(device, 0x17ea58, depth);
}

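/* Names for the per-slice (LTS) interrupt status bits decoded below. */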
static const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
	{ 0x00000001, "IDLE_ERROR_IQ" },
	{ 0x00000002, "IDLE_ERROR_CBC" },
	{ 0x00000004, "IDLE_ERROR_TSTG" },
	{ 0x00000008, "IDLE_ERROR_DSTG" },
	{ 0x00000010, "EVICTED_CB" },
	{ 0x00000020, "ILLEGAL_COMPSTAT" },
	{ 0x00000040, "BLOCKLINEAR_CB" },
	{ 0x00000100, "ECC_SEC_ERROR" },
	{ 0x00000200, "ECC_DED_ERROR" },
	{ 0x00000400, "DEBUG" },
	{ 0x00000800, "ATOMIC_TO_Z" },
	{ 0x00001000, "ILLEGAL_ATOMIC" },
	{ 0x00002000, "BLKACTIVITY_ERR" },
	{}
};

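/*
 * Service a single cache slice: read its interrupt status word, log any
 * recognised error bits by name, then acknowledge by writing the raw
 * value back.
 */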
static void
gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
{
	struct nvkm_subdev *subdev = &ltc->subdev;
	struct nvkm_device *device = subdev->device;
	u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
	u32 intr = nvkm_rd32(device, base + 0x020);
	u32 stat = intr & 0x0000ffff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
		nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
	}

	nvkm_wr32(device, base + 0x020, intr);
}

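/*
 * Top-level LTC interrupt handler.  0x00017c appears to carry one pending
 * bit per LTC partition, so walk the set bits and service every slice of
 * each signalled partition.
 */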
void
gf100_ltc_intr(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	u32 mask;

	mask = nvkm_rd32(device, 0x00017c);
	while (mask) {
		u32 s, c = __ffs(mask);
		for (s = 0; s < ltc->lts_nr; s++)
			gf100_ltc_lts_intr(ltc, c, s);
		mask &= ~(1 << c);
	}
}

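/*
 * Trigger an L2 invalidate and give the hardware 2ms to clear the busy
 * bits, warning on timeout and logging how long the operation took.
 */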
void
gf100_ltc_invalidate(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	s64 taken;

	nvkm_wr32(device, 0x70004, 0x00000001);
	taken = nvkm_wait_msec(device, 2, 0x70004, 0x00000003, 0x00000000);
	if (taken < 0)
		nvkm_warn(&ltc->subdev, "LTC invalidate timeout\n");

	if (taken > 0)
		nvkm_debug(&ltc->subdev, "LTC invalidate took %lld ns\n", taken);
}

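/* Same pattern as above, but triggering an L2 flush through 0x70010. */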
void
gf100_ltc_flush(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	s64 taken;

	nvkm_wr32(device, 0x70010, 0x00000001);
	taken = nvkm_wait_msec(device, 2, 0x70010, 0x00000003, 0x00000000);
	if (taken < 0)
		nvkm_warn(&ltc->subdev, "LTC flush timeout\n");

	if (taken > 0)
		nvkm_debug(&ltc->subdev, "LTC flush took %lld ns\n", taken);
}

/* TODO: Figure out tag memory details and drop the over-cautious allocation.
 */
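/*
 * Size the compression tag backing store and carve it out of the tail of
 * VRAM: enough tags to cover roughly a quarter of memory, capped at the
 * 1 << 17 entries addressable from a PTE, with the generous per-64-tag
 * and alignment margins described below.
 */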
int
gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
{
	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
	u32 tag_size, tag_margin, tag_align;
	int ret;

	/* No VRAM, no tags for now. */
	if (!ram) {
		ltc->num_tags = 0;
		goto mm_init;
	}

	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
	ltc->num_tags = (ram->size >> 17) / 4;
	if (ltc->num_tags > (1 << 17))
		ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */

	tag_align = ltc->ltc_nr * 0x800;
	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;

	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
	/* 3 part 4 sub: 0x6000 bytes for 168 tags */
	/*
	 * About 147 bytes per tag. Let's be safe and allocate x2, which makes
	 * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
	 *
	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
	 */
	tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
	tag_size += tag_align;
	tag_size = (tag_size + 0xfff) >> 12; /* round up */

	ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
			   &ltc->tag_ram);
	if (ret) {
		ltc->num_tags = 0;
	} else {
		u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;

		tag_base += tag_align - 1;
		do_div(tag_base, tag_align);

		ltc->tag_base = tag_base;
	}

mm_init:
	return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
}

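/*
 * Count the LTC partitions that are actually present (0x022438 appears to
 * give the total and 0x022554 a disable mask), read the slice count per
 * partition, then set up the tag backing store.
 */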
int
gf100_ltc_oneinit(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	const u32 parts = nvkm_rd32(device, 0x022438);
	const u32 mask = nvkm_rd32(device, 0x022554);
	const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
	int i;

	for (i = 0; i < parts; i++) {
		if (!(mask & (1 << i)))
			ltc->ltc_nr++;
	}
	ltc->lts_nr = slice;

	return gf100_ltc_oneinit_tag_ram(ltc);
}

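/*
 * Basic bring-up: mask one interrupt source, program the partition count
 * and tag base address, and mirror what looks like the 128KiB large-page
 * setting from 0x100c80 into the cache configuration.
 */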
static void
gf100_ltc_init(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);

	nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}

static const struct nvkm_ltc_func
gf100_ltc = {
	.oneinit = gf100_ltc_oneinit,
	.init = gf100_ltc_init,
	.intr = gf100_ltc_intr,
	.cbc_clear = gf100_ltc_cbc_clear,
	.cbc_wait = gf100_ltc_cbc_wait,
	.zbc = 16,
	.zbc_clear_color = gf100_ltc_zbc_clear_color,
	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
	.invalidate = gf100_ltc_invalidate,
	.flush = gf100_ltc_flush,
};

int
gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
{
	return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
}