1 /*
2 * Copyright 2010 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include <xf86drm.h>
24 #include <nouveau_drm.h>
25 #include <nvif/class.h>
26 #include "util/format/u_format.h"
27 #include "util/format/u_format_s3tc.h"
28 #include "util/u_screen.h"
29 #include "pipe/p_screen.h"
30
31 #include "nouveau_vp3_video.h"
32
33 #include "nv50_ir_driver.h"
34
35 #include "nvc0/nvc0_context.h"
36 #include "nvc0/nvc0_screen.h"
37
38 #include "nvc0/mme/com9097.mme.h"
39 #include "nvc0/mme/com90c0.mme.h"
40 #include "nvc0/mme/comc597.mme.h"
41
42 #include "nv50/g80_texture.xml.h"
43
44 static bool
45 nvc0_screen_is_format_supported(struct pipe_screen *pscreen,
46 enum pipe_format format,
47 enum pipe_texture_target target,
48 unsigned sample_count,
49 unsigned storage_sample_count,
50 unsigned bindings)
51 {
52 const struct util_format_description *desc = util_format_description(format);
53
54 if (sample_count > 8)
55 return false;
56 if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */
57 return false;
58
59 if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
60 return false;
61
62 /* Short-circuit the rest of the logic -- this is used by the gallium frontend
63 * to determine valid MS levels in a no-attachments scenario.
64 */
65 if (format == PIPE_FORMAT_NONE && bindings & PIPE_BIND_RENDER_TARGET)
66 return true;
67
68 if ((bindings & PIPE_BIND_SAMPLER_VIEW) && (target != PIPE_BUFFER))
69 if (util_format_get_blocksizebits(format) == 3 * 32)
70 return false;
71
72 if (bindings & PIPE_BIND_LINEAR)
73 if (util_format_is_depth_or_stencil(format) ||
74 (target != PIPE_TEXTURE_1D &&
75 target != PIPE_TEXTURE_2D &&
76 target != PIPE_TEXTURE_RECT) ||
77 sample_count > 1)
78 return false;
79
80 /* Restrict ETC2 and ASTC formats here. These are only supported on GK20A
81 * and GM20B.
82 */
83 if ((desc->layout == UTIL_FORMAT_LAYOUT_ETC ||
84 desc->layout == UTIL_FORMAT_LAYOUT_ASTC) &&
85 nouveau_screen(pscreen)->device->chipset != 0x12b &&
86 nouveau_screen(pscreen)->class_3d != NVEA_3D_CLASS)
87 return false;
88
89 /* shared is always supported */
90 bindings &= ~(PIPE_BIND_LINEAR |
91 PIPE_BIND_SHARED);
92
93 if (bindings & PIPE_BIND_SHADER_IMAGE) {
94 if (format == PIPE_FORMAT_B8G8R8A8_UNORM &&
95 nouveau_screen(pscreen)->class_3d < NVE4_3D_CLASS) {
96 /* This should work on Fermi, but for currently unknown reasons it
97 * does not and breaks reads from PBOs. */
98 return false;
99 }
100 }
101
102 if (bindings & PIPE_BIND_INDEX_BUFFER) {
103 if (format != PIPE_FORMAT_R8_UINT &&
104 format != PIPE_FORMAT_R16_UINT &&
105 format != PIPE_FORMAT_R32_UINT)
106 return false;
107 bindings &= ~PIPE_BIND_INDEX_BUFFER;
108 }
109
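   /* Every binding that is still requested must be covered by either the
    * texture format table or the vertex format table. */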
110 return (( nvc0_format_table[format].usage |
111 nvc0_vertex_format[format].usage) & bindings) == bindings;
112 }
113
114 static int
115 nvc0_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
116 {
117 const uint16_t class_3d = nouveau_screen(pscreen)->class_3d;
118 const struct nouveau_screen *screen = nouveau_screen(pscreen);
119 struct nouveau_device *dev = screen->device;
120
121 switch (param) {
122 /* non-boolean caps */
123 case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
124 return 16384;
125 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
126 return 15;
127 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
128 return 12;
129 case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
130 return 2048;
131 case PIPE_CAP_MIN_TEXEL_OFFSET:
132 return -8;
133 case PIPE_CAP_MAX_TEXEL_OFFSET:
134 return 7;
135 case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
136 return -32;
137 case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
138 return 31;
139 case PIPE_CAP_MAX_TEXEL_BUFFER_ELEMENTS_UINT:
140 return 128 * 1024 * 1024;
141 case PIPE_CAP_GLSL_FEATURE_LEVEL:
142 return 430;
143 case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
144 return 430;
145 case PIPE_CAP_MAX_RENDER_TARGETS:
146 return 8;
147 case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
148 return 1;
149 case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
150 case PIPE_CAP_RASTERIZER_SUBPIXEL_BITS:
151 return 8;
152 case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
153 return 4;
154 case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
155 case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
156 return 128;
157 case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
158 case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
159 return 1024;
160 case PIPE_CAP_MAX_VERTEX_STREAMS:
161 return 4;
162 case PIPE_CAP_MAX_GS_INVOCATIONS:
163 return 32;
164 case PIPE_CAP_MAX_SHADER_BUFFER_SIZE_UINT:
165 return 1 << 27;
166 case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
167 return 2048;
168 case PIPE_CAP_MAX_VERTEX_ELEMENT_SRC_OFFSET:
169 return 2047;
170 case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
171 return 256;
172 case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
173 if (class_3d < GM107_3D_CLASS)
174 return 256; /* IMAGE bindings require alignment to 256 */
175 return 16;
176 case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
177 return 16;
178 case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
179 return NOUVEAU_MIN_BUFFER_MAP_ALIGN;
180 case PIPE_CAP_MAX_VIEWPORTS:
181 return NVC0_MAX_VIEWPORTS;
182 case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
183 return 4;
184 case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
185 return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50;
186 case PIPE_CAP_ENDIANNESS:
187 return PIPE_ENDIAN_LITTLE;
188 case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
189 return 30;
190 case PIPE_CAP_MAX_WINDOW_RECTANGLES:
191 return NVC0_MAX_WINDOW_RECTANGLES;
192 case PIPE_CAP_MAX_CONSERVATIVE_RASTER_SUBPIXEL_PRECISION_BIAS:
193 return class_3d >= GM200_3D_CLASS ? 8 : 0;
194 case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
195 return 64 * 1024 * 1024;
196 case PIPE_CAP_MAX_VARYINGS:
197 /* NOTE: These only count our slots for GENERIC varyings.
198 * The address space may be larger, but the actual hard limit seems to be
199 * less than what the address space layout permits, so don't add TEXCOORD,
200 * COLOR, etc. here.
201 */
202 return 0x1f0 / 16;
203 case PIPE_CAP_MAX_VERTEX_BUFFERS:
204 return 16;
205 case PIPE_CAP_GL_BEGIN_END_BUFFER_SIZE:
206 return 512 * 1024; /* TODO: Investigate tuning this */
207 case PIPE_CAP_MAX_TEXTURE_MB:
208 return 0; /* TODO: use 1/2 of VRAM for this? */
209
210 case PIPE_CAP_TIMER_RESOLUTION:
211 return 1000;
212
213 case PIPE_CAP_SUPPORTED_PRIM_MODES_WITH_RESTART:
214 case PIPE_CAP_SUPPORTED_PRIM_MODES:
215 return BITFIELD_MASK(MESA_PRIM_COUNT);
216
217 /* supported caps */
218 case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
219 case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE:
220 case PIPE_CAP_TEXTURE_SWIZZLE:
221 case PIPE_CAP_NPOT_TEXTURES:
222 case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
223 case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
224 case PIPE_CAP_ANISOTROPIC_FILTER:
225 case PIPE_CAP_SEAMLESS_CUBE_MAP:
226 case PIPE_CAP_CUBE_MAP_ARRAY:
227 case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
228 case PIPE_CAP_TEXTURE_MULTISAMPLE:
229 case PIPE_CAP_DEPTH_CLIP_DISABLE:
230 case PIPE_CAP_TGSI_TEXCOORD:
231 case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
232 case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
233 case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
234 case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
235 case PIPE_CAP_VERTEX_COLOR_CLAMPED:
236 case PIPE_CAP_QUERY_TIMESTAMP:
237 case PIPE_CAP_QUERY_TIME_ELAPSED:
238 case PIPE_CAP_OCCLUSION_QUERY:
239 case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
240 case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
241 case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
242 case PIPE_CAP_BLEND_EQUATION_SEPARATE:
243 case PIPE_CAP_INDEP_BLEND_ENABLE:
244 case PIPE_CAP_INDEP_BLEND_FUNC:
245 case PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT:
246 case PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
247 case PIPE_CAP_PRIMITIVE_RESTART:
248 case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
249 case PIPE_CAP_VS_INSTANCEID:
250 case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
251 case PIPE_CAP_CONDITIONAL_RENDER:
252 case PIPE_CAP_TEXTURE_BARRIER:
253 case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
254 case PIPE_CAP_START_INSTANCE:
255 case PIPE_CAP_DRAW_INDIRECT:
256 case PIPE_CAP_USER_VERTEX_BUFFERS:
257 case PIPE_CAP_TEXTURE_QUERY_LOD:
258 case PIPE_CAP_SAMPLE_SHADING:
259 case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
260 case PIPE_CAP_TEXTURE_GATHER_SM5:
261 case PIPE_CAP_FS_FINE_DERIVATIVE:
262 case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
263 case PIPE_CAP_SAMPLER_VIEW_TARGET:
264 case PIPE_CAP_CLIP_HALFZ:
265 case PIPE_CAP_POLYGON_OFFSET_CLAMP:
266 case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
267 case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
268 case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
269 case PIPE_CAP_DEPTH_BOUNDS_TEST:
270 case PIPE_CAP_TEXTURE_QUERY_SAMPLES:
271 case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
272 case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
273 case PIPE_CAP_DRAW_PARAMETERS:
274 case PIPE_CAP_SHADER_PACK_HALF_FLOAT:
275 case PIPE_CAP_MULTI_DRAW_INDIRECT:
276 case PIPE_CAP_MEMOBJ:
277 case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
278 case PIPE_CAP_FS_FACE_IS_INTEGER_SYSVAL:
279 case PIPE_CAP_QUERY_BUFFER_OBJECT:
280 case PIPE_CAP_INVALIDATE_BUFFER:
281 case PIPE_CAP_STRING_MARKER:
282 case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
283 case PIPE_CAP_CULL_DISTANCE:
284 case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
285 case PIPE_CAP_SHADER_GROUP_VOTE:
286 case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
287 case PIPE_CAP_SHADER_ARRAY_COMPONENTS:
288 case PIPE_CAP_LEGACY_MATH_RULES:
289 case PIPE_CAP_DOUBLES:
290 case PIPE_CAP_INT64:
291 case PIPE_CAP_TGSI_TEX_TXF_LZ:
292 case PIPE_CAP_SHADER_CLOCK:
293 case PIPE_CAP_COMPUTE:
294 case PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX:
295 case PIPE_CAP_QUERY_SO_OVERFLOW:
296 case PIPE_CAP_TGSI_DIV:
297 case PIPE_CAP_IMAGE_ATOMIC_INC_WRAP:
298 case PIPE_CAP_DEMOTE_TO_HELPER_INVOCATION:
299 case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
300 case PIPE_CAP_TEXTURE_SHADOW_LOD:
301 case PIPE_CAP_CLEAR_SCISSORED:
302 case PIPE_CAP_IMAGE_STORE_FORMATTED:
303 case PIPE_CAP_QUERY_MEMORY_INFO:
304 return 1;
305 case PIPE_CAP_TEXTURE_TRANSFER_MODES:
306 return nouveau_screen(pscreen)->vram_domain & NOUVEAU_BO_VRAM ? PIPE_TEXTURE_TRANSFER_BLIT : 0;
307 case PIPE_CAP_FBFETCH:
308 return class_3d >= NVE4_3D_CLASS ? 1 : 0; /* needs testing on fermi */
309 case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
310 case PIPE_CAP_SHADER_BALLOT:
311 return class_3d >= NVE4_3D_CLASS;
312 case PIPE_CAP_BINDLESS_TEXTURE:
313 return class_3d >= NVE4_3D_CLASS;
314 case PIPE_CAP_IMAGE_ATOMIC_FLOAT_ADD:
315 return class_3d < GM107_3D_CLASS; /* needs additional lowering */
316 case PIPE_CAP_POLYGON_MODE_FILL_RECTANGLE:
317 case PIPE_CAP_VS_LAYER_VIEWPORT:
318 case PIPE_CAP_TES_LAYER_VIEWPORT:
319 case PIPE_CAP_POST_DEPTH_COVERAGE:
320 case PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_TRIANGLES:
321 case PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_POINTS_LINES:
322 case PIPE_CAP_CONSERVATIVE_RASTER_POST_DEPTH_COVERAGE:
323 case PIPE_CAP_PROGRAMMABLE_SAMPLE_LOCATIONS:
324 case PIPE_CAP_VIEWPORT_SWIZZLE:
325 case PIPE_CAP_VIEWPORT_MASK:
326 case PIPE_CAP_SAMPLER_REDUCTION_MINMAX:
327 return class_3d >= GM200_3D_CLASS;
328 case PIPE_CAP_CONSERVATIVE_RASTER_PRE_SNAP_TRIANGLES:
329 return class_3d >= GP100_3D_CLASS;
330 case PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY:
331 case PIPE_CAP_SYSTEM_SVM:
332 return screen->has_svm ? 1 : 0;
333
334 case PIPE_CAP_GL_SPIRV:
335 case PIPE_CAP_GL_SPIRV_VARIABLE_POINTERS:
336 return 1;
337
338 /* nir related caps */
339 case PIPE_CAP_NIR_IMAGES_AS_DEREF:
340 return 0;
341
342 case PIPE_CAP_PCI_GROUP:
343 case PIPE_CAP_PCI_BUS:
344 case PIPE_CAP_PCI_DEVICE:
345 case PIPE_CAP_PCI_FUNCTION:
346 return 0;
347
348 case PIPE_CAP_OPENCL_INTEGER_FUNCTIONS: /* could be done */
349 case PIPE_CAP_INTEGER_MULTIPLY_32X16: /* could be done */
350 case PIPE_CAP_MAP_UNSYNCHRONIZED_THREAD_SAFE: /* when we fix MT stuff */
351 case PIPE_CAP_ALPHA_TO_COVERAGE_DITHER_CONTROL: /* TODO */
352 case PIPE_CAP_SHADER_ATOMIC_INT64: /* TODO */
353 case PIPE_CAP_HARDWARE_GL_SELECT:
354 return 0;
355
356 case PIPE_CAP_VENDOR_ID:
357 return 0x10de;
358 case PIPE_CAP_DEVICE_ID: {
359 uint64_t device_id;
360 if (nouveau_getparam(dev, NOUVEAU_GETPARAM_PCI_DEVICE, &device_id)) {
361 NOUVEAU_ERR("NOUVEAU_GETPARAM_PCI_DEVICE failed.\n");
362 return -1;
363 }
364 return device_id;
365 }
366 case PIPE_CAP_ACCELERATED:
367 return 1;
368 case PIPE_CAP_VIDEO_MEMORY:
369 return dev->vram_size >> 20;
370 case PIPE_CAP_UMA:
371 return nouveau_screen(pscreen)->is_uma;
372
373 default:
374 return u_pipe_screen_get_param_defaults(pscreen, param);
375 }
376 }
377
378 static int
379 nvc0_screen_get_shader_param(struct pipe_screen *pscreen,
380 enum pipe_shader_type shader,
381 enum pipe_shader_cap param)
382 {
383 const struct nouveau_screen *screen = nouveau_screen(pscreen);
384 const uint16_t class_3d = screen->class_3d;
385
386 switch (shader) {
387 case PIPE_SHADER_VERTEX:
388 case PIPE_SHADER_GEOMETRY:
389 case PIPE_SHADER_FRAGMENT:
390 case PIPE_SHADER_COMPUTE:
391 case PIPE_SHADER_TESS_CTRL:
392 case PIPE_SHADER_TESS_EVAL:
393 break;
394 default:
395 return 0;
396 }
397
398 switch (param) {
399 case PIPE_SHADER_CAP_SUPPORTED_IRS: {
400 uint32_t irs = 1 << PIPE_SHADER_IR_NIR;
401 if (screen->force_enable_cl)
402 irs |= 1 << PIPE_SHADER_IR_NIR_SERIALIZED;
403 return irs;
404 }
405 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
406 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
407 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
408 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
409 return 16384;
410 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
411 return 16;
412 case PIPE_SHADER_CAP_MAX_INPUTS:
413 return 0x200 / 16;
414 case PIPE_SHADER_CAP_MAX_OUTPUTS:
415 return 32;
416 case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
417 return NVC0_MAX_CONSTBUF_SIZE;
418 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
419 return NVC0_MAX_PIPE_CONSTBUFS;
420 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
421 return shader != PIPE_SHADER_FRAGMENT;
422 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
423 /* The hardware doesn't support indirect addressing of fragment program
424 * inputs on Volta. The binary driver generates a function to handle
425 * every possible indirection and calls that function indirectly
426 * instead.
427 */
428 if (class_3d >= GV100_3D_CLASS)
429 return shader != PIPE_SHADER_FRAGMENT;
430 return 1;
431 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
432 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
433 return 1;
434 case PIPE_SHADER_CAP_MAX_TEMPS:
435 return NVC0_CAP_MAX_PROGRAM_TEMPS;
436 case PIPE_SHADER_CAP_CONT_SUPPORTED:
437 return 1;
438 case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
439 return 1;
440 case PIPE_SHADER_CAP_SUBROUTINES:
441 return 1;
442 case PIPE_SHADER_CAP_INTEGERS:
443 return 1;
444 case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
445 case PIPE_SHADER_CAP_INT64_ATOMICS:
446 case PIPE_SHADER_CAP_FP16:
447 case PIPE_SHADER_CAP_FP16_DERIVATIVES:
448 case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
449 case PIPE_SHADER_CAP_INT16:
450 case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
451 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
452 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
453 return 0;
454 case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
455 return NVC0_MAX_BUFFERS;
456 case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
457 return (class_3d >= NVE4_3D_CLASS) ? 32 : 16;
458 case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
459 return (class_3d >= NVE4_3D_CLASS) ? 32 : 16;
460 case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
461 if (class_3d >= NVE4_3D_CLASS)
462 return NVC0_MAX_IMAGES;
463 if (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE)
464 return NVC0_MAX_IMAGES;
465 return 0;
466 default:
467 NOUVEAU_ERR("unknown PIPE_SHADER_CAP %d\n", param);
468 return 0;
469 }
470 }
471
472 static float
473 nvc0_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
474 {
475 const uint16_t class_3d = nouveau_screen(pscreen)->class_3d;
476
477 switch (param) {
478 case PIPE_CAPF_MIN_LINE_WIDTH:
479 case PIPE_CAPF_MIN_LINE_WIDTH_AA:
480 case PIPE_CAPF_MIN_POINT_SIZE:
481 case PIPE_CAPF_MIN_POINT_SIZE_AA:
482 return 1;
483 case PIPE_CAPF_POINT_SIZE_GRANULARITY:
484 case PIPE_CAPF_LINE_WIDTH_GRANULARITY:
485 return 0.1;
486 case PIPE_CAPF_MAX_LINE_WIDTH:
487 case PIPE_CAPF_MAX_LINE_WIDTH_AA:
488 return 10.0f;
489 case PIPE_CAPF_MAX_POINT_SIZE:
490 return 63.0f;
491 case PIPE_CAPF_MAX_POINT_SIZE_AA:
492 return 63.375f;
493 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
494 return 16.0f;
495 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
496 return 15.0f;
497 case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
498 return 0.0f;
499 case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
500 return class_3d >= GM200_3D_CLASS ? 0.75f : 0.0f;
501 case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
502 return class_3d >= GM200_3D_CLASS ? 0.25f : 0.0f;
503 }
504
505 NOUVEAU_ERR("unknown PIPE_CAPF %d\n", param);
506 return 0.0f;
507 }
508
509 static int
510 nvc0_screen_get_compute_param(struct pipe_screen *pscreen,
511 enum pipe_shader_ir ir_type,
512 enum pipe_compute_cap param, void *data)
513 {
514 struct nvc0_screen *screen = nvc0_screen(pscreen);
515 struct nouveau_device *dev = screen->base.device;
516 const uint16_t obj_class = screen->compute->oclass;
517
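/* RET(x) copies the answer into the caller's buffer (when data is non-NULL)
 * and returns sizeof(x), i.e. the number of bytes describing the cap. */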
518 #define RET(x) do { \
519 if (data) \
520 memcpy(data, x, sizeof(x)); \
521 return sizeof(x); \
522 } while (0)
523
524 switch (param) {
525 case PIPE_COMPUTE_CAP_GRID_DIMENSION:
526 RET((uint64_t []) { 3 });
527 case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
528 if (obj_class >= NVE4_COMPUTE_CLASS) {
529 RET(((uint64_t []) { 0x7fffffff, 65535, 65535 }));
530 } else {
531 RET(((uint64_t []) { 65535, 65535, 65535 }));
532 }
533 case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
534 RET(((uint64_t []) { 1024, 1024, 64 }));
535 case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
536 RET((uint64_t []) { 1024 });
537 case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
538 if (obj_class >= NVE4_COMPUTE_CLASS) {
539 RET((uint64_t []) { 1024 });
540 } else {
541 RET((uint64_t []) { 512 });
542 }
543 case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE: /* g[] */
544 RET((uint64_t []) { nouveau_device_get_global_mem_size(dev) });
545 case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE: /* s[] */
546 switch (obj_class) {
547 case GM200_COMPUTE_CLASS:
548 RET((uint64_t []) { 96 << 10 });
549 case GM107_COMPUTE_CLASS:
550 RET((uint64_t []) { 64 << 10 });
551 default:
552 RET((uint64_t []) { 48 << 10 });
553 }
554 case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE: /* l[] */
555 RET((uint64_t []) { 512 << 10 });
556 case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE: /* c[], arbitrary limit */
557 RET((uint64_t []) { 4096 });
558 case PIPE_COMPUTE_CAP_SUBGROUP_SIZES:
559 RET((uint32_t []) { 32 });
560 case PIPE_COMPUTE_CAP_MAX_SUBGROUPS:
561 RET((uint32_t []) { 0 });
562 case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
563 RET((uint64_t []) { nouveau_device_get_global_mem_size(dev) });
564 case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
565 RET((uint32_t []) { NVC0_MAX_IMAGES });
566 case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
567 RET((uint32_t []) { screen->mp_count_compute });
568 case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
569 RET((uint32_t []) { 512 }); /* FIXME: arbitrary limit */
570 case PIPE_COMPUTE_CAP_ADDRESS_BITS:
571 RET((uint32_t []) { 64 });
572 default:
573 return 0;
574 }
575
576 #undef RET
577 }
578
579 static void
580 nvc0_screen_get_sample_pixel_grid(struct pipe_screen *pscreen,
581 unsigned sample_count,
582 unsigned *width, unsigned *height)
583 {
584 switch (sample_count) {
585 case 0:
586 case 1:
587 /* this could be 4x4, but the GL state tracker makes it difficult to
588 * create a 1x MSAA texture, and smaller grids save CB space */
589 *width = 2;
590 *height = 4;
591 break;
592 case 2:
593 *width = 2;
594 *height = 4;
595 break;
596 case 4:
597 *width = 2;
598 *height = 2;
599 break;
600 case 8:
601 *width = 1;
602 *height = 2;
603 break;
604 default:
605 assert(0);
606 }
607 }
608
609 static void
610 nvc0_screen_destroy(struct pipe_screen *pscreen)
611 {
612 struct nvc0_screen *screen = nvc0_screen(pscreen);
613
614 if (!nouveau_drm_screen_unref(&screen->base))
615 return;
616
617 if (screen->blitter)
618 nvc0_blitter_destroy(screen);
619 if (screen->pm.prog) {
620 screen->pm.prog->code = NULL; /* hardcoded, don't FREE */
621 nvc0_program_destroy(NULL, screen->pm.prog);
622 FREE(screen->pm.prog);
623 }
624
625 nouveau_bo_ref(NULL, &screen->text);
626 nouveau_bo_ref(NULL, &screen->uniform_bo);
627 nouveau_bo_ref(NULL, &screen->tls);
628 nouveau_bo_ref(NULL, &screen->txc);
629 nouveau_bo_ref(NULL, &screen->fence.bo);
630 nouveau_bo_ref(NULL, &screen->poly_cache);
631
632 nouveau_heap_free(&screen->lib_code);
633 nouveau_heap_destroy(&screen->text_heap);
634
635 FREE(screen->tic.entries);
636
637 nouveau_object_del(&screen->eng3d);
638 nouveau_object_del(&screen->eng2d);
639 nouveau_object_del(&screen->m2mf);
640 nouveau_object_del(&screen->copy);
641 nouveau_object_del(&screen->compute);
642 nouveau_object_del(&screen->nvsw);
643
644 nouveau_screen_fini(&screen->base);
645 simple_mtx_destroy(&screen->state_lock);
646
647 FREE(screen);
648 }
649
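/* Upload an MME macro: bind the macro backing method m (macro methods start
 * at 0x3800, one pair of methods per macro, hence the divide by 8) to code-RAM
 * position pos, stream its code, and return the next free position. */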
650 static int
651 nvc0_graph_set_macro(struct nvc0_screen *screen, uint32_t m, unsigned pos,
652 unsigned size, const uint32_t *data)
653 {
654 struct nouveau_pushbuf *push = screen->base.pushbuf;
655
656 size /= 4;
657
658 assert((pos + size) <= 0x800);
659
660 BEGIN_NVC0(push, SUBC_3D(NVC0_GRAPH_MACRO_ID), 2);
661 PUSH_DATA (push, (m - 0x3800) / 8);
662 PUSH_DATA (push, pos);
663 BEGIN_1IC0(push, SUBC_3D(NVC0_GRAPH_MACRO_UPLOAD_POS), size + 1);
664 PUSH_DATA (push, pos);
665 PUSH_DATAp(push, data, size);
666
667 return pos + size;
668 }
669
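/* Turing variant of the macro upload. The upload protocol is unchanged; only
 * the position bookkeeping differs (the cursor advances by size/3), presumably
 * because the newer macro processor uses a wider instruction encoding. */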
670 static int
671 tu102_graph_set_macro(struct nvc0_screen *screen, uint32_t m, unsigned pos,
672 unsigned size, const uint32_t *data)
673 {
674 struct nouveau_pushbuf *push = screen->base.pushbuf;
675
676 size /= 4;
677
678 assert((pos + size) <= 0x800);
679
680 BEGIN_NVC0(push, SUBC_3D(NVC0_GRAPH_MACRO_ID), 2);
681 PUSH_DATA (push, (m - 0x3800) / 8);
682 PUSH_DATA (push, pos);
683 BEGIN_1IC0(push, SUBC_3D(NVC0_GRAPH_MACRO_UPLOAD_POS), size + 1);
684 PUSH_DATA (push, pos);
685 PUSH_DATAp(push, data, size);
686
687 return pos + (size / 3);
688 }
689
690 static void
691 nvc0_magic_3d_init(struct nouveau_pushbuf *push, uint16_t obj_class)
692 {
693 BEGIN_NVC0(push, SUBC_3D(0x10cc), 1);
694 PUSH_DATA (push, 0xff);
695 BEGIN_NVC0(push, SUBC_3D(0x10e0), 2);
696 PUSH_DATA (push, 0xff);
697 PUSH_DATA (push, 0xff);
698 BEGIN_NVC0(push, SUBC_3D(0x10ec), 2);
699 PUSH_DATA (push, 0xff);
700 PUSH_DATA (push, 0xff);
701 if (obj_class < GV100_3D_CLASS) {
702 BEGIN_NVC0(push, SUBC_3D(0x074c), 1);
703 PUSH_DATA (push, 0x3f);
704 }
705
706 BEGIN_NVC0(push, SUBC_3D(0x16a8), 1);
707 PUSH_DATA (push, (3 << 16) | 3);
708 BEGIN_NVC0(push, SUBC_3D(0x1794), 1);
709 PUSH_DATA (push, (2 << 16) | 2);
710
711 if (obj_class < GM107_3D_CLASS) {
712 BEGIN_NVC0(push, SUBC_3D(0x12ac), 1);
713 PUSH_DATA (push, 0);
714 }
715 BEGIN_NVC0(push, SUBC_3D(0x0218), 1);
716 PUSH_DATA (push, 0x10);
717 BEGIN_NVC0(push, SUBC_3D(0x10fc), 1);
718 PUSH_DATA (push, 0x10);
719 BEGIN_NVC0(push, SUBC_3D(0x1290), 1);
720 PUSH_DATA (push, 0x10);
721 BEGIN_NVC0(push, SUBC_3D(0x12d8), 2);
722 PUSH_DATA (push, 0x10);
723 PUSH_DATA (push, 0x10);
724 BEGIN_NVC0(push, SUBC_3D(0x1140), 1);
725 PUSH_DATA (push, 0x10);
726 BEGIN_NVC0(push, SUBC_3D(0x1610), 1);
727 PUSH_DATA (push, 0xe);
728
729 BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_GEN_MODE), 1);
730 PUSH_DATA (push, NVC0_3D_VERTEX_ID_GEN_MODE_DRAW_ARRAYS_ADD_START);
731 BEGIN_NVC0(push, SUBC_3D(0x030c), 1);
732 PUSH_DATA (push, 0);
733 BEGIN_NVC0(push, SUBC_3D(0x0300), 1);
734 PUSH_DATA (push, 3);
735
736 if (obj_class < GV100_3D_CLASS) {
737 BEGIN_NVC0(push, SUBC_3D(0x02d0), 1);
738 PUSH_DATA (push, 0x3fffff);
739 }
740 BEGIN_NVC0(push, SUBC_3D(0x0fdc), 1);
741 PUSH_DATA (push, 1);
742 BEGIN_NVC0(push, SUBC_3D(0x19c0), 1);
743 PUSH_DATA (push, 1);
744
745 if (obj_class < GM107_3D_CLASS) {
746 BEGIN_NVC0(push, SUBC_3D(0x075c), 1);
747 PUSH_DATA (push, 3);
748
749 if (obj_class >= NVE4_3D_CLASS) {
750 BEGIN_NVC0(push, SUBC_3D(0x07fc), 1);
751 PUSH_DATA (push, 1);
752 }
753 }
754
755 /* TODO: find out what software methods 0x1528, 0x1280 and (on nve4) 0x02dc
756 * are supposed to do */
757 }
758
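/* Emit the next fence value: bump the software sequence counter and have the
 * 3D engine write it to the fence BO with a short QUERY_GET, so that
 * nvc0_screen_fence_update() can read back how far the GPU has progressed. */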
759 static void
760 nvc0_screen_fence_emit(struct pipe_context *pcontext, u32 *sequence,
761 struct nouveau_bo *wait)
762 {
763 struct nvc0_context *nvc0 = nvc0_context(pcontext);
764 struct nvc0_screen *screen = nvc0->screen;
765 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
766 struct nouveau_pushbuf_refn ref = { wait, NOUVEAU_BO_GART | NOUVEAU_BO_RDWR };
767
768 /* we need to do it after possible flush in MARK_RING */
769 *sequence = ++screen->base.fence.sequence;
770
771 assert(PUSH_AVAIL(push) + push->rsvd_kick >= 5);
772 PUSH_DATA (push, NVC0_FIFO_PKHDR_SQ(NVC0_3D(QUERY_ADDRESS_HIGH), 4));
773 PUSH_DATAh(push, screen->fence.bo->offset);
774 PUSH_DATA (push, screen->fence.bo->offset);
775 PUSH_DATA (push, *sequence);
776 PUSH_DATA (push, NVC0_3D_QUERY_GET_FENCE | NVC0_3D_QUERY_GET_SHORT |
777 (0xf << NVC0_3D_QUERY_GET_UNIT__SHIFT));
778
779 nouveau_pushbuf_refn(push, &ref, 1);
780 }
781
782 static u32
783 nvc0_screen_fence_update(struct pipe_screen *pscreen)
784 {
785 struct nvc0_screen *screen = nvc0_screen(pscreen);
786 return screen->fence.map[0];
787 }
788
789 static int
790 nvc0_screen_init_compute(struct nvc0_screen *screen)
791 {
792 const struct nouveau_mclass computes[] = {
793 { AD102_COMPUTE_CLASS, -1 },
794 { GA102_COMPUTE_CLASS, -1 },
795 { TU102_COMPUTE_CLASS, -1 },
796 { GV100_COMPUTE_CLASS, -1 },
797 { GP104_COMPUTE_CLASS, -1 },
798 { GP100_COMPUTE_CLASS, -1 },
799 { GM200_COMPUTE_CLASS, -1 },
800 { GM107_COMPUTE_CLASS, -1 },
801 { NVF0_COMPUTE_CLASS, -1 },
802 { NVE4_COMPUTE_CLASS, -1 },
803 /* In theory, GF110+ should also support NVC8_COMPUTE_CLASS but,
804 * in practice, using it triggers an ILLEGAL_CLASS error in dmesg. */
805 // { NVC8_COMPUTE_CLASS, -1 },
806 { NVC0_COMPUTE_CLASS, -1 },
807 {}
808 };
809 struct nouveau_object *chan = screen->base.channel;
810 int ret;
811
812 screen->base.base.get_compute_param = nvc0_screen_get_compute_param;
813
814 ret = nouveau_object_mclass(chan, computes);
815 if (ret < 0) {
816 NOUVEAU_ERR("No supported compute class: %d\n", ret);
817 return ret;
818 }
819
820 ret = nouveau_object_new(chan, 0xbeef00c0, computes[ret].oclass, NULL, 0, &screen->compute);
821 if (ret) {
822 NOUVEAU_ERR("Failed to allocate compute class: %d\n", ret);
823 return ret;
824 }
825
826 if (screen->compute->oclass < NVE4_COMPUTE_CLASS)
827 return nvc0_screen_compute_setup(screen, screen->base.pushbuf);
828
829 return nve4_screen_compute_setup(screen, screen->base.pushbuf);
830 }
831
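/* Grow the thread-local storage area. The per-warp footprint is
 * (lpos + lneg) * 32 -- presumably per-thread local memory times the 32 lanes
 * of a warp -- plus call-stack space, scaled by the maximum number of resident
 * warps per MP (64 on Kepler and later, 48 on Fermi) and by the MP count. */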
832 static int
833 nvc0_screen_resize_tls_area(struct nvc0_screen *screen,
834 uint32_t lpos, uint32_t lneg, uint32_t cstack)
835 {
836 struct nouveau_bo *bo = NULL;
837 int ret;
838 uint64_t size = (lpos + lneg) * 32 + cstack;
839
840 if (size >= (1 << 20)) {
841 NOUVEAU_ERR("requested TLS size too large: 0x%"PRIx64"\n", size);
842 return -1;
843 }
844
845 size *= (screen->base.device->chipset >= 0xe0) ? 64 : 48; /* max warps */
846 size = align(size, 0x8000);
847 size *= screen->mp_count;
848
849 size = align(size, 1 << 17);
850
851 ret = nouveau_bo_new(screen->base.device, NV_VRAM_DOMAIN(&screen->base), 1 << 17, size,
852 NULL, &bo);
853 if (ret)
854 return ret;
855
856 /* Make sure that the pushbuf has acquired a reference to the old tls
857 * segment, as it may have commands that will reference it.
858 */
859 if (screen->tls)
860 PUSH_REF1(screen->base.pushbuf, screen->tls,
861 NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RDWR);
862 nouveau_bo_ref(NULL, &screen->tls);
863 screen->tls = bo;
864 return 0;
865 }
866
867 int
868 nvc0_screen_resize_text_area(struct nvc0_screen *screen, struct nouveau_pushbuf *push,
869 uint64_t size)
870 {
871 struct nouveau_bo *bo;
872 int ret;
873
874 ret = nouveau_bo_new(screen->base.device, NV_VRAM_DOMAIN(&screen->base),
875 1 << 17, size, NULL, &bo);
876 if (ret)
877 return ret;
878
879 /* Make sure that the pushbuf has acquired a reference to the old text
880 * segment, as it may have commands that will reference it.
881 */
882 if (screen->text)
883 PUSH_REF1(screen->base.pushbuf, screen->text,
884 NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD);
885 nouveau_bo_ref(NULL, &screen->text);
886 screen->text = bo;
887
888 nouveau_heap_free(&screen->lib_code);
889 nouveau_heap_destroy(&screen->text_heap);
890
891 /* XXX: we get a page fault at the end of the code buffer every few
892 * launches; avoid the last 256 bytes to work around it (prefetch?).
893 */
894 nouveau_heap_init(&screen->text_heap, 0, size - 0x100);
895
896 /* update the code segment setup */
897 if (screen->eng3d->oclass < GV100_3D_CLASS) {
898 BEGIN_NVC0(push, NVC0_3D(CODE_ADDRESS_HIGH), 2);
899 PUSH_DATAh(push, screen->text->offset);
900 PUSH_DATA (push, screen->text->offset);
901 if (screen->compute) {
902 BEGIN_NVC0(push, NVC0_CP(CODE_ADDRESS_HIGH), 2);
903 PUSH_DATAh(push, screen->text->offset);
904 PUSH_DATA (push, screen->text->offset);
905 }
906 }
907
908 return 0;
909 }
910
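/* Bind a constant buffer to (stage, index) for the 3D engine. Passing a
 * negative size unbinds the slot: the low bit of the CB_BIND argument is the
 * valid flag. */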
911 void
912 nvc0_screen_bind_cb_3d(struct nvc0_screen *screen, struct nouveau_pushbuf *push,
913 bool *can_serialize, int stage, int index, int size, uint64_t addr)
914 {
915 assert(stage != 5);
916
917 if (screen->base.class_3d >= GM107_3D_CLASS) {
918 struct nvc0_cb_binding *binding = &screen->cb_bindings[stage][index];
919
920 // TODO: Better figure out the conditions in which this is needed
921 bool serialize = binding->addr == addr && binding->size != size;
922 if (can_serialize)
923 serialize = serialize && *can_serialize;
924 if (serialize) {
925 IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
926 if (can_serialize)
927 *can_serialize = false;
928 }
929
930 binding->addr = addr;
931 binding->size = size;
932 }
933
934 if (size >= 0) {
935 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
936 PUSH_DATA (push, size);
937 PUSH_DATAh(push, addr);
938 PUSH_DATA (push, addr);
939 }
940 IMMED_NVC0(push, NVC0_3D(CB_BIND(stage)), (index << 4) | (size >= 0));
941 }
942
943 static const void *
944 nvc0_screen_get_compiler_options(struct pipe_screen *pscreen,
945 enum pipe_shader_ir ir,
946 enum pipe_shader_type shader)
947 {
948 struct nvc0_screen *screen = nvc0_screen(pscreen);
949 if (ir == PIPE_SHADER_IR_NIR)
950 return nv50_ir_nir_shader_compiler_options(screen->base.device->chipset, shader);
951 return NULL;
952 }
953
954 #define FAIL_SCREEN_INIT(str, err) \
955 do { \
956 NOUVEAU_ERR(str, err); \
957 goto fail; \
958 } while(0)
959
960 struct nouveau_screen *
961 nvc0_screen_create(struct nouveau_device *dev)
962 {
963 struct nvc0_screen *screen;
964 struct pipe_screen *pscreen;
965 struct nouveau_object *chan;
966
967 struct nouveau_pushbuf *push;
968 uint64_t value;
969 uint32_t flags;
970 int ret;
971 unsigned i;
972
973 switch (dev->chipset & ~0xf) {
974 case 0xc0:
975 case 0xd0:
976 case 0xe0:
977 case 0xf0:
978 case 0x100:
979 case 0x110:
980 case 0x120:
981 case 0x130:
982 case 0x140:
983 case 0x160:
984 case 0x170:
985 case 0x190:
986 break;
987 default:
988 return NULL;
989 }
990
991 screen = CALLOC_STRUCT(nvc0_screen);
992 if (!screen)
993 return NULL;
994 pscreen = &screen->base.base;
995 pscreen->destroy = nvc0_screen_destroy;
996
997 simple_mtx_init(&screen->state_lock, mtx_plain);
998
999 ret = nouveau_screen_init(&screen->base, dev);
1000 if (ret)
1001 FAIL_SCREEN_INIT("Base screen init failed: %d\n", ret);
1002 chan = screen->base.channel;
1003 push = screen->base.pushbuf;
1004 push->rsvd_kick = 5;
1005
1006 /* TODO: could this be higher on Kepler+? how does reclocking vs no
1007 * reclocking affect performance?
1008 * TODO: could this be higher on Fermi?
1009 */
1010 if (dev->chipset >= 0xe0)
1011 screen->base.transfer_pushbuf_threshold = 1024;
1012
1013 screen->base.vidmem_bindings |= PIPE_BIND_CONSTANT_BUFFER |
1014 PIPE_BIND_SHADER_BUFFER |
1015 PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
1016 PIPE_BIND_COMMAND_ARGS_BUFFER | PIPE_BIND_QUERY_BUFFER;
1017 screen->base.sysmem_bindings |=
1018 PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER;
1019
1020 if (screen->base.vram_domain & NOUVEAU_BO_GART) {
1021 screen->base.sysmem_bindings |= screen->base.vidmem_bindings;
1022 screen->base.vidmem_bindings = 0;
1023 }
1024
1025 pscreen->context_create = nvc0_create;
1026 pscreen->is_format_supported = nvc0_screen_is_format_supported;
1027 pscreen->get_param = nvc0_screen_get_param;
1028 pscreen->get_shader_param = nvc0_screen_get_shader_param;
1029 pscreen->get_paramf = nvc0_screen_get_paramf;
1030 pscreen->get_sample_pixel_grid = nvc0_screen_get_sample_pixel_grid;
1031 pscreen->get_driver_query_info = nvc0_screen_get_driver_query_info;
1032 pscreen->get_driver_query_group_info = nvc0_screen_get_driver_query_group_info;
1033 /* nir stuff */
1034 pscreen->get_compiler_options = nvc0_screen_get_compiler_options;
1035
1036 nvc0_screen_init_resource_functions(pscreen);
1037
1038 screen->base.base.get_video_param = nouveau_vp3_screen_get_video_param;
1039 screen->base.base.is_video_format_supported = nouveau_vp3_screen_video_supported;
1040
1041 flags = NOUVEAU_BO_GART | NOUVEAU_BO_MAP;
1042 if (screen->base.drm->version >= 0x01000202)
1043 flags |= NOUVEAU_BO_COHERENT;
1044
1045 ret = nouveau_bo_new(dev, flags, 0, 4096, NULL, &screen->fence.bo);
1046 if (ret)
1047 FAIL_SCREEN_INIT("Error allocating fence BO: %d\n", ret);
1048 BO_MAP(&screen->base, screen->fence.bo, 0, NULL);
1049 screen->fence.map = screen->fence.bo->map;
1050 screen->base.fence.emit = nvc0_screen_fence_emit;
1051 screen->base.fence.update = nvc0_screen_fence_update;
1052
1053 if (dev->chipset < 0x140) {
1054 ret = nouveau_object_new(chan, (dev->chipset < 0xe0) ? 0x1f906e : 0x906e,
1055 NVIF_CLASS_SW_GF100, NULL, 0, &screen->nvsw);
1056 if (ret)
1057 FAIL_SCREEN_INIT("Error creating SW object: %d\n", ret);
1058
1059 BEGIN_NVC0(push, SUBC_SW(NV01_SUBCHAN_OBJECT), 1);
1060 PUSH_DATA (push, screen->nvsw->handle);
1061 }
1062
1063 const struct nouveau_mclass m2mfs[] = {
1064 { NVF0_P2MF_CLASS, -1 },
1065 { NVE4_P2MF_CLASS, -1 },
1066 { NVC0_M2MF_CLASS, -1 },
1067 {}
1068 };
1069
1070 ret = nouveau_object_mclass(chan, m2mfs);
1071 if (ret < 0)
1072 FAIL_SCREEN_INIT("No supported m2mf class: %d\n", ret);
1073
1074 ret = nouveau_object_new(chan, 0xbeef323f, m2mfs[ret].oclass, NULL, 0,
1075 &screen->m2mf);
1076 if (ret)
1077 FAIL_SCREEN_INIT("Error allocating PGRAPH context for M2MF: %d\n", ret);
1078
1079 BEGIN_NVC0(push, SUBC_M2MF(NV01_SUBCHAN_OBJECT), 1);
1080 PUSH_DATA (push, screen->m2mf->oclass);
1081
1082 if (screen->m2mf->oclass >= NVE4_P2MF_CLASS) {
1083 const struct nouveau_mclass copys[] = {
1084 { AMPERE_DMA_COPY_B, -1 },
1085 { AMPERE_DMA_COPY_A, -1 },
1086 { TURING_DMA_COPY_A, -1 },
1087 { VOLTA_DMA_COPY_A, -1 },
1088 { PASCAL_DMA_COPY_B, -1 },
1089 { PASCAL_DMA_COPY_A, -1 },
1090 { MAXWELL_DMA_COPY_A, -1 },
1091 { KEPLER_DMA_COPY_A, -1 },
1092 {}
1093 };
1094
1095 ret = nouveau_object_mclass(chan, copys);
1096 if (ret < 0)
1097 FAIL_SCREEN_INIT("No supported copy engine class: %d\n", ret);
1098
1099 ret = nouveau_object_new(chan, 0, copys[ret].oclass, NULL, 0, &screen->copy);
1100 if (ret)
1101 FAIL_SCREEN_INIT("Error allocating copy engine class: %d\n", ret);
1102
1103 BEGIN_NVC0(push, SUBC_COPY(NV01_SUBCHAN_OBJECT), 1);
1104 PUSH_DATA (push, screen->copy->oclass);
1105 }
1106
1107 ret = nouveau_object_new(chan, 0xbeef902d, NVC0_2D_CLASS, NULL, 0,
1108 &screen->eng2d);
1109 if (ret)
1110 FAIL_SCREEN_INIT("Error allocating PGRAPH context for 2D: %d\n", ret);
1111
1112 BEGIN_NVC0(push, SUBC_2D(NV01_SUBCHAN_OBJECT), 1);
1113 PUSH_DATA (push, screen->eng2d->oclass);
1114 BEGIN_NVC0(push, SUBC_2D(NVC0_2D_SINGLE_GPC), 1);
1115 PUSH_DATA (push, 0);
1116 BEGIN_NVC0(push, NVC0_2D(OPERATION), 1);
1117 PUSH_DATA (push, NV50_2D_OPERATION_SRCCOPY);
1118 BEGIN_NVC0(push, NVC0_2D(CLIP_ENABLE), 1);
1119 PUSH_DATA (push, 0);
1120 BEGIN_NVC0(push, NVC0_2D(COLOR_KEY_ENABLE), 1);
1121 PUSH_DATA (push, 0);
1122 BEGIN_NVC0(push, NVC0_2D(SET_PIXELS_FROM_MEMORY_CORRAL_SIZE), 1);
1123 PUSH_DATA (push, 0x3f);
1124 BEGIN_NVC0(push, NVC0_2D(SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP), 1);
1125 PUSH_DATA (push, 1);
1126 BEGIN_NVC0(push, NVC0_2D(COND_MODE), 1);
1127 PUSH_DATA (push, NV50_2D_COND_MODE_ALWAYS);
1128
1129 BEGIN_NVC0(push, SUBC_2D(NVC0_GRAPH_NOTIFY_ADDRESS_HIGH), 2);
1130 PUSH_DATAh(push, screen->fence.bo->offset + 16);
1131 PUSH_DATA (push, screen->fence.bo->offset + 16);
1132
1133 const struct nouveau_mclass threeds[] = {
1134 { AD102_3D_CLASS, -1 },
1135 { GA102_3D_CLASS, -1 },
1136 { TU102_3D_CLASS, -1 },
1137 { GV100_3D_CLASS, -1 },
1138 { GP102_3D_CLASS, -1 },
1139 { GP100_3D_CLASS, -1 },
1140 { GM200_3D_CLASS, -1 },
1141 { GM107_3D_CLASS, -1 },
1142 { NVF0_3D_CLASS, -1 },
1143 { NVEA_3D_CLASS, -1 },
1144 { NVE4_3D_CLASS, -1 },
1145 { NVC8_3D_CLASS, -1 },
1146 { NVC1_3D_CLASS, -1 },
1147 { NVC0_3D_CLASS, -1 },
1148 {}
1149 };
1150
1151 ret = nouveau_object_mclass(chan, threeds);
1152 if (ret < 0)
1153 FAIL_SCREEN_INIT("No supported 3d class: %d\n", ret);
1154
1155 ret = nouveau_object_new(chan, 0xbeef003d, threeds[ret].oclass, NULL, 0,
1156 &screen->eng3d);
1157 if (ret)
1158 FAIL_SCREEN_INIT("Error allocating PGRAPH context for 3D: %d\n", ret);
1159 screen->base.class_3d = screen->eng3d->oclass;
1160
1161 BEGIN_NVC0(push, SUBC_3D(NV01_SUBCHAN_OBJECT), 1);
1162 PUSH_DATA (push, screen->eng3d->oclass);
1163
1164 BEGIN_NVC0(push, NVC0_3D(COND_MODE), 1);
1165 PUSH_DATA (push, NVC0_3D_COND_MODE_ALWAYS);
1166
1167 if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) {
1168 /* kill shaders after about 1 second (at 100 MHz) */
1169 BEGIN_NVC0(push, NVC0_3D(WATCHDOG_TIMER), 1);
1170 PUSH_DATA (push, 0x17);
1171 }
1172
1173 IMMED_NVC0(push, NVC0_3D(ZETA_COMP_ENABLE),
1174 screen->base.drm->version >= 0x01000101);
1175 BEGIN_NVC0(push, NVC0_3D(RT_COMP_ENABLE(0)), 8);
1176 for (i = 0; i < 8; ++i)
1177 PUSH_DATA(push, screen->base.drm->version >= 0x01000101);
1178
1179 BEGIN_NVC0(push, NVC0_3D(RT_CONTROL), 1);
1180 PUSH_DATA (push, 1);
1181
1182 BEGIN_NVC0(push, NVC0_3D(CSAA_ENABLE), 1);
1183 PUSH_DATA (push, 0);
1184 BEGIN_NVC0(push, NVC0_3D(MULTISAMPLE_ENABLE), 1);
1185 PUSH_DATA (push, 0);
1186 BEGIN_NVC0(push, NVC0_3D(MULTISAMPLE_MODE), 1);
1187 PUSH_DATA (push, NVC0_3D_MULTISAMPLE_MODE_MS1);
1188 BEGIN_NVC0(push, NVC0_3D(MULTISAMPLE_CTRL), 1);
1189 PUSH_DATA (push, 0);
1190 BEGIN_NVC0(push, NVC0_3D(LINE_WIDTH_SEPARATE), 1);
1191 PUSH_DATA (push, 1);
1192 BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_WITH_DRAW_ARRAYS), 1);
1193 PUSH_DATA (push, 1);
1194 BEGIN_NVC0(push, NVC0_3D(BLEND_SEPARATE_ALPHA), 1);
1195 PUSH_DATA (push, 1);
1196 BEGIN_NVC0(push, NVC0_3D(BLEND_ENABLE_COMMON), 1);
1197 PUSH_DATA (push, 0);
1198 BEGIN_NVC0(push, NVC0_3D(SHADE_MODEL), 1);
1199 PUSH_DATA (push, NVC0_3D_SHADE_MODEL_SMOOTH);
1200 if (screen->eng3d->oclass < NVE4_3D_CLASS) {
1201 IMMED_NVC0(push, NVC0_3D(TEX_MISC), 0);
1202 } else if (screen->eng3d->oclass < GA102_3D_CLASS) {
1203 BEGIN_NVC0(push, NVE4_3D(TEX_CB_INDEX), 1);
1204 PUSH_DATA (push, 15);
1205 }
1206 BEGIN_NVC0(push, NVC0_3D(CALL_LIMIT_LOG), 1);
1207 PUSH_DATA (push, 8); /* 128 */
1208 BEGIN_NVC0(push, NVC0_3D(ZCULL_STATCTRS_ENABLE), 1);
1209 PUSH_DATA (push, 1);
1210 if (screen->eng3d->oclass >= NVC1_3D_CLASS) {
1211 BEGIN_NVC0(push, NVC0_3D(CACHE_SPLIT), 1);
1212 PUSH_DATA (push, NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1);
1213 }
1214
1215 nvc0_magic_3d_init(push, screen->eng3d->oclass);
1216
1217 ret = nvc0_screen_resize_text_area(screen, push, 1 << 19);
1218 if (ret)
1219 FAIL_SCREEN_INIT("Error allocating TEXT area: %d\n", ret);
1220
1221 /* 6 user uniform areas, 6 driver areas, and 1 for the runout */
1222 ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 1 << 12, 13 << 16, NULL,
1223 &screen->uniform_bo);
1224 if (ret)
1225 FAIL_SCREEN_INIT("Error allocating uniform BO: %d\n", ret);
1226
1227 PUSH_REF1 (push, screen->uniform_bo, NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_WR);
1228
1229 /* return { 0.0, 0.0, 0.0, 0.0 } for out-of-bounds vtxbuf access */
1230 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
1231 PUSH_DATA (push, 256);
1232 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1233 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1234 BEGIN_1IC0(push, NVC0_3D(CB_POS), 5);
1235 PUSH_DATA (push, 0);
1236 PUSH_DATAf(push, 0.0f);
1237 PUSH_DATAf(push, 0.0f);
1238 PUSH_DATAf(push, 0.0f);
1239 PUSH_DATAf(push, 0.0f);
1240 BEGIN_NVC0(push, NVC0_3D(VERTEX_RUNOUT_ADDRESS_HIGH), 2);
1241 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1242 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_RUNOUT_INFO);
1243
1244 if (screen->base.drm->version >= 0x01000101) {
1245 ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_GRAPH_UNITS, &value);
1246 if (ret)
1247 FAIL_SCREEN_INIT("NOUVEAU_GETPARAM_GRAPH_UNITS failed: %d\n", ret);
1248 } else {
1249 if (dev->chipset >= 0xe0 && dev->chipset < 0xf0)
1250 value = (8 << 8) | 4;
1251 else
1252 value = (16 << 8) | 4;
1253 }
1254 screen->gpc_count = value & 0x000000ff;
1255 screen->mp_count = value >> 8;
1256 screen->mp_count_compute = screen->mp_count;
1257
1258 ret = nvc0_screen_resize_tls_area(screen, 128 * 16, 0, 0x200);
1259 if (ret)
1260 FAIL_SCREEN_INIT("Error allocating TLS area: %d\n", ret);
1261
1262 BEGIN_NVC0(push, NVC0_3D(TEMP_ADDRESS_HIGH), 4);
1263 PUSH_DATAh(push, screen->tls->offset);
1264 PUSH_DATA (push, screen->tls->offset);
1265 PUSH_DATA (push, screen->tls->size >> 32);
1266 PUSH_DATA (push, screen->tls->size);
1267 BEGIN_NVC0(push, NVC0_3D(WARP_TEMP_ALLOC), 1);
1268 PUSH_DATA (push, 0);
1269 /* Reduce likelihood of collision with real buffers by placing the hole at
1270 * the top of the 4G area. This will have to be dealt with for real
1271 * eventually by blocking off that area from the VM.
1272 */
1273 BEGIN_NVC0(push, NVC0_3D(LOCAL_BASE), 1);
1274 PUSH_DATA (push, 0xff << 24);
1275
1276 if (screen->eng3d->oclass < GM107_3D_CLASS) {
1277 ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 1 << 17, 1 << 20, NULL,
1278 &screen->poly_cache);
1279 if (ret)
1280 FAIL_SCREEN_INIT("Error allocating poly cache BO: %d\n", ret);
1281
1282 BEGIN_NVC0(push, NVC0_3D(VERTEX_QUARANTINE_ADDRESS_HIGH), 3);
1283 PUSH_DATAh(push, screen->poly_cache->offset);
1284 PUSH_DATA (push, screen->poly_cache->offset);
1285 PUSH_DATA (push, 3);
1286 }
1287
1288 ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 1 << 17, 1 << 17, NULL,
1289 &screen->txc);
1290 if (ret)
1291 FAIL_SCREEN_INIT("Error allocating txc BO: %d\n", ret);
1292
1293 BEGIN_NVC0(push, NVC0_3D(TIC_ADDRESS_HIGH), 3);
1294 PUSH_DATAh(push, screen->txc->offset);
1295 PUSH_DATA (push, screen->txc->offset);
1296 PUSH_DATA (push, NVC0_TIC_MAX_ENTRIES - 1);
1297 if (screen->eng3d->oclass >= GM107_3D_CLASS) {
1298 screen->tic.maxwell = true;
1299 if (screen->eng3d->oclass == GM107_3D_CLASS) {
1300 screen->tic.maxwell =
1301 debug_get_bool_option("NOUVEAU_MAXWELL_TIC", true);
1302 IMMED_NVC0(push, SUBC_3D(0x0f10), screen->tic.maxwell);
1303 }
1304 }
1305
1306 BEGIN_NVC0(push, NVC0_3D(TSC_ADDRESS_HIGH), 3);
1307 PUSH_DATAh(push, screen->txc->offset + 65536);
1308 PUSH_DATA (push, screen->txc->offset + 65536);
1309 PUSH_DATA (push, NVC0_TSC_MAX_ENTRIES - 1);
1310
1311 BEGIN_NVC0(push, NVC0_3D(SCREEN_Y_CONTROL), 1);
1312 PUSH_DATA (push, 0);
1313 BEGIN_NVC0(push, NVC0_3D(WINDOW_OFFSET_X), 2);
1314 PUSH_DATA (push, 0);
1315 PUSH_DATA (push, 0);
1316 BEGIN_NVC0(push, NVC0_3D(ZCULL_REGION), 1); /* deactivate ZCULL */
1317 PUSH_DATA (push, 0x3f);
1318
1319 BEGIN_NVC0(push, NVC0_3D(CLIP_RECTS_MODE), 1);
1320 PUSH_DATA (push, NVC0_3D_CLIP_RECTS_MODE_INSIDE_ANY);
1321 BEGIN_NVC0(push, NVC0_3D(CLIP_RECT_HORIZ(0)), 8 * 2);
1322 for (i = 0; i < 8 * 2; ++i)
1323 PUSH_DATA(push, 0);
1324 BEGIN_NVC0(push, NVC0_3D(CLIP_RECTS_EN), 1);
1325 PUSH_DATA (push, 0);
1326 BEGIN_NVC0(push, NVC0_3D(CLIPID_ENABLE), 1);
1327 PUSH_DATA (push, 0);
1328
1329 /* neither scissors, viewport nor stencil mask should affect clears */
1330 BEGIN_NVC0(push, NVC0_3D(CLEAR_FLAGS), 1);
1331 PUSH_DATA (push, 0);
1332
1333 BEGIN_NVC0(push, NVC0_3D(VIEWPORT_TRANSFORM_EN), 1);
1334 PUSH_DATA (push, 1);
1335 for (i = 0; i < NVC0_MAX_VIEWPORTS; i++) {
1336 BEGIN_NVC0(push, NVC0_3D(DEPTH_RANGE_NEAR(i)), 2);
1337 PUSH_DATAf(push, 0.0f);
1338 PUSH_DATAf(push, 1.0f);
1339 }
1340 BEGIN_NVC0(push, NVC0_3D(VIEW_VOLUME_CLIP_CTRL), 1);
1341 PUSH_DATA (push, NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1);
1342
1343 /* We use scissors instead of exact view volume clipping,
1344 * so they're always enabled.
1345 */
1346 for (i = 0; i < NVC0_MAX_VIEWPORTS; i++) {
1347 BEGIN_NVC0(push, NVC0_3D(SCISSOR_ENABLE(i)), 3);
1348 PUSH_DATA (push, 1);
1349 PUSH_DATA (push, 16384 << 16);
1350 PUSH_DATA (push, 16384 << 16);
1351 }
1352
1353 if (screen->eng3d->oclass < TU102_3D_CLASS) {
1354 #define MK_MACRO(m, n) i = nvc0_graph_set_macro(screen, m, i, sizeof(n), n);
1355
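   /* Upload the driver's MME macros; i tracks the next free position in the
    * macro code RAM. */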
1356 i = 0;
1357 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_PER_INSTANCE, mme9097_per_instance_bf);
1358 MK_MACRO(NVC0_3D_MACRO_BLEND_ENABLES, mme9097_blend_enables);
1359 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_SELECT, mme9097_vertex_array_select);
1360 MK_MACRO(NVC0_3D_MACRO_TEP_SELECT, mme9097_tep_select);
1361 MK_MACRO(NVC0_3D_MACRO_GP_SELECT, mme9097_gp_select);
1362 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_FRONT, mme9097_poly_mode_front);
1363 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_BACK, mme9097_poly_mode_back);
1364 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT, mme9097_draw_arrays_indirect);
1365 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT, mme9097_draw_elts_indirect);
1366 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT, mme9097_draw_arrays_indirect_count);
1367 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT, mme9097_draw_elts_indirect_count);
1368 MK_MACRO(NVC0_3D_MACRO_QUERY_BUFFER_WRITE, mme9097_query_buffer_write);
1369 MK_MACRO(NVC0_3D_MACRO_CONSERVATIVE_RASTER_STATE, mme9097_conservative_raster_state);
1370 MK_MACRO(NVC0_3D_MACRO_SET_PRIV_REG, mme9097_set_priv_reg);
1371 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER, mme9097_compute_counter);
1372 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER_TO_QUERY, mme9097_compute_counter_to_query);
1373 MK_MACRO(NVC0_CP_MACRO_LAUNCH_GRID_INDIRECT, mme90c0_launch_grid_indirect);
1374 } else {
1375 #undef MK_MACRO
1376 #define MK_MACRO(m, n) i = tu102_graph_set_macro(screen, m, i, sizeof(n), n);
1377
1378 i = 0;
1379 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_PER_INSTANCE, mmec597_per_instance_bf);
1380 MK_MACRO(NVC0_3D_MACRO_BLEND_ENABLES, mmec597_blend_enables);
1381 MK_MACRO(NVC0_3D_MACRO_VERTEX_ARRAY_SELECT, mmec597_vertex_array_select);
1382 MK_MACRO(NVC0_3D_MACRO_TEP_SELECT, mmec597_tep_select);
1383 MK_MACRO(NVC0_3D_MACRO_GP_SELECT, mmec597_gp_select);
1384 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_FRONT, mmec597_poly_mode_front);
1385 MK_MACRO(NVC0_3D_MACRO_POLYGON_MODE_BACK, mmec597_poly_mode_back);
1386 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT, mmec597_draw_arrays_indirect);
1387 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT, mmec597_draw_elts_indirect);
1388 MK_MACRO(NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT, mmec597_draw_arrays_indirect_count);
1389 MK_MACRO(NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT, mmec597_draw_elts_indirect_count);
1390 MK_MACRO(NVC0_3D_MACRO_QUERY_BUFFER_WRITE, mmec597_query_buffer_write);
1391 MK_MACRO(NVC0_3D_MACRO_CONSERVATIVE_RASTER_STATE, mmec597_conservative_raster_state);
1392 MK_MACRO(NVC0_3D_MACRO_SET_PRIV_REG, mmec597_set_priv_reg);
1393 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER, mmec597_compute_counter);
1394 MK_MACRO(NVC0_3D_MACRO_COMPUTE_COUNTER_TO_QUERY, mmec597_compute_counter_to_query);
1395 }
1396
1397 BEGIN_NVC0(push, NVC0_3D(RASTERIZE_ENABLE), 1);
1398 PUSH_DATA (push, 1);
1399 BEGIN_NVC0(push, NVC0_3D(RT_SEPARATE_FRAG_DATA), 1);
1400 PUSH_DATA (push, 1);
1401 BEGIN_NVC0(push, NVC0_3D(MACRO_GP_SELECT), 1);
1402 PUSH_DATA (push, 0x40);
1403 BEGIN_NVC0(push, NVC0_3D(LAYER), 1);
1404 PUSH_DATA (push, 0);
1405 BEGIN_NVC0(push, NVC0_3D(MACRO_TEP_SELECT), 1);
1406 PUSH_DATA (push, 0x30);
1407 BEGIN_NVC0(push, NVC0_3D(PATCH_VERTICES), 1);
1408 PUSH_DATA (push, 3);
1409 BEGIN_NVC0(push, NVC0_3D(SP_SELECT(2)), 1);
1410 PUSH_DATA (push, 0x20);
1411 BEGIN_NVC0(push, NVC0_3D(SP_SELECT(0)), 1);
1412 PUSH_DATA (push, 0x00);
1413 screen->save_state.patch_vertices = 3;
1414
1415 BEGIN_NVC0(push, NVC0_3D(POINT_COORD_REPLACE), 1);
1416 PUSH_DATA (push, 0);
1417 BEGIN_NVC0(push, NVC0_3D(POINT_RASTER_RULES), 1);
1418 PUSH_DATA (push, NVC0_3D_POINT_RASTER_RULES_OGL);
1419
1420 IMMED_NVC0(push, NVC0_3D(EDGEFLAG), 1);
1421
1422 if (nvc0_screen_init_compute(screen))
1423 goto fail;
1424
1425 /* XXX: Compute and 3D are somehow aliased on Fermi. */
1426 for (i = 0; i < 5; ++i) {
1427 unsigned j = 0;
1428 for (j = 0; j < 16; j++)
1429 screen->cb_bindings[i][j].size = -1;
1430
1431 /* TIC and TSC entries for each unit (nve4+ only) */
1432 /* auxiliary constants (6 user clip planes, base instance id) */
1433 nvc0_screen_bind_cb_3d(screen, push, NULL, i, 15, NVC0_CB_AUX_SIZE,
1434 screen->uniform_bo->offset + NVC0_CB_AUX_INFO(i));
1435 if (screen->eng3d->oclass >= NVE4_3D_CLASS) {
1436 unsigned j;
1437 BEGIN_1IC0(push, NVC0_3D(CB_POS), 9);
1438 PUSH_DATA (push, NVC0_CB_AUX_UNK_INFO);
1439 for (j = 0; j < 8; ++j)
1440 PUSH_DATA(push, j);
1441 } else {
1442 BEGIN_NVC0(push, NVC0_3D(TEX_LIMITS(i)), 1);
1443 PUSH_DATA (push, 0x54);
1444 }
1445
1446 /* MS sample coordinate offsets: these do not work with _ALT modes ! */
1447 BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 2 * 8);
1448 PUSH_DATA (push, NVC0_CB_AUX_MS_INFO);
1449 PUSH_DATA (push, 0); /* 0 */
1450 PUSH_DATA (push, 0);
1451 PUSH_DATA (push, 1); /* 1 */
1452 PUSH_DATA (push, 0);
1453 PUSH_DATA (push, 0); /* 2 */
1454 PUSH_DATA (push, 1);
1455 PUSH_DATA (push, 1); /* 3 */
1456 PUSH_DATA (push, 1);
1457 PUSH_DATA (push, 2); /* 4 */
1458 PUSH_DATA (push, 0);
1459 PUSH_DATA (push, 3); /* 5 */
1460 PUSH_DATA (push, 0);
1461 PUSH_DATA (push, 2); /* 6 */
1462 PUSH_DATA (push, 1);
1463 PUSH_DATA (push, 3); /* 7 */
1464 PUSH_DATA (push, 1);
1465 }
1466 BEGIN_NVC0(push, NVC0_3D(LINKED_TSC), 1);
1467 PUSH_DATA (push, 0);
1468
1469 /* requires Nvidia provided firmware */
1470 if (screen->eng3d->oclass >= GM200_3D_CLASS) {
1471 unsigned reg = screen->eng3d->oclass >= GV100_3D_CLASS ? 0x419ba4 : 0x419f78;
1472 BEGIN_1IC0(push, NVC0_3D(MACRO_SET_PRIV_REG), 3);
1473 PUSH_DATA (push, reg);
1474 PUSH_DATA (push, 0x00000000);
1475 PUSH_DATA (push, 0x00000008);
1476 }
1477
1478 PUSH_KICK (push);
1479
1480 screen->tic.entries = CALLOC(
1481 NVC0_TIC_MAX_ENTRIES + NVC0_TSC_MAX_ENTRIES + NVE4_IMG_MAX_HANDLES,
1482 sizeof(void *));
1483 screen->tsc.entries = screen->tic.entries + NVC0_TIC_MAX_ENTRIES;
1484 screen->img.entries = (void *)(screen->tsc.entries + NVC0_TSC_MAX_ENTRIES);
1485
1486 if (!nvc0_blitter_create(screen))
1487 goto fail;
1488
1489 return &screen->base;
1490
1491 fail:
1492 screen->base.base.context_create = NULL;
1493 return &screen->base;
1494 }
1495
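/* Allocate a TIC slot: linearly scan past locked entries, evict whatever
 * previously occupied the chosen slot (its id is set to -1 so its owner knows
 * it lost the slot), and install the new entry. */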
1496 int
1497 nvc0_screen_tic_alloc(struct nvc0_screen *screen, void *entry)
1498 {
1499 int i = screen->tic.next;
1500
1501 while (screen->tic.lock[i / 32] & (1 << (i % 32)))
1502 i = (i + 1) & (NVC0_TIC_MAX_ENTRIES - 1);
1503
1504 screen->tic.next = (i + 1) & (NVC0_TIC_MAX_ENTRIES - 1);
1505
1506 if (screen->tic.entries[i])
1507 nv50_tic_entry(screen->tic.entries[i])->id = -1;
1508
1509 screen->tic.entries[i] = entry;
1510 return i;
1511 }
1512
1513 int
1514 nvc0_screen_tsc_alloc(struct nvc0_screen *screen, void *entry)
1515 {
1516 int i = screen->tsc.next;
1517
1518 while (screen->tsc.lock[i / 32] & (1 << (i % 32)))
1519 i = (i + 1) & (NVC0_TSC_MAX_ENTRIES - 1);
1520
1521 screen->tsc.next = (i + 1) & (NVC0_TSC_MAX_ENTRIES - 1);
1522
1523 if (screen->tsc.entries[i])
1524 nv50_tsc_entry(screen->tsc.entries[i])->id = -1;
1525
1526 screen->tsc.entries[i] = entry;
1527 return i;
1528 }
1529