#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/format/u_format_s3tc.h"
#include "util/u_string.h"
#include "util/hex.h"

#include "util/os_mman.h"
#include "util/os_time.h"

#include <stdio.h>
#include <errno.h>
#include <stdlib.h>

#include <nouveau_drm.h>
#include <xf86drm.h>
#include <nvif/class.h>
#include <nvif/cl0080.h>

#include "nouveau_winsys.h"
#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_fence.h"
#include "nouveau_mm.h"
#include "nouveau_buffer.h"

#include <compiler/glsl_types.h>

/* XXX this should go away */
#include "frontend/drm_driver.h"

/* Even though GPUs might allow addresses with more bits, some engines do not.
 * Stick with 40 for compatibility.
 */
#define NV_GENERIC_VM_LIMIT_SHIFT 39

int nouveau_mesa_debug = 0;

static const char *
nouveau_screen_get_name(struct pipe_screen *pscreen)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   return screen->chipset_name;
}

static const char *
nouveau_screen_get_vendor(struct pipe_screen *pscreen)
{
   return "Mesa";
}

static const char *
nouveau_screen_get_device_vendor(struct pipe_screen *pscreen)
{
   return "NVIDIA";
}

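/* Approximate the current GPU timestamp from CPU time plus the CPU/GPU delta
 * calibrated once at screen creation; querying PTIMER_TIME on every call
 * would be much slower.
 */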
static uint64_t
nouveau_screen_get_timestamp(struct pipe_screen *pscreen)
{
   int64_t cpu_time = os_time_get_nano();

   /* getparam of PTIMER_TIME takes about x10 as long (several usecs) */

   return cpu_time + nouveau_screen(pscreen)->cpu_gpu_time_delta;
}

static struct disk_cache *
nouveau_screen_get_disk_shader_cache(struct pipe_screen *pscreen)
{
   return nouveau_screen(pscreen)->disk_shader_cache;
}

static void
nouveau_screen_fence_ref(struct pipe_screen *pscreen,
                         struct pipe_fence_handle **ptr,
                         struct pipe_fence_handle *pfence)
{
   nouveau_fence_ref(nouveau_fence(pfence), (struct nouveau_fence **)ptr);
}

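/* A zero timeout is a non-blocking status query; any non-zero timeout waits
 * for the fence to signal (the exact timeout value is not honoured).
 */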
static bool
nouveau_screen_fence_finish(struct pipe_screen *screen,
                            struct pipe_context *ctx,
                            struct pipe_fence_handle *pfence,
                            uint64_t timeout)
{
   if (!timeout)
      return nouveau_fence_signalled(nouveau_fence(pfence));

   return nouveau_fence_wait(nouveau_fence(pfence), NULL);
}


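/* Import a buffer object from a winsys handle, either a global GEM name
 * (SHARED) or a dma-buf file descriptor (FD). Offsets into a buffer are
 * not supported.
 */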
struct nouveau_bo *
nouveau_screen_bo_from_handle(struct pipe_screen *pscreen,
                              struct winsys_handle *whandle,
                              unsigned *out_stride)
{
   struct nouveau_device *dev = nouveau_screen(pscreen)->device;
   struct nouveau_bo *bo = NULL;
   int ret;

   if (whandle->offset != 0) {
      debug_printf("%s: attempt to import unsupported winsys offset %d\n",
                   __func__, whandle->offset);
      return NULL;
   }

   if (whandle->type != WINSYS_HANDLE_TYPE_SHARED &&
       whandle->type != WINSYS_HANDLE_TYPE_FD) {
      debug_printf("%s: attempt to import unsupported handle type %d\n",
                   __func__, whandle->type);
      return NULL;
   }

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED)
      ret = nouveau_bo_name_ref(dev, whandle->handle, &bo);
   else
      ret = nouveau_bo_prime_handle_ref(dev, whandle->handle, &bo);

   if (ret) {
      debug_printf("%s: ref name 0x%08x failed with %d\n",
                   __func__, whandle->handle, ret);
      return NULL;
   }

   *out_stride = whandle->stride;
   return bo;
}


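/* Export a buffer object through the given winsys handle type: a global GEM
 * name (SHARED), the raw GEM handle (KMS) or a dma-buf file descriptor (FD).
 */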
bool
nouveau_screen_bo_get_handle(struct pipe_screen *pscreen,
                             struct nouveau_bo *bo,
                             unsigned stride,
                             struct winsys_handle *whandle)
{
   whandle->stride = stride;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      return nouveau_bo_name_get(bo, &whandle->handle) == 0;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      int fd;
      int ret;

      /* The handle is exported in this case, but the global list of
       * handles is in libdrm and there is no libdrm API to add
       * handles to the list without additional side effects. The
       * closest API available also gets a fd for the handle, which
       * is not necessary in this case. Call it and close the fd.
       */
      ret = nouveau_bo_set_prime(bo, &fd);
      if (ret != 0)
         return false;

      close(fd);

      whandle->handle = bo->handle;
      return true;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      return nouveau_bo_set_prime(bo, (int *)&whandle->handle) == 0;
   } else {
      return false;
   }
}

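/* Create the on-disk shader cache, keyed on an identifier of the driver
 * build so that stale entries are not reused across driver versions.
 */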
static void
nouveau_disk_cache_create(struct nouveau_screen *screen)
{
   struct mesa_sha1 ctx;
   unsigned char sha1[20];
   char cache_id[20 * 2 + 1];
   uint64_t driver_flags = 0;

   _mesa_sha1_init(&ctx);
   if (!disk_cache_get_function_identifier(nouveau_disk_cache_create,
                                           &ctx))
      return;

   _mesa_sha1_final(&ctx, sha1);
   mesa_bytes_to_hex(cache_id, sha1, 20);

   driver_flags |= NOUVEAU_SHADER_CACHE_FLAGS_IR_NIR;

   screen->disk_shader_cache =
      disk_cache_create(nouveau_screen_get_name(&screen->base),
                        cache_id, driver_flags);
}

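/* Reserve (but do not commit) a virtual address range, used as the cutout
 * for driver BO allocations when SVM is enabled.
 */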
static void*
reserve_vma(uintptr_t start, uint64_t reserved_size)
{
   void *reserved = os_mmap((void*)start, reserved_size, PROT_NONE,
                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
   if (reserved == MAP_FAILED)
      return NULL;
   return reserved;
}

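/* Report VRAM as device memory and GART as staging memory, all in kB. */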
static void
nouveau_query_memory_info(struct pipe_screen *pscreen,
                          struct pipe_memory_info *info)
{
   const struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nouveau_device *dev = screen->device;

   info->total_device_memory = dev->vram_size / 1024;
   info->total_staging_memory = dev->gart_size / 1024;

   info->avail_device_memory = dev->vram_limit / 1024;
   info->avail_staging_memory = dev->gart_limit / 1024;
}

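/* Kick-notify callback invoked when a pushbuf is flushed: forward the
 * notification to the owning context, or update the screen's fences directly
 * for the context-less screen pushbuf.
 */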
static void
nouveau_pushbuf_cb(struct nouveau_pushbuf *push)
{
   struct nouveau_pushbuf_priv *p = (struct nouveau_pushbuf_priv *)push->user_priv;

   if (p->context)
      p->context->kick_notify(p->context);
   else
      _nouveau_fence_update(p->screen, true);

   NOUVEAU_DRV_STAT(p->screen, pushbuf_count, 1);
}

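/* Wrap nouveau_pushbuf_new() and attach a private struct so the kick-notify
 * callback can find the screen and, optionally, the context that owns the
 * pushbuf.
 */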
int
nouveau_pushbuf_create(struct nouveau_screen *screen, struct nouveau_context *context,
                       struct nouveau_client *client, struct nouveau_object *chan, int nr,
                       uint32_t size, bool immediate, struct nouveau_pushbuf **push)
{
   int ret;
   ret = nouveau_pushbuf_new(client, chan, nr, size, immediate, push);
   if (ret)
      return ret;

   struct nouveau_pushbuf_priv *p = MALLOC_STRUCT(nouveau_pushbuf_priv);
   if (!p) {
      nouveau_pushbuf_del(push);
      return -ENOMEM;
   }
   p->screen = screen;
   p->context = context;
   (*push)->kick_notify = nouveau_pushbuf_cb;
   (*push)->user_priv = p;
   return 0;
}

void
nouveau_pushbuf_destroy(struct nouveau_pushbuf **push)
{
   if (!*push)
      return;
   FREE((*push)->user_priv);
   nouveau_pushbuf_del(push);
}

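/* Query the device info and treat IGP and SoC GPUs as UMA, i.e. sharing
 * memory with the CPU.
 */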
static bool
nouveau_check_for_uma(int chipset, struct nouveau_object *obj)
{
   struct nv_device_info_v0 info = {
      .version = 0,
   };

   nouveau_object_mthd(obj, NV_DEVICE_V0_INFO, &info, sizeof(info));

   return (info.platform == NV_DEVICE_INFO_V0_IGP) || (info.platform == NV_DEVICE_INFO_V0_SOC);
}

static int
nouveau_screen_get_fd(struct pipe_screen *pscreen)
{
   const struct nouveau_screen *screen = nouveau_screen(pscreen);

   return screen->drm->fd;
}

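/* Common screen initialization shared by all nouveau drivers: channel and
 * pushbuf setup, optional SVM carve-out, shader disk cache and memory
 * allocator creation.
 */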
int
nouveau_screen_init(struct nouveau_screen *screen, struct nouveau_device *dev)
{
   struct pipe_screen *pscreen = &screen->base;
   struct nv04_fifo nv04_data = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
   struct nvc0_fifo nvc0_data = { };
   uint64_t time;
   int size, ret;
   void *data;
   union nouveau_bo_config mm_config;

   char *nv_dbg = getenv("NOUVEAU_MESA_DEBUG");
   if (nv_dbg)
      nouveau_mesa_debug = atoi(nv_dbg);

   screen->force_enable_cl = debug_get_bool_option("NOUVEAU_ENABLE_CL", false);
   screen->disable_fences = debug_get_bool_option("NOUVEAU_DISABLE_FENCES", false);

   /* These must be set before any failure is possible, as the cleanup
    * paths assume they're responsible for deleting them.
    */
   screen->drm = nouveau_drm(&dev->object);
   screen->device = dev;

   /*
    * this is initialized to 1 in nouveau_drm_screen_create after screen
    * is fully constructed and added to the global screen list.
    */
   screen->refcount = -1;

   if (dev->chipset < 0xc0) {
      data = &nv04_data;
      size = sizeof(nv04_data);
   } else {
      data = &nvc0_data;
      size = sizeof(nvc0_data);
   }

   bool enable_svm = debug_get_bool_option("NOUVEAU_SVM", false);
   screen->has_svm = false;
   /* we only care about HMM with OpenCL enabled */
   if (dev->chipset > 0x130 && enable_svm) {
      /* Before being able to enable SVM we need to carve out some memory for
       * driver bo allocations. Let's just base the size on the available VRAM.
       *
       * 40 bit is the biggest we care about and for 32 bit systems we don't
       * want to allocate all of the available memory either.
       *
       * Also we align the size we want to reserve to the next POT to make use
       * of hugepages.
       */
      const int vram_shift = util_logbase2_ceil64(dev->vram_size);
      const int limit_bit =
         MIN2(sizeof(void*) * 8 - 1, NV_GENERIC_VM_LIMIT_SHIFT);
      screen->svm_cutout_size =
         BITFIELD64_BIT(MIN2(sizeof(void*) == 4 ? 26 : NV_GENERIC_VM_LIMIT_SHIFT, vram_shift));

      size_t start = screen->svm_cutout_size;
      do {
         screen->svm_cutout = reserve_vma(start, screen->svm_cutout_size);
         if (!screen->svm_cutout) {
            start += screen->svm_cutout_size;
            continue;
         }

         struct drm_nouveau_svm_init svm_args = {
            .unmanaged_addr = (uintptr_t)screen->svm_cutout,
            .unmanaged_size = screen->svm_cutout_size,
         };

         ret = drmCommandWrite(screen->drm->fd, DRM_NOUVEAU_SVM_INIT,
                               &svm_args, sizeof(svm_args));
         screen->has_svm = !ret;
         if (!screen->has_svm)
            os_munmap(screen->svm_cutout, screen->svm_cutout_size);
         break;
      } while ((start + screen->svm_cutout_size) < BITFIELD64_MASK(limit_bit));
   }

   switch (dev->chipset) {
   case 0x0ea: /* TK1, GK20A */
   case 0x12b: /* TX1, GM20B */
   case 0x13b: /* TX2, GP10B */
      screen->tegra_sector_layout = true;
      break;
   default:
      /* Xavier's GPU and everything else */
      screen->tegra_sector_layout = false;
      break;
   }

   /*
    * Set default VRAM domain if not overridden
    */
   if (!screen->vram_domain) {
      if (dev->vram_size > 0)
         screen->vram_domain = NOUVEAU_BO_VRAM;
      else
         screen->vram_domain = NOUVEAU_BO_GART;
   }

   ret = nouveau_object_new(&dev->object, 0, NOUVEAU_FIFO_CHANNEL_CLASS,
                            data, size, &screen->channel);
   if (ret)
      goto err;

   ret = nouveau_client_new(screen->device, &screen->client);
   if (ret)
      goto err;
   ret = nouveau_pushbuf_create(screen, NULL, screen->client, screen->channel,
                                4, 512 * 1024, 1,
                                &screen->pushbuf);
   if (ret)
      goto err;

   /* getting CPU time first appears to be more accurate */
   screen->cpu_gpu_time_delta = os_time_get();

   ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_PTIMER_TIME, &time);
   if (!ret)
      screen->cpu_gpu_time_delta = time - screen->cpu_gpu_time_delta * 1000;

   snprintf(screen->chipset_name, sizeof(screen->chipset_name), "NV%02X", dev->chipset);
   pscreen->get_name = nouveau_screen_get_name;
   pscreen->get_screen_fd = nouveau_screen_get_fd;
   pscreen->get_vendor = nouveau_screen_get_vendor;
   pscreen->get_device_vendor = nouveau_screen_get_device_vendor;
   pscreen->get_disk_shader_cache = nouveau_screen_get_disk_shader_cache;

   pscreen->get_timestamp = nouveau_screen_get_timestamp;

   pscreen->fence_reference = nouveau_screen_fence_ref;
   pscreen->fence_finish = nouveau_screen_fence_finish;

   pscreen->query_memory_info = nouveau_query_memory_info;

   nouveau_disk_cache_create(screen);

   screen->transfer_pushbuf_threshold = 192;
   screen->lowmem_bindings = PIPE_BIND_GLOBAL; /* gallium limit */
   screen->vidmem_bindings =
      PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL |
      PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT |
      PIPE_BIND_CURSOR |
      PIPE_BIND_SAMPLER_VIEW |
      PIPE_BIND_SHADER_BUFFER | PIPE_BIND_SHADER_IMAGE |
      PIPE_BIND_COMPUTE_RESOURCE |
      PIPE_BIND_GLOBAL;
   screen->sysmem_bindings =
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_STREAM_OUTPUT |
      PIPE_BIND_COMMAND_ARGS_BUFFER;

   screen->is_uma = nouveau_check_for_uma(dev->chipset, &dev->object);

   memset(&mm_config, 0, sizeof(mm_config));
   nouveau_fence_list_init(&screen->fence);

   screen->mm_GART = nouveau_mm_create(dev,
                                       NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                                       &mm_config);
   screen->mm_VRAM = nouveau_mm_create(dev, NOUVEAU_BO_VRAM, &mm_config);

   glsl_type_singleton_init_or_ref();

   return 0;

err:
   if (screen->svm_cutout)
      os_munmap(screen->svm_cutout, screen->svm_cutout_size);
   return ret;
}

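/* Tear down everything set up by nouveau_screen_init(), including the DRM
 * device and file descriptor.
 */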
void
nouveau_screen_fini(struct nouveau_screen *screen)
{
   int fd = screen->drm->fd;

   glsl_type_singleton_decref();
   if (screen->has_svm)
      os_munmap(screen->svm_cutout, screen->svm_cutout_size);

   nouveau_mm_destroy(screen->mm_GART);
   nouveau_mm_destroy(screen->mm_VRAM);

   nouveau_pushbuf_destroy(&screen->pushbuf);

   nouveau_client_del(&screen->client);
   nouveau_object_del(&screen->channel);

   nouveau_device_del(&screen->device);
   nouveau_drm_del(&screen->drm);
   close(fd);

   disk_cache_destroy(screen->disk_shader_cache);
   nouveau_fence_list_destroy(&screen->fence);
}

static void
nouveau_set_debug_callback(struct pipe_context *pipe,
                           const struct util_debug_callback *cb)
{
   struct nouveau_context *context = nouveau_context(pipe);

   if (cb)
      context->debug = *cb;
   else
      memset(&context->debug, 0, sizeof(context->debug));
}

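/* Common context initialization: every context gets its own client and
 * pushbuf on the screen's channel.
 */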
int
nouveau_context_init(struct nouveau_context *context, struct nouveau_screen *screen)
{
   int ret;

   context->pipe.set_debug_callback = nouveau_set_debug_callback;
   context->screen = screen;

   ret = nouveau_client_new(screen->device, &context->client);
   if (ret)
      return ret;

   ret = nouveau_pushbuf_create(screen, context, context->client, screen->channel,
                                4, 512 * 1024, 1,
                                &context->pushbuf);
   if (ret)
      return ret;

   return 0;
}