/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_resource.c
 *
 * Resources are images, buffers, and other objects used by the GPU.
 *
 * XXX: explain resources
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/detect_os.h"
#include "util/os_memory.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_memory.h"
#include "util/u_resource.h"
#include "util/u_threaded_context.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/ralloc.h"
#include "i915/iris_bufmgr.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_resource.h"
#include "iris_screen.h"
#include "intel/common/intel_aux_map.h"
#include "intel/dev/intel_debug.h"
#include "isl/isl.h"
#include "drm-uapi/drm_fourcc.h"

enum modifier_priority {
   MODIFIER_PRIORITY_INVALID = 0,
   MODIFIER_PRIORITY_LINEAR,
   MODIFIER_PRIORITY_X,
   MODIFIER_PRIORITY_Y,
   MODIFIER_PRIORITY_Y_CCS,
   MODIFIER_PRIORITY_Y_GFX12_RC_CCS,
   MODIFIER_PRIORITY_Y_GFX12_RC_CCS_CC,
   MODIFIER_PRIORITY_4,
   MODIFIER_PRIORITY_4_DG2_RC_CCS,
   MODIFIER_PRIORITY_4_DG2_RC_CCS_CC,
   MODIFIER_PRIORITY_4_MTL_RC_CCS,
   MODIFIER_PRIORITY_4_MTL_RC_CCS_CC,
};

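/* Maps each modifier_priority value back to its DRM format modifier. */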
static const uint64_t priority_to_modifier[] = {
   [MODIFIER_PRIORITY_INVALID] = DRM_FORMAT_MOD_INVALID,
   [MODIFIER_PRIORITY_LINEAR] = DRM_FORMAT_MOD_LINEAR,
   [MODIFIER_PRIORITY_X] = I915_FORMAT_MOD_X_TILED,
   [MODIFIER_PRIORITY_Y] = I915_FORMAT_MOD_Y_TILED,
   [MODIFIER_PRIORITY_Y_CCS] = I915_FORMAT_MOD_Y_TILED_CCS,
   [MODIFIER_PRIORITY_Y_GFX12_RC_CCS] = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
   [MODIFIER_PRIORITY_Y_GFX12_RC_CCS_CC] = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
   [MODIFIER_PRIORITY_4] = I915_FORMAT_MOD_4_TILED,
   [MODIFIER_PRIORITY_4_DG2_RC_CCS] = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS,
   [MODIFIER_PRIORITY_4_DG2_RC_CCS_CC] = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC,
   [MODIFIER_PRIORITY_4_MTL_RC_CCS] = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS,
   [MODIFIER_PRIORITY_4_MTL_RC_CCS_CC] = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC,
};

static bool
modifier_is_supported(const struct intel_device_info *devinfo,
                      enum pipe_format pfmt, unsigned bind,
                      uint64_t modifier)
{
   /* Check for basic device support. */
   switch (modifier) {
   case DRM_FORMAT_MOD_LINEAR:
   case I915_FORMAT_MOD_X_TILED:
      break;
   case I915_FORMAT_MOD_Y_TILED:
      if (devinfo->ver <= 8 && (bind & PIPE_BIND_SCANOUT))
         return false;
      if (devinfo->verx10 >= 125)
         return false;
      break;
   case I915_FORMAT_MOD_Y_TILED_CCS:
      if (devinfo->ver <= 8 || devinfo->ver >= 12)
         return false;
      break;
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
      if (devinfo->verx10 != 120)
         return false;
      break;
   case I915_FORMAT_MOD_4_TILED:
      if (devinfo->verx10 < 125)
         return false;
      break;
   case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
   case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
   case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
      if (!intel_device_info_is_dg2(devinfo))
         return false;
      break;
   case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
   case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
   case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
      if (!intel_device_info_is_mtl_or_arl(devinfo))
         return false;
      break;
   case DRM_FORMAT_MOD_INVALID:
   default:
      return false;
   }

   bool no_ccs = INTEL_DEBUG(DEBUG_NO_CCS) || (bind & PIPE_BIND_CONST_BW);

   /* Check remaining requirements. */
   switch (modifier) {
   case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
   case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
      if (no_ccs)
         return false;

      if (pfmt != PIPE_FORMAT_BGRA8888_UNORM &&
          pfmt != PIPE_FORMAT_RGBA8888_UNORM &&
          pfmt != PIPE_FORMAT_BGRX8888_UNORM &&
          pfmt != PIPE_FORMAT_RGBX8888_UNORM &&
          pfmt != PIPE_FORMAT_NV12 &&
          pfmt != PIPE_FORMAT_P010 &&
          pfmt != PIPE_FORMAT_P012 &&
          pfmt != PIPE_FORMAT_P016 &&
          pfmt != PIPE_FORMAT_YUYV &&
          pfmt != PIPE_FORMAT_UYVY) {
         return false;
      }
      break;
   case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
   case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
   case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
   case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   case I915_FORMAT_MOD_Y_TILED_CCS: {
      if (no_ccs)
         return false;

      enum isl_format rt_format =
         iris_format_for_usage(devinfo, pfmt,
                               ISL_SURF_USAGE_RENDER_TARGET_BIT).fmt;

      if (rt_format == ISL_FORMAT_UNSUPPORTED ||
          !isl_format_supports_ccs_e(devinfo, rt_format))
         return false;
      break;
   }
   default:
      break;
   }

   return true;
}

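/* Pick the highest-priority modifier that the device supports for this
 * format and bind flags. Returns DRM_FORMAT_MOD_INVALID if none of the
 * given modifiers qualify.
 */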
static uint64_t
select_best_modifier(const struct intel_device_info *devinfo,
                     const struct pipe_resource *templ,
                     const uint64_t *modifiers,
                     int count)
{
   enum modifier_priority prio = MODIFIER_PRIORITY_INVALID;

   for (int i = 0; i < count; i++) {
      if (!modifier_is_supported(devinfo, templ->format, templ->bind,
                                 modifiers[i]))
         continue;

      switch (modifiers[i]) {
      case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
         prio = MAX2(prio, MODIFIER_PRIORITY_4_MTL_RC_CCS_CC);
         break;
      case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
         prio = MAX2(prio, MODIFIER_PRIORITY_4_MTL_RC_CCS);
         break;
      case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
         prio = MAX2(prio, MODIFIER_PRIORITY_4_DG2_RC_CCS_CC);
         break;
      case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
         prio = MAX2(prio, MODIFIER_PRIORITY_4_DG2_RC_CCS);
         break;
      case I915_FORMAT_MOD_4_TILED:
         prio = MAX2(prio, MODIFIER_PRIORITY_4);
         break;
      case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y_GFX12_RC_CCS_CC);
         break;
      case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y_GFX12_RC_CCS);
         break;
      case I915_FORMAT_MOD_Y_TILED_CCS:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y_CCS);
         break;
      case I915_FORMAT_MOD_Y_TILED:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y);
         break;
      case I915_FORMAT_MOD_X_TILED:
         prio = MAX2(prio, MODIFIER_PRIORITY_X);
         break;
      case DRM_FORMAT_MOD_LINEAR:
         prio = MAX2(prio, MODIFIER_PRIORITY_LINEAR);
         break;
      case DRM_FORMAT_MOD_INVALID:
      default:
         break;
      }
   }

   return priority_to_modifier[prio];
}


static inline bool
is_modifier_external_only(enum pipe_format pfmt, uint64_t modifier)
{
   /* Only allow external usage for the following cases: YUV formats
    * and the media-compression modifier. The render engine lacks
    * support for rendering to a media-compressed surface if the
    * compression ratio is large enough. By requiring external usage
    * of media-compressed surfaces, resolves are avoided.
    */
   return util_format_is_yuv(pfmt) ||
          isl_drm_modifier_get_info(modifier)->supports_media_compression;
}

static void
iris_query_dmabuf_modifiers(struct pipe_screen *pscreen,
                            enum pipe_format pfmt,
                            int max,
                            uint64_t *modifiers,
                            unsigned int *external_only,
                            int *count)
{
   struct iris_screen *screen = (void *) pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;

   uint64_t all_modifiers[] = {
      DRM_FORMAT_MOD_LINEAR,
      I915_FORMAT_MOD_X_TILED,
      I915_FORMAT_MOD_4_TILED,
      I915_FORMAT_MOD_4_TILED_DG2_RC_CCS,
      I915_FORMAT_MOD_4_TILED_DG2_MC_CCS,
      I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC,
      I915_FORMAT_MOD_4_TILED_MTL_RC_CCS,
      I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC,
      I915_FORMAT_MOD_4_TILED_MTL_MC_CCS,
      I915_FORMAT_MOD_Y_TILED,
      I915_FORMAT_MOD_Y_TILED_CCS,
      I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
      I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
      I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
   };

   int supported_mods = 0;

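   /* Write at most `max` entries, but keep counting: *count must report the
    * total number of supported modifiers even when the caller's array is
    * smaller (or NULL, for the initial sizing query).
    */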
   for (int i = 0; i < ARRAY_SIZE(all_modifiers); i++) {
      if (!modifier_is_supported(devinfo, pfmt, 0, all_modifiers[i]))
         continue;

      if (supported_mods < max) {
         if (modifiers)
            modifiers[supported_mods] = all_modifiers[i];

         if (external_only) {
            external_only[supported_mods] =
               is_modifier_external_only(pfmt, all_modifiers[i]);
         }
      }

      supported_mods++;
   }

   *count = supported_mods;
}

static bool
iris_is_dmabuf_modifier_supported(struct pipe_screen *pscreen,
                                  uint64_t modifier, enum pipe_format pfmt,
                                  bool *external_only)
{
   struct iris_screen *screen = (void *) pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;

   if (modifier_is_supported(devinfo, pfmt, 0, modifier)) {
      if (external_only)
         *external_only = is_modifier_external_only(pfmt, modifier);

      return true;
   }

   return false;
}

static unsigned int
iris_get_dmabuf_modifier_planes(struct pipe_screen *pscreen, uint64_t modifier,
                                enum pipe_format format)
{
   unsigned int planes = util_format_get_num_planes(format);

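   /* Modifiers with an aux plane expose one per main plane, and the
    * clear-color (*_CC) variants add one more plane. DG2 uses flat CCS, so
    * no aux planes are exposed there: DG2's RC_CCS_CC is main + clear color.
    */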
   switch (modifier) {
   case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
      return 3;
   case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
   case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
   case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
   case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   case I915_FORMAT_MOD_Y_TILED_CCS:
      return 2 * planes;
   case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
   case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
   default:
      return planes;
   }
}

enum isl_format
iris_image_view_get_format(struct iris_context *ice,
                           const struct pipe_image_view *img)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = screen->devinfo;

   isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
   enum isl_format isl_fmt =
      iris_format_for_usage(devinfo, img->format, usage).fmt;

   if (img->shader_access & PIPE_IMAGE_ACCESS_READ) {
      /* On Gfx8, try to use typed surface reads (which support a
       * limited number of formats), and if not possible, fall back
       * to untyped reads.
       */
      if (devinfo->ver == 8 &&
          !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt))
         return ISL_FORMAT_RAW;
      else
         return isl_lower_storage_image_format(devinfo, isl_fmt);
   }

   return isl_fmt;
}

static struct pipe_resource *
iris_memobj_create_from_handle(struct pipe_screen *pscreen,
                               struct winsys_handle *whandle,
                               bool dedicated)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_memory_object *memobj = CALLOC_STRUCT(iris_memory_object);
   if (!memobj)
      return NULL;

   assert(whandle->type == WINSYS_HANDLE_TYPE_FD);
   assert(whandle->modifier == DRM_FORMAT_MOD_INVALID);
   struct iris_bo *bo = iris_bo_import_dmabuf(screen->bufmgr, whandle->handle,
                                              DRM_FORMAT_MOD_INVALID);
   if (!bo) {
      free(memobj);
      return NULL;
   }

   memobj->b.dedicated = dedicated;
   memobj->bo = bo;
   memobj->format = whandle->format;
   memobj->stride = whandle->stride;

   return &memobj->b;
}

static void
iris_memobj_destroy(struct pipe_screen *pscreen,
                    struct pipe_memory_object *pmemobj)
{
   struct iris_memory_object *memobj = (struct iris_memory_object *)pmemobj;

   iris_bo_unreference(memobj->bo);
   free(memobj);
}

struct pipe_resource *
iris_resource_get_separate_stencil(struct pipe_resource *p_res)
{
   /* For packed depth-stencil, we treat depth as the primary resource
    * and store S8 as the "second plane" resource.
    */
   if (p_res->next && p_res->next->format == PIPE_FORMAT_S8_UINT)
      return p_res->next;

   return NULL;
}

static void
iris_resource_set_separate_stencil(struct pipe_resource *p_res,
                                   struct pipe_resource *stencil)
{
   assert(util_format_has_depth(util_format_description(p_res->format)));
   pipe_resource_reference(&p_res->next, stencil);
}

void
iris_get_depth_stencil_resources(struct pipe_resource *res,
                                 struct iris_resource **out_z,
                                 struct iris_resource **out_s)
{
   if (!res) {
      *out_z = NULL;
      *out_s = NULL;
      return;
   }

   if (res->format != PIPE_FORMAT_S8_UINT) {
      *out_z = (void *) res;
      *out_s = (void *) iris_resource_get_separate_stencil(res);
   } else {
      *out_z = NULL;
      *out_s = (void *) res;
   }
}

void
iris_resource_disable_aux(struct iris_resource *res)
{
   iris_bo_unreference(res->aux.bo);
   iris_bo_unreference(res->aux.clear_color_bo);
   free(res->aux.state);

   res->aux.usage = ISL_AUX_USAGE_NONE;
   res->aux.surf.size_B = 0;
   res->aux.bo = NULL;
   res->aux.clear_color_bo = NULL;
   res->aux.state = NULL;
}

static unsigned
iris_resource_alloc_flags(const struct iris_screen *screen,
                          const struct pipe_resource *templ,
                          struct iris_resource *res)
{
   if (templ->flags & IRIS_RESOURCE_FLAG_DEVICE_MEM)
      return BO_ALLOC_PLAIN;

   unsigned flags = BO_ALLOC_PLAIN;

   switch (templ->usage) {
   case PIPE_USAGE_STAGING:
      flags |= BO_ALLOC_SMEM | BO_ALLOC_CACHED_COHERENT;
      break;
   case PIPE_USAGE_STREAM:
      flags |= BO_ALLOC_SMEM;
      break;
   case PIPE_USAGE_DYNAMIC:
   case PIPE_USAGE_DEFAULT:
   case PIPE_USAGE_IMMUTABLE:
      /* Use LMEM for these if possible */
      break;
   }

   if (templ->bind & PIPE_BIND_SCANOUT)
      flags |= BO_ALLOC_SCANOUT;

   if (templ->flags & (PIPE_RESOURCE_FLAG_MAP_COHERENT |
                       PIPE_RESOURCE_FLAG_MAP_PERSISTENT))
      flags |= BO_ALLOC_SMEM | BO_ALLOC_CACHED_COHERENT;

   if (screen->devinfo->verx10 >= 125 && screen->devinfo->has_local_mem &&
       isl_aux_usage_has_ccs(res->aux.usage)) {
      assert((flags & BO_ALLOC_SMEM) == 0);
      flags |= BO_ALLOC_LMEM;
      /* For displayable surfaces with clear color, the KMD needs to access
       * the clear color via the CPU.
       */
      if (res->mod_info && res->mod_info->supports_clear_color)
         flags |= BO_ALLOC_CPU_VISIBLE;
   }

   if ((templ->bind & PIPE_BIND_SHARED) ||
       util_format_get_num_planes(templ->format) > 1)
      flags |= BO_ALLOC_NO_SUBALLOC;

   if (templ->bind & PIPE_BIND_PROTECTED)
      flags |= BO_ALLOC_PROTECTED;

   if (templ->bind & PIPE_BIND_SHARED) {
      flags |= BO_ALLOC_SHARED;

      /* We request that the bufmgr zero the buffer because, if it gets
       * re-used from the pool, we don't want to leak random garbage from
       * our process to another.
       */
      flags |= BO_ALLOC_ZEROED;
   }

   return flags;
}

static void
iris_resource_destroy(struct pipe_screen *screen,
                      struct pipe_resource *p_res)
{
   struct iris_resource *res = (struct iris_resource *) p_res;

   if (p_res->target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   iris_resource_disable_aux(res);

   threaded_resource_deinit(p_res);
   iris_bo_unreference(res->bo);
   iris_pscreen_unref(res->orig_screen);

   free(res);
}

static struct iris_resource *
iris_alloc_resource(struct pipe_screen *pscreen,
                    const struct pipe_resource *templ)
{
   struct iris_resource *res = CALLOC_STRUCT(iris_resource);
   if (!res)
      return NULL;

   res->base.b = *templ;
   res->base.b.screen = pscreen;
   res->orig_screen = iris_pscreen_ref(pscreen);
   pipe_reference_init(&res->base.b.reference, 1);
   threaded_resource_init(&res->base.b, false);

   if (templ->target == PIPE_BUFFER)
      util_range_init(&res->valid_buffer_range);

   return res;
}

unsigned
iris_get_num_logical_layers(const struct iris_resource *res, unsigned level)
{
   if (res->surf.dim == ISL_SURF_DIM_3D)
      return u_minify(res->surf.logical_level0_px.depth, level);
   else
      return res->surf.logical_level0_px.array_len;
}

static enum isl_aux_state **
create_aux_state_map(struct iris_resource *res, enum isl_aux_state initial)
{
   assert(res->aux.state == NULL);

   uint32_t total_slices = 0;
   for (uint32_t level = 0; level < res->surf.levels; level++)
      total_slices += iris_get_num_logical_layers(res, level);

   const size_t per_level_array_size =
      res->surf.levels * sizeof(enum isl_aux_state *);

   /* We're going to allocate a single chunk of data for both the per-level
    * reference array and the arrays of aux_state. This makes cleanup
    * significantly easier.
    */
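   /* Layout: [per-level pointers][level 0 states][level 1 states]... */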
   const size_t total_size =
      per_level_array_size + total_slices * sizeof(enum isl_aux_state);

   void *data = malloc(total_size);
   if (!data)
      return NULL;

   enum isl_aux_state **per_level_arr = data;
   enum isl_aux_state *s = data + per_level_array_size;
   for (uint32_t level = 0; level < res->surf.levels; level++) {
      per_level_arr[level] = s;
      const unsigned level_layers = iris_get_num_logical_layers(res, level);
      for (uint32_t a = 0; a < level_layers; a++)
         *(s++) = initial;
   }
   assert((void *)s == data + total_size);

   return per_level_arr;
}

static unsigned
iris_get_aux_clear_color_state_size(struct iris_screen *screen,
                                    struct iris_resource *res)
{
   if (!isl_aux_usage_has_fast_clears(res->aux.usage))
      return 0;

   assert(!isl_surf_usage_is_stencil(res->surf.usage));

   /* Depth packets can't specify indirect clear values. The only time depth
    * buffers can use indirect clear values is when they're accessed by the
    * sampler via render surface state objects.
    */
   if (isl_surf_usage_is_depth(res->surf.usage) &&
       !iris_sample_with_depth_aux(screen->devinfo, res))
      return 0;

   return screen->isl_dev.ss.clear_color_state_size;
}

static void
map_aux_addresses(struct iris_screen *screen, struct iris_resource *res,
                  enum pipe_format pfmt, unsigned plane)
{
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   if (isl_aux_usage_has_ccs(res->aux.usage)) {
      const enum isl_format format =
         iris_format_for_usage(screen->devinfo, pfmt, res->surf.usage).fmt;
      const uint64_t format_bits =
         intel_aux_map_format_bits(res->surf.tiling, format, plane);
      const bool mapped =
         intel_aux_map_add_mapping(aux_map_ctx,
                                   res->bo->address + res->offset,
                                   res->aux.bo->address +
                                   res->aux.comp_ctrl_surf_offset,
                                   res->surf.size_B, format_bits);
      assert(mapped);
      res->bo->aux_map_address = res->aux.bo->address;
   }
}

static bool
want_ccs_e_for_format(const struct intel_device_info *devinfo,
                      enum isl_format format)
{
   if (!isl_format_supports_ccs_e(devinfo, format))
      return false;

   const struct isl_format_layout *fmtl = isl_format_get_layout(format);

   /* Prior to TGL, CCS_E seems to significantly hurt performance with 32-bit
    * floating point formats. For example, Paraview's "Wavelet Volume" case
    * uses both R32_FLOAT and R32G32B32A32_FLOAT, and enabling CCS_E for those
    * formats causes a 62% FPS drop.
    *
    * However, many benchmarks seem to use 16-bit float with no issues.
    */
   if (devinfo->ver <= 11 &&
       fmtl->channels.r.bits == 32 && fmtl->channels.r.type == ISL_SFLOAT)
      return false;

   return true;
}

static bool
want_hiz_wt_for_res(const struct intel_device_info *devinfo,
                    const struct iris_resource *res)
{
   /* Gen12 only supports write-through for single-sampled surfaces, while
    * Gen20+ also supports multi-sampled images.
    */
   if (devinfo->ver < 20 && res->surf.samples > 1)
      return false;

   if (!(res->surf.usage & ISL_SURF_USAGE_TEXTURE_BIT))
      return false;

   /* If this resource will be used as a texture, put the HiZ surface in
    * write-through mode so that we can sample from it.
    */
   return true;
}

static enum isl_surf_dim
target_to_isl_surf_dim(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_BUFFER:
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      return ISL_SURF_DIM_1D;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_RECT:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return ISL_SURF_DIM_2D;
   case PIPE_TEXTURE_3D:
      return ISL_SURF_DIM_3D;
   case PIPE_MAX_TEXTURE_TYPES:
      break;
   }
   unreachable("invalid texture type");
}

static bool
iris_resource_configure_main(const struct iris_screen *screen,
                             struct iris_resource *res,
                             const struct pipe_resource *templ,
                             uint64_t modifier, uint32_t row_pitch_B)
{
   res->mod_info = isl_drm_modifier_get_info(modifier);

   if (modifier != DRM_FORMAT_MOD_INVALID && res->mod_info == NULL)
      return false;

   isl_tiling_flags_t tiling_flags = 0;

   if (res->mod_info != NULL) {
      tiling_flags = 1 << res->mod_info->tiling;
   } else if (templ->usage == PIPE_USAGE_STAGING ||
              templ->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR)) {
      tiling_flags = ISL_TILING_LINEAR_BIT;
   } else if (res->external_format != PIPE_FORMAT_NONE) {
      /* This came from iris_resource_from_memobj and didn't have
       * PIPE_BIND_LINEAR set, so "optimal" tiling is desired. Let isl
       * select the tiling. The implicit contract is that both drivers
       * will arrive at the same tiling by using the same code to decide.
       */
      assert(modifier == DRM_FORMAT_MOD_INVALID);
      tiling_flags = ISL_TILING_ANY_MASK;
   } else if (!screen->devinfo->has_tiling_uapi &&
              (templ->bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED))) {
      tiling_flags = ISL_TILING_LINEAR_BIT;
   } else if (templ->bind & PIPE_BIND_SCANOUT) {
      tiling_flags = ISL_TILING_X_BIT;
   } else {
      tiling_flags = ISL_TILING_ANY_MASK;
   }

   /* We don't support Yf or Ys tiling yet */
   tiling_flags &= ~ISL_TILING_STD_Y_MASK;
   assert(tiling_flags != 0);

   isl_surf_usage_flags_t usage = 0;

   if (res->mod_info && !isl_drm_modifier_has_aux(modifier))
      usage |= ISL_SURF_USAGE_DISABLE_AUX_BIT;

   else if (!res->mod_info && res->external_format != PIPE_FORMAT_NONE)
      usage |= ISL_SURF_USAGE_DISABLE_AUX_BIT;

   else if (templ->bind & PIPE_BIND_CONST_BW)
      usage |= ISL_SURF_USAGE_DISABLE_AUX_BIT;

   if (templ->usage == PIPE_USAGE_STAGING)
      usage |= ISL_SURF_USAGE_STAGING_BIT;

   if (templ->bind & PIPE_BIND_RENDER_TARGET)
      usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;

   if (templ->bind & PIPE_BIND_SAMPLER_VIEW)
      usage |= ISL_SURF_USAGE_TEXTURE_BIT;

   if (templ->bind & PIPE_BIND_SHADER_IMAGE)
      usage |= ISL_SURF_USAGE_STORAGE_BIT;

   if (templ->bind & PIPE_BIND_SCANOUT)
      usage |= ISL_SURF_USAGE_DISPLAY_BIT;

   else if (isl_drm_modifier_needs_display_layout(modifier))
      usage |= ISL_SURF_USAGE_DISPLAY_BIT;

   if (templ->target == PIPE_TEXTURE_CUBE ||
       templ->target == PIPE_TEXTURE_CUBE_ARRAY) {
      usage |= ISL_SURF_USAGE_CUBE_BIT;
   }

   if (templ->usage != PIPE_USAGE_STAGING &&
       util_format_is_depth_or_stencil(templ->format)) {

      /* Should be handled by u_transfer_helper */
      assert(!util_format_is_depth_and_stencil(templ->format));

      usage |= templ->format == PIPE_FORMAT_S8_UINT ?
               ISL_SURF_USAGE_STENCIL_BIT : ISL_SURF_USAGE_DEPTH_BIT;
   }

   if ((usage & ISL_SURF_USAGE_TEXTURE_BIT) ||
       !isl_surf_usage_is_depth_or_stencil(usage)) {
      /* Notify ISL that iris may access this image from different engines.
       * The reads and writes performed by the engines are guaranteed to be
       * sequential with respect to each other. This is due to the
       * implementation of flush_for_cross_batch_dependencies().
       */
      usage |= ISL_SURF_USAGE_MULTI_ENGINE_SEQ_BIT;
   } else {
      /* Depth/stencil render buffers are the only surfaces which are not
       * accessed by compute shaders. Also, iris does not use the blitter on
       * such surfaces.
       */
      assert(!(templ->bind & PIPE_BIND_SHADER_IMAGE));
      assert(!(templ->bind & PIPE_BIND_PRIME_BLIT_DST));
   }

   const enum isl_format format =
      iris_format_for_usage(screen->devinfo, templ->format, usage).fmt;

   const struct isl_surf_init_info init_info = {
      .dim = target_to_isl_surf_dim(templ->target),
      .format = format,
      .width = templ->width0,
      .height = templ->height0,
      .depth = templ->depth0,
      .levels = templ->last_level + 1,
      .array_len = templ->array_size,
      .samples = MAX2(templ->nr_samples, 1),
      .min_alignment_B = 0,
      .row_pitch_B = row_pitch_B,
      .usage = usage,
      .tiling_flags = tiling_flags
   };

   if (!isl_surf_init_s(&screen->isl_dev, &res->surf, &init_info))
      return false;

   res->internal_format = templ->format;

   return true;
}

/**
 * Configure aux for the resource, but don't allocate it. For images which
 * might be shared with modifiers, we must allocate the image and aux data in
 * a single bo.
 *
 * Returns false on unexpected error (e.g. allocation failed, or invalid
 * configuration result).
 */
static bool
iris_resource_configure_aux(struct iris_screen *screen,
                            struct iris_resource *res)
{
   const struct intel_device_info *devinfo = screen->devinfo;

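   /* Decision tree: MSAA surfaces get MCS (plus CCS where it works), depth
    * gets HiZ (plus CCS, in write-through mode when sampled), stencil gets
    * STC_CCS, and color gets media or render CCS depending on the modifier
    * and format.
    */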
   const bool has_mcs =
      isl_surf_get_mcs_surf(&screen->isl_dev, &res->surf, &res->aux.surf);

   const bool has_hiz =
      isl_surf_get_hiz_surf(&screen->isl_dev, &res->surf, &res->aux.surf);

   const bool has_ccs = devinfo->has_aux_map || devinfo->has_flat_ccs ?
      isl_surf_supports_ccs(&screen->isl_dev, &res->surf, &res->aux.surf) :
      isl_surf_get_ccs_surf(&screen->isl_dev, &res->surf, &res->aux.surf, 0);

   if (has_mcs) {
      assert(!res->mod_info);
      assert(!has_hiz);
      /* We are seeing failures with CCS compression on top of MSAA
       * compression, so just enable MSAA compression for now on DG2.
       */
      if (!intel_device_info_is_dg2(devinfo) && has_ccs) {
         res->aux.usage = ISL_AUX_USAGE_MCS_CCS;
      } else {
         res->aux.usage = ISL_AUX_USAGE_MCS;
      }
   } else if (has_hiz) {
      assert(!res->mod_info);
      assert(!has_mcs);
      if (!has_ccs) {
         res->aux.usage = ISL_AUX_USAGE_HIZ;
      } else if (want_hiz_wt_for_res(devinfo, res)) {
         res->aux.usage = ISL_AUX_USAGE_HIZ_CCS_WT;
      } else {
         res->aux.usage = ISL_AUX_USAGE_HIZ_CCS;
      }
   } else if (has_ccs) {
      if (isl_surf_usage_is_stencil(res->surf.usage)) {
         assert(!res->mod_info);
         res->aux.usage = ISL_AUX_USAGE_STC_CCS;
      } else if (res->mod_info && res->mod_info->supports_media_compression) {
         res->aux.usage = ISL_AUX_USAGE_MC;
      } else if (want_ccs_e_for_format(devinfo, res->surf.format)) {
         res->aux.usage = intel_needs_workaround(devinfo, 1607794140) ?
            ISL_AUX_USAGE_FCV_CCS_E : ISL_AUX_USAGE_CCS_E;
      } else {
         assert(isl_format_supports_ccs_d(devinfo, res->surf.format));
         res->aux.usage = ISL_AUX_USAGE_CCS_D;
      }
   }

   if (res->mod_info &&
       isl_drm_modifier_has_aux(res->mod_info->modifier) != has_ccs) {
      return false;
   }

   return true;
}

/**
 * Initialize the aux buffer contents.
 *
 * Returns false on unexpected error (e.g. mapping a BO failed).
 */
static bool
iris_resource_init_aux_buf(struct iris_screen *screen,
                           struct iris_resource *res)
{
   const struct intel_device_info *devinfo = screen->devinfo;

   if (isl_aux_usage_has_ccs(res->aux.usage) && devinfo->ver <= 11) {
      /* Initialize the CCS on BDW-ICL to the PASS_THROUGH state. This avoids
       * the need to ambiguate in some cases.
       */
      void *map = iris_bo_map(NULL, res->bo, MAP_WRITE | MAP_RAW);
      if (!map)
         return false;

      memset((char *)map + res->aux.offset, 0, res->aux.surf.size_B);
      iris_bo_unmap(res->bo);

      res->aux.state = create_aux_state_map(res, ISL_AUX_STATE_PASS_THROUGH);
   } else {
      const enum isl_aux_state initial_state =
         isl_aux_get_initial_state(devinfo, res->aux.usage, res->bo->zeroed);
      res->aux.state = create_aux_state_map(res, initial_state);
   }
   if (!res->aux.state)
      return false;

   if (res->aux.offset > 0 || res->aux.comp_ctrl_surf_offset > 0) {
      res->aux.bo = res->bo;
      iris_bo_reference(res->aux.bo);
      map_aux_addresses(screen, res, res->internal_format, 0);
   }

   if (res->aux.clear_color_offset > 0) {
      res->aux.clear_color_bo = res->bo;
      iris_bo_reference(res->aux.clear_color_bo);
      res->aux.clear_color_unknown = !res->aux.clear_color_bo->zeroed;
   }

   return true;
}

static uint32_t
iris_buffer_alignment(uint64_t size)
{
   /* Some buffer operations want some amount of alignment. The largest
    * buffer texture pixel size is 4 * 4 = 16B. OpenCL data is also supposed
    * to be aligned, and the largest OpenCL data type is a double16, which is
    * 8 * 16 = 128B. Align to the largest power of 2 which fits in the size,
    * up to 128B.
    */
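   /* e.g. a 24B buffer: start at 128 and halve until it fits, giving 16. */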
   uint32_t align = MAX2(4 * 4, 8 * 16);
   while (align > size)
      align >>= 1;

   return align;
}

static struct pipe_resource *
iris_resource_create_for_buffer(struct pipe_screen *pscreen,
                                const struct pipe_resource *templ)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);

   assert(templ->target == PIPE_BUFFER);
   assert(templ->height0 <= 1);
   assert(templ->depth0 <= 1);
   assert(templ->format == PIPE_FORMAT_NONE ||
          util_format_get_blocksize(templ->format) == 1);

   res->internal_format = templ->format;
   res->surf.tiling = ISL_TILING_LINEAR;

   enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;
   const char *name = templ->target == PIPE_BUFFER ? "buffer" : "miptree";
   if (templ->flags & IRIS_RESOURCE_FLAG_SHADER_MEMZONE) {
      memzone = IRIS_MEMZONE_SHADER;
      name = "shader kernels";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_SURFACE_MEMZONE) {
      memzone = IRIS_MEMZONE_SURFACE;
      name = "surface state";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE) {
      memzone = IRIS_MEMZONE_DYNAMIC;
      name = "dynamic state";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_SCRATCH_MEMZONE) {
      memzone = IRIS_MEMZONE_SCRATCH;
      name = "scratch surface state";
   }

   unsigned flags = iris_resource_alloc_flags(screen, templ, res);

   res->bo = iris_bo_alloc(screen->bufmgr, name, templ->width0,
                           iris_buffer_alignment(templ->width0),
                           memzone, flags);

   if (!res->bo) {
      iris_resource_destroy(pscreen, &res->base.b);
      return NULL;
   }

   if (templ->bind & PIPE_BIND_SHARED) {
      iris_bo_mark_exported(res->bo);
      res->base.is_shared = true;
   }

   return &res->base.b;
}

static bool
iris_resource_image_is_pat_compressible(const struct iris_screen *screen,
                                        const struct pipe_resource *templ,
                                        struct iris_resource *res,
                                        unsigned flags)
{
   assert(templ->target != PIPE_BUFFER);

   if (INTEL_DEBUG(DEBUG_NO_CCS))
      return false;

   if (screen->devinfo->ver < 20)
      return false;

   if (flags & (BO_ALLOC_PROTECTED |
                BO_ALLOC_CACHED_COHERENT |
                BO_ALLOC_CPU_VISIBLE))
      return false;

   struct iris_bufmgr *bufmgr = screen->bufmgr;
   if ((iris_bufmgr_vram_size(bufmgr) > 0) && (flags & BO_ALLOC_SMEM))
      return false;

   /* We don't have modifiers with compression enabled on Xe2 so far. */
   if (res->mod_info) {
      assert(!isl_drm_modifier_has_aux(res->mod_info->modifier));
      return false;
   }

   /* Bspec 58797 (r58646):
    *
    *    Enabling compression is not legal for TileX surfaces.
    */
   if (res->surf.tiling == ISL_TILING_X)
      return false;

   /* Bspec 71650 (r59764):
    *
    *    3   SW must disable or resolve compression
    *        Display: Access to anything except Tile4 Framebuffers...
    *           Display Page Tables
    *           Display State Buffers
    *           Linear/TileX Framebuffers
    *           Display Write-Back Buffers
    *           Etc.
    *
    * So far, we don't support resolving on Xe2 and may not want to enable
    * compression under these conditions later, so we only enable it when
    * a TILING_4 image is being displayed.
    */
   if ((flags & BO_ALLOC_SCANOUT) && res->surf.tiling != ISL_TILING_4) {
      assert(res->surf.tiling == ISL_TILING_LINEAR);
      return false;
   }

   return true;
}

static struct pipe_resource *
iris_resource_create_for_image(struct pipe_screen *pscreen,
                               const struct pipe_resource *templ,
                               const uint64_t *modifiers,
                               int modifiers_count,
                               unsigned row_pitch_B)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);

   if (!res)
      return NULL;

   uint64_t modifier =
      select_best_modifier(devinfo, templ, modifiers, modifiers_count);

   if (modifier == DRM_FORMAT_MOD_INVALID && modifiers_count > 0) {
      fprintf(stderr, "Unsupported modifier, resource creation failed.\n");
      goto fail;
   }

   const bool isl_surf_created_successfully =
      iris_resource_configure_main(screen, res, templ, modifier, row_pitch_B);
   if (!isl_surf_created_successfully)
      goto fail;

   /* Don't create staging surfaces that will use over half the sram,
    * since staging implies you are copying data to another resource that's
    * at least as large, and then both wouldn't fit in system memory.
    *
    * Skip this for discrete cards, as the destination buffer might be in
    * device local memory while the staging buffer would be in system memory,
    * so both would fit.
    */
   if (templ->usage == PIPE_USAGE_STAGING && !devinfo->has_local_mem &&
       res->surf.size_B > (iris_bufmgr_sram_size(screen->bufmgr) / 2))
      goto fail;

   if (!iris_resource_configure_aux(screen, res))
      goto fail;

   const char *name = "miptree";
   enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;

   unsigned flags = iris_resource_alloc_flags(screen, templ, res);

   if (iris_resource_image_is_pat_compressible(screen, templ, res, flags))
      flags |= BO_ALLOC_COMPRESSED;

   /* These are for u_upload_mgr buffers only */
   assert(!(templ->flags & (IRIS_RESOURCE_FLAG_SHADER_MEMZONE |
                            IRIS_RESOURCE_FLAG_SURFACE_MEMZONE |
                            IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE |
                            IRIS_RESOURCE_FLAG_SCRATCH_MEMZONE)));

   /* Modifiers require the aux data to be in the same buffer as the main
    * surface, but we combine them even when a modifier is not being used.
    */
   uint64_t bo_size = res->surf.size_B;
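
   /* Resulting BO layout, each section aligned as required:
    *
    *    [0, surf.size_B)              main surface
    *    [aux.offset, ...)             aux surface (HiZ/MCS/CCS), if any
    *    [comp_ctrl_surf_offset, ...)  compression control surface, if any
    *    [clear_color_offset, ...)     indirect clear color, if any
    */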

   /* Allocate space for the aux buffer. */
   if (res->aux.surf.size_B > 0) {
      res->aux.offset = (uint32_t)align64(bo_size, res->aux.surf.alignment_B);
      bo_size = res->aux.offset + res->aux.surf.size_B;
   }

   /* Allocate space for the compression control surface. */
   if (devinfo->has_aux_map && isl_aux_usage_has_ccs(res->aux.usage)) {
      res->aux.comp_ctrl_surf_offset =
         (uint32_t)align64(bo_size, INTEL_AUX_MAP_META_ALIGNMENT_B);
      bo_size = res->aux.comp_ctrl_surf_offset +
                res->surf.size_B / INTEL_AUX_MAP_MAIN_SIZE_SCALEDOWN;
   }

   /* Allocate space for the indirect clear color. */
   if (iris_get_aux_clear_color_state_size(screen, res) > 0) {
      /* The kernel expects a 4k alignment, otherwise the display rejects
       * the surface.
       */
      const uint64_t clear_color_alignment =
         (res->mod_info && res->mod_info->supports_clear_color) ? 4096 : 64;
      res->aux.clear_color_offset = align64(bo_size, clear_color_alignment);
      bo_size = res->aux.clear_color_offset +
                iris_get_aux_clear_color_state_size(screen, res);
   }

   /* The ISL alignment already includes AUX-TT requirements, so no additional
    * attention required here :)
    */
   uint32_t alignment = MAX2(4096, res->surf.alignment_B);
   res->bo =
      iris_bo_alloc(screen->bufmgr, name, bo_size, alignment, memzone, flags);

   if (!res->bo)
      goto fail;

   if (res->aux.usage != ISL_AUX_USAGE_NONE &&
       !iris_resource_init_aux_buf(screen, res))
      goto fail;

   if (templ->bind & PIPE_BIND_SHARED) {
      iris_bo_mark_exported(res->bo);
      res->base.is_shared = true;
   }

   return &res->base.b;

fail:
   iris_resource_destroy(pscreen, &res->base.b);
   return NULL;
}

static struct pipe_resource *
iris_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                    const struct pipe_resource *templ,
                                    const uint64_t *modifiers,
                                    int modifier_count)
{
   return iris_resource_create_for_image(pscreen, templ, modifiers,
                                         modifier_count, 0);
}

static struct pipe_resource *
iris_resource_create(struct pipe_screen *pscreen,
                     const struct pipe_resource *templ)
{
   if (templ->target == PIPE_BUFFER)
      return iris_resource_create_for_buffer(pscreen, templ);
   else
      return iris_resource_create_with_modifiers(pscreen, templ, NULL, 0);
}

static uint64_t
tiling_to_modifier(struct iris_bufmgr *bufmgr, uint32_t tiling)
{
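   /* Only the i915 KMD has a tiling uapi; other KMDs (e.g. Xe) report no
    * tiling here, which maps to linear.
    */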
   if (iris_bufmgr_get_device_info(bufmgr)->kmd_type != INTEL_KMD_TYPE_I915) {
      assert(tiling == 0);
      return DRM_FORMAT_MOD_LINEAR;
   }

   return iris_i915_tiling_to_modifier(tiling);
}

static struct pipe_resource *
iris_resource_from_user_memory(struct pipe_screen *pscreen,
                               const struct pipe_resource *templ,
                               void *user_memory)
{
   if (templ->target != PIPE_BUFFER &&
       templ->target != PIPE_TEXTURE_1D &&
       templ->target != PIPE_TEXTURE_2D)
      return NULL;

   if (templ->array_size > 1)
      return NULL;

   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
   if (!res)
      return NULL;

   size_t res_size = templ->width0;
   if (templ->target != PIPE_BUFFER) {
      const uint32_t row_pitch_B =
         templ->width0 * util_format_get_blocksize(templ->format);
      res_size = templ->height0 * row_pitch_B;

      if (!iris_resource_configure_main(screen, res, templ,
                                        DRM_FORMAT_MOD_LINEAR,
                                        row_pitch_B)) {
         iris_resource_destroy(pscreen, &res->base.b);
         return NULL;
      }
      assert(res->surf.size_B <= res_size);
   }

   /* The userptr ioctl only works on whole pages. Because we know that
    * things will exist in memory at a page granularity, we can expand the
    * range given by the client into the whole number of pages and use an
    * offset on the resource to make it look like it starts at the user's
    * pointer.
    */
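   /* e.g. with 4 KiB pages, user_memory 0x12345 yields offset 0x345,
    * mem_start 0x12000, and a size rounded up to a whole page multiple.
    */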
   size_t page_size = getpagesize();
   assert(util_is_power_of_two_nonzero_uintptr(page_size));
   size_t offset = (uintptr_t)user_memory & (page_size - 1);
   void *mem_start = (char *)user_memory - offset;
   size_t mem_size = offset + res_size;
   mem_size = ALIGN_NPOT(mem_size, page_size);

   res->internal_format = templ->format;
   res->base.is_user_ptr = true;
   res->bo = iris_bo_create_userptr(bufmgr, "user", mem_start, mem_size,
                                    IRIS_MEMZONE_OTHER);
   res->offset = offset;
   if (!res->bo) {
      iris_resource_destroy(pscreen, &res->base.b);
      return NULL;
   }

   util_range_add(&res->base.b, &res->valid_buffer_range, 0, templ->width0);

   return &res->base.b;
}

static unsigned
get_num_planes(const struct pipe_resource *resource)
{
   unsigned count = 0;
   for (const struct pipe_resource *cur = resource; cur; cur = cur->next)
      count++;

   return count;
}

static unsigned
get_main_plane_for_plane(enum pipe_format format,
                         unsigned plane)
{
   if (format == PIPE_FORMAT_NONE) {
      /* Created dmabuf resources have this format. */
      return 0;
   } else if (isl_format_for_pipe_format(format) == ISL_FORMAT_UNSUPPORTED) {
      /* This format has been lowered to more planes than are native to it.
       * So, compression modifiers are not enabled and the plane index is used
       * as-is.
       */
      return plane;
   } else {
      unsigned int n_planes = util_format_get_num_planes(format);
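      /* e.g. NV12 (2 main planes) imported with an aux-carrying modifier
       * exposes 4 planes; aux planes 2 and 3 map back to main planes 0
       * and 1.
       */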
      return plane % n_planes;
   }
}

static struct pipe_resource *
iris_resource_from_handle(struct pipe_screen *pscreen,
                          const struct pipe_resource *templ,
                          struct winsys_handle *whandle,
                          unsigned usage)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   /* The gallium dri layer creates a pipe resource for each plane specified
    * by the format and modifier. Once all planes are present, we will merge
    * the separate parameters into the iris_resource(s) for the main plane(s).
    * Save the modifier import information now to reconstruct later.
    */
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
   if (!res)
      return NULL;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_FD:
      res->bo = iris_bo_import_dmabuf(bufmgr, whandle->handle,
                                      whandle->modifier);
      break;
   case WINSYS_HANDLE_TYPE_SHARED:
      res->bo = iris_bo_gem_create_from_name(bufmgr, "winsys image",
                                             whandle->handle);
      break;
   default:
      unreachable("invalid winsys handle type");
   }
   if (!res->bo)
      goto fail;

   res->offset = whandle->offset;
   res->surf.row_pitch_B = whandle->stride;

   if (whandle->plane == 0) {
      /* All planes are present. Fill out the main plane resource(s). */
      for (unsigned plane = 0; plane < util_resource_num(templ); plane++) {
         const unsigned main_plane =
            get_main_plane_for_plane(whandle->format, plane);
         struct iris_resource *main_res = (struct iris_resource *)
            util_resource_at_index(&res->base.b, main_plane);
         const struct iris_resource *plane_res = (struct iris_resource *)
            util_resource_at_index(&res->base.b, plane);

         if (isl_drm_modifier_plane_is_clear_color(whandle->modifier,
                                                   plane)) {
            /* Fill out the clear color fields. */
            assert(plane_res->bo->size >= plane_res->offset +
                   screen->isl_dev.ss.clear_color_state_size);

            iris_bo_reference(plane_res->bo);
            main_res->aux.clear_color_bo = plane_res->bo;
            main_res->aux.clear_color_offset = plane_res->offset;
            main_res->aux.clear_color_unknown = true;
         } else if (plane > main_plane) {
            /* Fill out some aux surface fields. */
            assert(isl_drm_modifier_has_aux(whandle->modifier));
            assert(!devinfo->has_flat_ccs);

            iris_bo_reference(plane_res->bo);
            res->aux.bo = plane_res->bo;

            if (devinfo->has_aux_map) {
               assert(plane_res->surf.row_pitch_B ==
                      main_res->surf.row_pitch_B /
                      INTEL_AUX_MAP_MAIN_PITCH_SCALEDOWN);
               assert(plane_res->bo->size >= plane_res->offset +
                      main_res->surf.size_B /
                      INTEL_AUX_MAP_MAIN_SIZE_SCALEDOWN);

               main_res->aux.comp_ctrl_surf_offset = plane_res->offset;
               map_aux_addresses(screen, main_res, whandle->format,
                                 main_plane);
            } else {
               assert(plane_res->surf.row_pitch_B ==
                      main_res->aux.surf.row_pitch_B);
               assert(plane_res->bo->size >= plane_res->offset +
                      main_res->aux.surf.size_B);

               main_res->aux.offset = plane_res->offset;
            }
         } else {
            /* Fill out fields that are convenient to initialize now. */
            assert(plane == main_plane);

            main_res->external_format = whandle->format;

            if (templ->target == PIPE_BUFFER) {
               main_res->surf.tiling = ISL_TILING_LINEAR;
               return &main_res->base.b;
            }

            uint64_t modifier;
            if (whandle->modifier == DRM_FORMAT_MOD_INVALID) {
               /* We have no modifier; match whatever GEM_GET_TILING says */
               uint32_t tiling;
               iris_gem_get_tiling(main_res->bo, &tiling);
               modifier = tiling_to_modifier(bufmgr, tiling);
            } else {
               modifier = whandle->modifier;
            }

            const bool isl_surf_created_successfully =
               iris_resource_configure_main(screen, main_res,
                                            &main_res->base.b, modifier,
                                            main_res->surf.row_pitch_B);
            if (!isl_surf_created_successfully)
               goto fail;

            assert(main_res->bo->size >= main_res->offset +
                   main_res->surf.size_B);

            if (!iris_resource_configure_aux(screen, main_res))
               goto fail;

            if (res->aux.usage != ISL_AUX_USAGE_NONE) {
               const enum isl_aux_state aux_state =
                  isl_drm_modifier_get_default_aux_state(modifier);
               main_res->aux.state =
                  create_aux_state_map(main_res, aux_state);
               if (!main_res->aux.state)
                  goto fail;
            }

            /* Add on a clear color BO if needed. */
            if (!main_res->mod_info->supports_clear_color &&
                iris_get_aux_clear_color_state_size(screen, main_res) > 0) {
               main_res->aux.clear_color_bo =
                  iris_bo_alloc(screen->bufmgr, "clear color buffer",
                                screen->isl_dev.ss.clear_color_state_size,
                                64, IRIS_MEMZONE_OTHER, BO_ALLOC_ZEROED);
               if (!main_res->aux.clear_color_bo)
                  goto fail;
            }
         }
      }
   }

   return &res->base.b;

fail:
   iris_resource_destroy(pscreen, &res->base.b);
   return NULL;
}

static struct pipe_resource *
iris_resource_from_memobj(struct pipe_screen *pscreen,
                          const struct pipe_resource *templ,
                          struct pipe_memory_object *pmemobj,
                          uint64_t offset)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_memory_object *memobj = (struct iris_memory_object *)pmemobj;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);

   if (!res)
      return NULL;

   res->bo = memobj->bo;
   res->offset = offset;
   res->external_format = templ->format;
   res->internal_format = templ->format;

   if (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) {
      UNUSED const bool isl_surf_created_successfully =
         iris_resource_configure_main(screen, res, templ, DRM_FORMAT_MOD_INVALID, 0);
      assert(isl_surf_created_successfully);
   }

   iris_bo_reference(memobj->bo);

   return &res->base.b;
}

/* Handle combined depth/stencil with memory objects.
 *
 * This function is modeled after u_transfer_helper_resource_create.
 */
static struct pipe_resource *
iris_resource_from_memobj_wrapper(struct pipe_screen *pscreen,
                                  const struct pipe_resource *templ,
                                  struct pipe_memory_object *pmemobj,
                                  uint64_t offset)
{
   enum pipe_format format = templ->format;

   /* Normal case, no special handling: */
   if (!(util_format_is_depth_and_stencil(format)))
      return iris_resource_from_memobj(pscreen, templ, pmemobj, offset);

   struct pipe_resource t = *templ;
   t.format = util_format_get_depth_only(format);

   struct pipe_resource *prsc =
      iris_resource_from_memobj(pscreen, &t, pmemobj, offset);
   if (!prsc)
      return NULL;

   struct iris_resource *res = (struct iris_resource *) prsc;

   /* Stencil offset in the buffer without aux. */
   uint64_t s_offset = offset +
      align64(res->surf.size_B, res->surf.alignment_B);

   prsc->format = format; /* frob the format back to the "external" format */

   t.format = PIPE_FORMAT_S8_UINT;
   struct pipe_resource *stencil =
      iris_resource_from_memobj(pscreen, &t, pmemobj, s_offset);
   if (!stencil) {
      iris_resource_destroy(pscreen, prsc);
      return NULL;
   }

   iris_resource_set_separate_stencil(prsc, stencil);
   return prsc;
}

1539 /**
1540 * Reallocate a (non-external) resource into new storage, copying the data
1541 * and modifying the original resource to point at the new storage.
1542 *
1543 * This is useful for e.g. moving a suballocated internal resource to a
1544 * dedicated allocation that can be exported by itself.
1545 */
1546 static void
iris_reallocate_resource_inplace(struct iris_context * ice,struct iris_resource * old_res,unsigned new_bind_flag)1547 iris_reallocate_resource_inplace(struct iris_context *ice,
1548 struct iris_resource *old_res,
1549 unsigned new_bind_flag)
1550 {
1551 struct pipe_screen *pscreen = ice->ctx.screen;
1552
1553 if (iris_bo_is_external(old_res->bo))
1554 return;
1555
1556 assert(old_res->mod_info == NULL);
1557 assert(old_res->bo == old_res->aux.bo || old_res->aux.bo == NULL);
1558 assert(old_res->bo == old_res->aux.clear_color_bo ||
1559 old_res->aux.clear_color_bo == NULL);
1560 assert(old_res->external_format == PIPE_FORMAT_NONE);
1561
1562 struct pipe_resource templ = old_res->base.b;
1563 templ.bind |= new_bind_flag;
1564
1565 struct iris_resource *new_res =
1566 (void *) pscreen->resource_create(pscreen, &templ);
1567
1568 assert(iris_bo_is_real(new_res->bo));
1569
1570 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
1571
1572 if (old_res->base.b.target == PIPE_BUFFER) {
1573 struct pipe_box box = (struct pipe_box) {
1574 .width = old_res->base.b.width0,
1575 .height = 1,
1576 };
1577
1578 iris_copy_region(&ice->blorp, batch, &new_res->base.b, 0, 0, 0, 0,
1579 &old_res->base.b, 0, &box);
1580 } else {
1581 for (unsigned l = 0; l <= templ.last_level; l++) {
1582 struct pipe_box box = (struct pipe_box) {
1583 .width = u_minify(templ.width0, l),
1584 .height = u_minify(templ.height0, l),
1585 .depth = util_num_layers(&templ, l),
1586 };
1587
1588 iris_copy_region(&ice->blorp, batch, &new_res->base.b, l, 0, 0, 0,
1589 &old_res->base.b, l, &box);
1590 }
1591 }
1592
1593 struct iris_bo *old_bo = old_res->bo;
1594 struct iris_bo *old_aux_bo = old_res->aux.bo;
1595 struct iris_bo *old_clear_color_bo = old_res->aux.clear_color_bo;
1596
1597 /* Replace the structure fields with the new ones */
1598 old_res->base.b.bind = templ.bind;
1599 old_res->surf = new_res->surf;
1600 old_res->bo = new_res->bo;
1601 old_res->aux.surf = new_res->aux.surf;
1602 old_res->aux.bo = new_res->aux.bo;
1603 old_res->aux.offset = new_res->aux.offset;
1604 old_res->aux.comp_ctrl_surf_offset = new_res->aux.comp_ctrl_surf_offset;
1605 old_res->aux.clear_color_bo = new_res->aux.clear_color_bo;
1606 old_res->aux.clear_color_offset = new_res->aux.clear_color_offset;
1607 old_res->aux.usage = new_res->aux.usage;
1608
1609 if (new_res->aux.state) {
1610 assert(old_res->aux.state);
1611 for (unsigned l = 0; l <= templ.last_level; l++) {
1612 unsigned layers = util_num_layers(&templ, l);
1613 for (unsigned z = 0; z < layers; z++) {
1614 enum isl_aux_state aux =
1615 iris_resource_get_aux_state(new_res, l, z);
1616 iris_resource_set_aux_state(ice, old_res, l, z, 1, aux);
1617 }
1618 }
1619 }
1620
1621 /* old_res now points at the new BOs, make new_res point at the old ones
1622 * so they'll be freed when we unreference the resource below.
1623 */
1624 new_res->bo = old_bo;
1625 new_res->aux.bo = old_aux_bo;
1626 new_res->aux.clear_color_bo = old_clear_color_bo;
1627
1628 pipe_resource_reference((struct pipe_resource **)&new_res, NULL);
1629 }
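/* Illustrative sketch (not part of the driver): a caller wanting to export
 * a suballocated resource on its own could promote it like so, assuming
 * `ice` and `res` are a live context and a non-external resource:
 *
 *    iris_reallocate_resource_inplace(ice, res, PIPE_BIND_SHARED);
 *    assert(res->base.b.bind & PIPE_BIND_SHARED);
 *
 * This is exactly the pattern iris_flush_resource() uses below.
 */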
1630
1631 static void
1632 iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
1633 {
1634 struct iris_context *ice = (struct iris_context *)ctx;
1635 struct iris_resource *res = (void *) resource;
1636 const struct isl_drm_modifier_info *mod = res->mod_info;
1637 bool newly_external = false;
1638
1639 /* flush_resource() may be used to prepare an image for sharing externally
1640 * with other clients (e.g. via eglCreateImage). To account for this, we
1641 * make sure to eliminate suballocation and any compression that a consumer
1642 * wouldn't know how to handle.
1643 */
1644 if (!iris_bo_is_real(res->bo)) {
1645 assert(!(res->base.b.bind & PIPE_BIND_SHARED));
1646 iris_reallocate_resource_inplace(ice, res, PIPE_BIND_SHARED);
1647 assert(res->base.b.bind & PIPE_BIND_SHARED);
1648 newly_external = true;
1649 }
1650
1651 iris_resource_prepare_access(ice, res,
1652 0, INTEL_REMAINING_LEVELS,
1653 0, INTEL_REMAINING_LAYERS,
1654 mod ? res->aux.usage : ISL_AUX_USAGE_NONE,
1655 mod ? mod->supports_clear_color : false);
1656
1657 bool disable_aux = !res->mod_info && res->aux.usage != ISL_AUX_USAGE_NONE;
1658
1659 if (newly_external || disable_aux) {
1660 iris_foreach_batch(ice, batch) {
1661 if (iris_batch_references(batch, res->bo))
1662 iris_batch_flush(batch);
1663 }
1664 }
1665
1666 if (disable_aux)
1667 iris_resource_disable_aux(res);
1668 }
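/* Hedged usage note: the state tracker reaches the path above through the
 * generic hook, e.g.
 *
 *    ctx->flush_resource(ctx, prsc);
 *
 * just before handing a dma-buf to another client (as in eglCreateImage);
 * the consumer never needs any iris-specific calls, since the resolve and
 * reallocation both happen here.
 */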
1669
1670 static void
1671 iris_resource_disable_aux_on_first_query(struct pipe_resource *resource,
1672 unsigned usage)
1673 {
1674 struct iris_resource *res = (struct iris_resource *)resource;
1675 bool mod_with_aux =
1676 res->mod_info && isl_drm_modifier_has_aux(res->mod_info->modifier);
1677
1678 /* Disable aux usage if the resource was not created with a modifier that
1679 * carries aux data, PIPE_HANDLE_USAGE_EXPLICIT_FLUSH is not set, and this
1680 * is the first time we are dealing with this resource (refcount == 1).
1681 */
1682 if (!mod_with_aux &&
1683 (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) && res->aux.usage != 0) &&
1684 p_atomic_read(&resource->reference.count) == 1) {
1685 iris_resource_disable_aux(res);
1686 }
1687 }
1688
1689 static bool
1690 iris_resource_get_param(struct pipe_screen *pscreen,
1691 struct pipe_context *ctx,
1692 struct pipe_resource *resource,
1693 unsigned plane,
1694 unsigned layer,
1695 unsigned level,
1696 enum pipe_resource_param param,
1697 unsigned handle_usage,
1698 uint64_t *value)
1699 {
1700 struct iris_screen *screen = (struct iris_screen *)pscreen;
1701 struct iris_resource *base_res = (struct iris_resource *)resource;
1702 unsigned main_plane = get_main_plane_for_plane(base_res->external_format,
1703 plane);
1704 struct iris_resource *res =
1705 (struct iris_resource *)util_resource_at_index(resource, main_plane);
1706 assert(res);
1707
1708 bool mod_with_aux =
1709 res->mod_info && isl_drm_modifier_has_aux(res->mod_info->modifier);
1710 bool wants_aux = mod_with_aux && plane != main_plane;
1711 bool wants_cc = mod_with_aux &&
1712 isl_drm_modifier_plane_is_clear_color(res->mod_info->modifier, plane);
1713 bool result;
1714 unsigned handle;
1715
1716 iris_resource_disable_aux_on_first_query(resource, handle_usage);
1717
1718 struct iris_bo *bo = wants_cc ? res->aux.clear_color_bo :
1719 wants_aux ? res->aux.bo : res->bo;
1720
1721 assert(iris_bo_is_real(bo));
1722
1723 switch (param) {
1724 case PIPE_RESOURCE_PARAM_NPLANES:
1725 if (mod_with_aux) {
1726 *value = iris_get_dmabuf_modifier_planes(pscreen,
1727 res->mod_info->modifier,
1728 res->external_format);
1729 } else {
1730 *value = get_num_planes(&res->base.b);
1731 }
1732 return true;
1733 case PIPE_RESOURCE_PARAM_STRIDE:
1734 if (wants_cc) {
1735 *value = ISL_DRM_CC_PLANE_PITCH_B;
1736 } else if (wants_aux) {
1737 *value = screen->devinfo->has_aux_map ?
1738 res->surf.row_pitch_B / INTEL_AUX_MAP_MAIN_PITCH_SCALEDOWN :
1739 res->aux.surf.row_pitch_B;
1740 } else {
1741 *value = res->surf.row_pitch_B;
1742 }
1743
1744 /* Mesa's implementation of eglCreateImage rejects strides of zero (see
1745 * dri2_check_dma_buf_attribs). Ensure we return a non-zero stride as
1746 * this value may be queried from GBM and passed into EGL.
1747 *
1748 * We make an exception for buffers. For OpenCL gl_sharing we have to
1749 * support exporting buffers, for which we report a stride of 0 here.
1750 */
1751 assert(*value != 0 || resource->target == PIPE_BUFFER);
1752
1753 return true;
1754 case PIPE_RESOURCE_PARAM_OFFSET:
1755 if (wants_cc) {
1756 *value = res->aux.clear_color_offset;
1757 } else if (wants_aux) {
1758 *value = screen->devinfo->has_aux_map ?
1759 res->aux.comp_ctrl_surf_offset :
1760 res->aux.offset;
1761 } else {
1762 *value = res->offset;
1763 }
1764 return true;
1765 case PIPE_RESOURCE_PARAM_MODIFIER:
1766 if (res->mod_info) {
1767 *value = res->mod_info->modifier;
1768 } else {
1769 /* We restrict ourselves to modifiers without CCS for several
1770 * reasons:
1771 *
1772 * - Mesa's implementation of EGL_MESA_image_dma_buf_export
1773 * currently only exports a single plane (see
1774 * dri2_export_dma_buf_image_mesa), but for some modifiers,
1775 * CCS exists in a second plane.
1776 *
1777 * - Even if we returned CCS modifiers, iris currently
1778 * resolves away compression during the export/flushing process
1779 * (see iris_flush_resource). So, only uncompressed data is
1780 * exposed anyways.
1781 */
1782 switch (res->surf.tiling) {
1783 case ISL_TILING_4: *value = I915_FORMAT_MOD_4_TILED; break;
1784 case ISL_TILING_Y0: *value = I915_FORMAT_MOD_Y_TILED; break;
1785 case ISL_TILING_X: *value = I915_FORMAT_MOD_X_TILED; break;
1786 case ISL_TILING_LINEAR: *value = DRM_FORMAT_MOD_LINEAR; break;
1787 default:
1788 assert("no modifier mapped for resource's tiling");
1789 return false;
1790 }
1791 }
1792 return true;
1793 case PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED:
1794 if (!wants_aux)
1795 iris_gem_set_tiling(bo, &res->surf);
1796
1797 result = iris_bo_flink(bo, &handle) == 0;
1798 if (result)
1799 *value = handle;
1800 return result;
1801 case PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS: {
1802 if (!wants_aux)
1803 iris_gem_set_tiling(bo, &res->surf);
1804
1805 /* Because multiple iris_screen objects can share the same DRM file, when
1806 * we export a GEM handle we must make sure it is valid in the DRM file
1807 * descriptor the caller is using (this is the FD given at screen
1808 * creation).
1809 */
1810 uint32_t handle;
1811 if (iris_bo_export_gem_handle_for_device(bo, screen->winsys_fd, &handle))
1812 return false;
1813 *value = handle;
1814 return true;
1815 }
1816
1817 case PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD:
1818 if (!wants_aux)
1819 iris_gem_set_tiling(bo, &res->surf);
1820
1821 result = iris_bo_export_dmabuf(bo, (int *) &handle) == 0;
1822 if (result)
1823 *value = handle;
1824 return result;
1825 default:
1826 return false;
1827 }
1828 }
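/* Hedged example: a winsys exporting plane 0 of a dma-buf might query the
 * layout through the generic hook like this (the NULL context and zero
 * handle_usage are illustrative):
 *
 *    uint64_t stride, offset, fd;
 *    pscreen->resource_get_param(pscreen, NULL, prsc, 0, 0, 0,
 *                                PIPE_RESOURCE_PARAM_STRIDE, 0, &stride);
 *    pscreen->resource_get_param(pscreen, NULL, prsc, 0, 0, 0,
 *                                PIPE_RESOURCE_PARAM_OFFSET, 0, &offset);
 *    pscreen->resource_get_param(pscreen, NULL, prsc, 0, 0, 0,
 *                                PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD, 0, &fd);
 *
 * which mirrors what iris_resource_get_handle() does below.
 */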
1829
1830 static bool
1831 iris_resource_get_handle(struct pipe_screen *pscreen,
1832 struct pipe_context *ctx,
1833 struct pipe_resource *resource,
1834 struct winsys_handle *whandle,
1835 unsigned usage)
1836 {
1837 struct iris_screen *screen = (struct iris_screen *) pscreen;
1838 struct iris_resource *res = (struct iris_resource *)resource;
1839 bool mod_with_aux =
1840 res->mod_info && isl_drm_modifier_has_aux(res->mod_info->modifier);
1841
1842 iris_resource_disable_aux_on_first_query(resource, usage);
1843
1844 assert(iris_bo_is_real(res->bo));
1845
1846 struct iris_bo *bo;
1847 if (res->mod_info &&
1848 isl_drm_modifier_plane_is_clear_color(res->mod_info->modifier,
1849 whandle->plane)) {
1850 bo = res->aux.clear_color_bo;
1851 } else if (mod_with_aux && whandle->plane > 0) {
1852 bo = res->aux.bo;
1853 } else {
1854 bo = res->bo;
1855 }
1856
1857 uint64_t stride;
1858 iris_resource_get_param(pscreen, ctx, resource, whandle->plane, 0, 0,
1859 PIPE_RESOURCE_PARAM_STRIDE, usage, &stride);
1860
1861 uint64_t offset;
1862 iris_resource_get_param(pscreen, ctx, resource, whandle->plane, 0, 0,
1863 PIPE_RESOURCE_PARAM_OFFSET, usage, &offset);
1864
1865 uint64_t modifier;
1866 iris_resource_get_param(pscreen, ctx, resource, whandle->plane, 0, 0,
1867 PIPE_RESOURCE_PARAM_MODIFIER, usage, &modifier);
1868
1869 whandle->stride = stride;
1870 whandle->offset = offset;
1871 whandle->modifier = modifier;
1872 whandle->format = res->external_format;
1873
1874 #ifndef NDEBUG
1875 enum isl_aux_usage allowed_usage =
1876 (usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) || mod_with_aux ?
1877 res->aux.usage : ISL_AUX_USAGE_NONE;
1878
1879 if (res->aux.usage != allowed_usage) {
1880 enum isl_aux_state aux_state = iris_resource_get_aux_state(res, 0, 0);
1881 assert(aux_state == ISL_AUX_STATE_RESOLVED ||
1882 aux_state == ISL_AUX_STATE_PASS_THROUGH);
1883 }
1884 #endif
1885
1886 /* TODO: TILE64 modifier support in the KMD */
1887 assert(res->surf.tiling != ISL_TILING_64);
1888
1889 switch (whandle->type) {
1890 case WINSYS_HANDLE_TYPE_SHARED:
1891 iris_gem_set_tiling(bo, &res->surf);
1892 return iris_bo_flink(bo, &whandle->handle) == 0;
1893 case WINSYS_HANDLE_TYPE_KMS: {
1894 iris_gem_set_tiling(bo, &res->surf);
1895
1896 /* Because multiple iris_screen objects can share the same DRM file, when
1897 * we export a GEM handle we must make sure it is valid in the DRM file
1898 * descriptor the caller is using (this is the FD given at screen
1899 * creation).
1900 */
1901 uint32_t handle;
1902 if (iris_bo_export_gem_handle_for_device(bo, screen->winsys_fd, &handle))
1903 return false;
1904 whandle->handle = handle;
1905 return true;
1906 }
1907 case WINSYS_HANDLE_TYPE_FD:
1908 iris_gem_set_tiling(bo, &res->surf);
1909 return iris_bo_export_dmabuf(bo, (int *) &whandle->handle) == 0;
1910 }
1911
1912 return false;
1913 }
1914
1915 static bool
1916 resource_is_busy(struct iris_context *ice,
1917 struct iris_resource *res)
1918 {
1919 bool busy = iris_bo_busy(res->bo);
1920
1921 iris_foreach_batch(ice, batch)
1922 busy |= iris_batch_references(batch, res->bo);
1923
1924 return busy;
1925 }
1926
1927 void
1928 iris_replace_buffer_storage(struct pipe_context *ctx,
1929 struct pipe_resource *p_dst,
1930 struct pipe_resource *p_src,
1931 unsigned num_rebinds,
1932 uint32_t rebind_mask,
1933 uint32_t delete_buffer_id)
1934 {
1935 struct iris_screen *screen = (void *) ctx->screen;
1936 struct iris_context *ice = (void *) ctx;
1937 struct iris_resource *dst = (void *) p_dst;
1938 struct iris_resource *src = (void *) p_src;
1939
1940 assert(memcmp(&dst->surf, &src->surf, sizeof(dst->surf)) == 0);
1941
1942 struct iris_bo *old_bo = dst->bo;
1943
1944 /* Swap out the backing storage */
1945 iris_bo_reference(src->bo);
1946 dst->bo = src->bo;
1947
1948 /* Rebind the buffer, replacing any state referring to the old BO's
1949 * address, and marking state dirty so it's reemitted.
1950 */
1951 screen->vtbl.rebind_buffer(ice, dst);
1952
1953 iris_bo_unreference(old_bo);
1954 }
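/* Descriptive note (our understanding): this is the replace_buffer_storage
 * callback handed to u_threaded_context at context creation; it is invoked
 * on the driver thread when a buffer invalidation was deferred, with `src`
 * being the fresh idle buffer whose storage we adopt above.
 */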
1955
1956 /**
1957 * Discard a buffer's contents and replace its backing storage with a
1958 * fresh, idle buffer if necessary.
1959 *
1960 * Returns true if the storage can be considered idle.
1961 */
1962 static bool
1963 iris_invalidate_buffer(struct iris_context *ice, struct iris_resource *res)
1964 {
1965 struct iris_screen *screen = (void *) ice->ctx.screen;
1966
1967 if (res->base.b.target != PIPE_BUFFER)
1968 return false;
1969
1970 /* If it's already invalidated, don't bother doing anything.
1971 * We consider the storage to be idle, because either it was freshly
1972 * allocated (and not busy), or a previous call here was what cleared
1973 * the range, and that call replaced the storage with an idle buffer.
1974 */
1975 if (res->valid_buffer_range.start > res->valid_buffer_range.end)
1976 return true;
1977
1978 if (!resource_is_busy(ice, res)) {
1979 /* The resource is idle, so just mark that it contains no data and
1980 * keep using the same underlying buffer object.
1981 */
1982 util_range_set_empty(&res->valid_buffer_range);
1983 return true;
1984 }
1985
1986 /* Otherwise, try and replace the backing storage with a new BO. */
1987
1988 /* We can't reallocate memory we didn't allocate in the first place. */
1989 if (res->bo->gem_handle && res->bo->real.userptr)
1990 return false;
1991
1992 /* Nor can we allocate buffers we imported or exported. */
1993 if (iris_bo_is_external(res->bo))
1994 return false;
1995
1996 struct iris_bo *old_bo = res->bo;
1997 unsigned flags = old_bo->real.protected ? BO_ALLOC_PROTECTED : BO_ALLOC_PLAIN;
1998 struct iris_bo *new_bo =
1999 iris_bo_alloc(screen->bufmgr, res->bo->name, res->base.b.width0,
2000 iris_buffer_alignment(res->base.b.width0),
2001 iris_memzone_for_address(old_bo->address),
2002 flags);
2003 if (!new_bo)
2004 return false;
2005
2006 /* Swap out the backing storage */
2007 res->bo = new_bo;
2008
2009 /* Rebind the buffer, replacing any state referring to the old BO's
2010 * address, and marking state dirty so it's reemitted.
2011 */
2012 screen->vtbl.rebind_buffer(ice, res);
2013
2014 util_range_set_empty(&res->valid_buffer_range);
2015
2016 iris_bo_unreference(old_bo);
2017
2018 /* The new buffer is idle. */
2019 return true;
2020 }
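/* Minimal usage sketch, assuming `ctx` is a pipe_context and `prsc` a busy
 * PIPE_BUFFER: a glBufferData(..., NULL)-style orphaning boils down to
 *
 *    ctx->invalidate_resource(ctx, prsc);
 *
 * after which a write map can proceed unsynchronized, because the backing
 * BO was just swapped for an idle one.
 */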
2021
2022 static void
2023 iris_invalidate_resource(struct pipe_context *ctx,
2024 struct pipe_resource *resource)
2025 {
2026 struct iris_context *ice = (void *) ctx;
2027 struct iris_resource *res = (void *) resource;
2028
2029 iris_invalidate_buffer(ice, res);
2030 }
2031
2032 static void
2033 iris_flush_staging_region(struct pipe_transfer *xfer,
2034 const struct pipe_box *flush_box)
2035 {
2036 if (!(xfer->usage & PIPE_MAP_WRITE))
2037 return;
2038
2039 struct iris_transfer *map = (void *) xfer;
2040
2041 struct pipe_box src_box = *flush_box;
2042
2043 /* Account for extra alignment padding in staging buffer */
2044 if (xfer->resource->target == PIPE_BUFFER)
2045 src_box.x += xfer->box.x % IRIS_MAP_BUFFER_ALIGNMENT;
2046
2047 struct pipe_box dst_box = (struct pipe_box) {
2048 .x = xfer->box.x + flush_box->x,
2049 .y = xfer->box.y + flush_box->y,
2050 .z = xfer->box.z + flush_box->z,
2051 .width = flush_box->width,
2052 .height = flush_box->height,
2053 .depth = flush_box->depth,
2054 };
2055
2056 iris_copy_region(map->blorp, map->batch, xfer->resource, xfer->level,
2057 dst_box.x, dst_box.y, dst_box.z, map->staging, 0,
2058 &src_box);
2059 }
2060
2061 static void
2062 iris_unmap_copy_region(struct iris_transfer *map)
2063 {
2064 iris_resource_destroy(map->staging->screen, map->staging);
2065
2066 map->ptr = NULL;
2067 }
2068
2069 static void
2070 iris_map_copy_region(struct iris_transfer *map)
2071 {
2072 struct pipe_screen *pscreen = &map->batch->screen->base;
2073 struct pipe_transfer *xfer = &map->base.b;
2074 struct pipe_box *box = &xfer->box;
2075 struct iris_resource *res = (void *) xfer->resource;
2076
2077 unsigned extra = xfer->resource->target == PIPE_BUFFER ?
2078 box->x % IRIS_MAP_BUFFER_ALIGNMENT : 0;
2079
2080 struct pipe_resource templ = (struct pipe_resource) {
2081 .usage = PIPE_USAGE_STAGING,
2082 .width0 = box->width + extra,
2083 .height0 = box->height,
2084 .depth0 = 1,
2085 .nr_samples = xfer->resource->nr_samples,
2086 .nr_storage_samples = xfer->resource->nr_storage_samples,
2087 .array_size = box->depth,
2088 .format = res->internal_format,
2089 };
2090
2091 if (xfer->resource->target == PIPE_BUFFER) {
2092 templ.target = PIPE_BUFFER;
2093 map->staging = iris_resource_create_for_buffer(pscreen, &templ);
2094 } else {
2095 templ.target = templ.array_size > 1 ? PIPE_TEXTURE_2D_ARRAY
2096 : PIPE_TEXTURE_2D;
2097
2098 unsigned row_pitch_B = 0;
2099
2100 #if DETECT_OS_ANDROID
2101 /* Staging buffers for stall-avoidance blits don't always have the
2102 * same restrictions on stride as the original buffer. For example,
2103 * the original buffer may be used for scanout, while the staging
2104 * buffer will not be. So we may compute a smaller stride for the
2105 * staging buffer than the original.
2106 *
2107 * Normally, this is good, as it saves memory. Unfortunately, for
2108 * Android, gbm_gralloc incorrectly asserts that the stride returned
2109 * by gbm_bo_map() must equal the result of gbm_bo_get_stride(),
2110 * which simply isn't always the case.
2111 *
2112 * Because gralloc is unlikely to be fixed, we hack around it in iris
2113 * by forcing the staging buffer to have a matching stride.
2114 */
2115 if (iris_bo_is_external(res->bo))
2116 row_pitch_B = res->surf.row_pitch_B;
2117 #endif
2118
2119 map->staging =
2120 iris_resource_create_for_image(pscreen, &templ, NULL, 0, row_pitch_B);
2121 }
2122
2123 /* If we fail to create a staging resource, the caller will fall back
2124 * to mapping directly on the CPU.
2125 */
2126 if (!map->staging)
2127 return;
2128
2129 if (templ.target != PIPE_BUFFER) {
2130 struct isl_surf *surf = &((struct iris_resource *) map->staging)->surf;
2131 xfer->stride = isl_surf_get_row_pitch_B(surf);
2132 xfer->layer_stride = isl_surf_get_array_pitch(surf);
2133 }
2134
2135 if ((xfer->usage & PIPE_MAP_READ) ||
2136 (res->base.b.target == PIPE_BUFFER &&
2137 !(xfer->usage & PIPE_MAP_DISCARD_RANGE))) {
2138 iris_copy_region(map->blorp, map->batch, map->staging, 0, extra, 0, 0,
2139 xfer->resource, xfer->level, box);
2140 /* Ensure writes to the staging BO land before we map it below. */
2141 iris_emit_pipe_control_flush(map->batch,
2142 "transfer read: flush before mapping",
2143 PIPE_CONTROL_RENDER_TARGET_FLUSH |
2144 PIPE_CONTROL_TILE_CACHE_FLUSH |
2145 PIPE_CONTROL_CS_STALL);
2146 }
2147
2148 struct iris_bo *staging_bo = iris_resource_bo(map->staging);
2149
2150 if (iris_batch_references(map->batch, staging_bo))
2151 iris_batch_flush(map->batch);
2152
2153 assert(((struct iris_resource *)map->staging)->offset == 0);
2154 map->ptr =
2155 iris_bo_map(map->dbg, staging_bo, xfer->usage & MAP_FLAGS) + extra;
2156
2157 map->unmap = iris_unmap_copy_region;
2158 }
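/* Worked example of the `extra` padding above (values illustrative): for a
 * PIPE_BUFFER map at box->x == 70 with IRIS_MAP_BUFFER_ALIGNMENT == 64,
 * extra == 70 % 64 == 6.  The staging buffer is allocated 6 bytes wider,
 * the readback copy lands at x == 6, and map->ptr is bumped by 6, so the
 * pointer handed to the caller keeps the original data's misalignment and
 * iris_flush_staging_region() can re-apply the same offset on writeback.
 */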
2159
2160 static void
2161 get_image_offset_el(const struct isl_surf *surf, unsigned level, unsigned z,
2162 unsigned *out_x0_el, unsigned *out_y0_el)
2163 {
2164 ASSERTED uint32_t z0_el, a0_el;
2165 if (surf->dim == ISL_SURF_DIM_3D) {
2166 isl_surf_get_image_offset_el(surf, level, 0, z,
2167 out_x0_el, out_y0_el, &z0_el, &a0_el);
2168 } else {
2169 isl_surf_get_image_offset_el(surf, level, z, 0,
2170 out_x0_el, out_y0_el, &z0_el, &a0_el);
2171 }
2172 assert(z0_el == 0 && a0_el == 0);
2173 }
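/* Note: for 2D (array) surfaces the slice index selects the array layer,
 * while for 3D surfaces it selects the depth slice within the level --
 * hence the swapped z/array arguments in the two calls above.
 */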
2174
2175 /* Compute extent parameters for use with the tiled_memcpy functions.
2176 * The x values (_B) are in units of bytes; the y values (_el) are in units of element rows.
2177 */
2178 static inline void
2179 tile_extents(const struct isl_surf *surf,
2180 const struct pipe_box *box,
2181 unsigned level, int z,
2182 unsigned *x1_B, unsigned *x2_B,
2183 unsigned *y1_el, unsigned *y2_el)
2184 {
2185 const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
2186 const unsigned cpp = fmtl->bpb / 8;
2187
2188 assert(box->x % fmtl->bw == 0);
2189 assert(box->y % fmtl->bh == 0);
2190
2191 unsigned x0_el, y0_el;
2192 get_image_offset_el(surf, level, box->z + z, &x0_el, &y0_el);
2193
2194 *x1_B = (box->x / fmtl->bw + x0_el) * cpp;
2195 *y1_el = box->y / fmtl->bh + y0_el;
2196 *x2_B = (DIV_ROUND_UP(box->x + box->width, fmtl->bw) + x0_el) * cpp;
2197 *y2_el = DIV_ROUND_UP(box->y + box->height, fmtl->bh) + y0_el;
2198 }
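/* Worked example, assuming a BC1-style layout (bw == bh == 4, bpb == 64,
 * so cpp == 8) and x0_el == y0_el == 0: a box at (8, 4) of size 16x8 gives
 *
 *    x1_B  = (8 / 4) * 8              == 16
 *    y1_el = 4 / 4                    == 1
 *    x2_B  = DIV_ROUND_UP(24, 4) * 8  == 48
 *    y2_el = DIV_ROUND_UP(12, 4)      == 3
 *
 * i.e. bytes [16, 48) of element rows [1, 3): a 4x2 region of blocks.
 */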
2199
2200 static void
2201 iris_unmap_tiled_memcpy(struct iris_transfer *map)
2202 {
2203 struct pipe_transfer *xfer = &map->base.b;
2204 const struct pipe_box *box = &xfer->box;
2205 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2206 struct isl_surf *surf = &res->surf;
2207
2208 const bool has_swizzling = false;
2209
2210 if (xfer->usage & PIPE_MAP_WRITE) {
2211 char *dst = res->offset +
2212 iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
2213
2214 for (int s = 0; s < box->depth; s++) {
2215 unsigned x1, x2, y1, y2;
2216 tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);
2217
2218 void *ptr = map->ptr + s * xfer->layer_stride;
2219
2220 isl_memcpy_linear_to_tiled(x1, x2, y1, y2, dst, ptr,
2221 surf->row_pitch_B, xfer->stride,
2222 has_swizzling, surf->tiling, ISL_MEMCPY);
2223 }
2224 }
2225 os_free_aligned(map->buffer);
2226 map->buffer = map->ptr = NULL;
2227 }
2228
2229 static void
2230 iris_map_tiled_memcpy(struct iris_transfer *map)
2231 {
2232 struct pipe_transfer *xfer = &map->base.b;
2233 const struct pipe_box *box = &xfer->box;
2234 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2235 struct isl_surf *surf = &res->surf;
2236
2237 xfer->stride = ALIGN(surf->row_pitch_B, 16);
2238 xfer->layer_stride = xfer->stride * box->height;
2239
2240 unsigned x1, x2, y1, y2;
2241 tile_extents(surf, box, xfer->level, 0, &x1, &x2, &y1, &y2);
2242
2243 /* The tiling and detiling functions require that the linear buffer has
2244 * a 16-byte alignment (that is, its `x0` is 16-byte aligned). Here we
2245 * over-allocate the linear buffer to get the proper alignment.
2246 */
2247 map->buffer =
2248 os_malloc_aligned(xfer->layer_stride * box->depth, 16);
2249 assert(map->buffer);
2250 map->ptr = (char *)map->buffer + (x1 & 0xf);
2251
2252 const bool has_swizzling = false;
2253
2254 if (xfer->usage & PIPE_MAP_READ) {
2255 char *src = res->offset +
2256 iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
2257
2258 for (int s = 0; s < box->depth; s++) {
2259 unsigned x1, x2, y1, y2;
2260 tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);
2261
2262 /* Use 's' rather than 'box->z' to rebase the first slice to 0. */
2263 void *ptr = map->ptr + s * xfer->layer_stride;
2264
2265 isl_memcpy_tiled_to_linear(x1, x2, y1, y2, ptr, src, xfer->stride,
2266 surf->row_pitch_B, has_swizzling,
2267 surf->tiling,
2268 #if defined(USE_SSE41)
2269 util_get_cpu_caps()->has_sse4_1 ?
2270 ISL_MEMCPY_STREAMING_LOAD :
2271 #endif
2272 ISL_MEMCPY);
2273 }
2274 }
2275
2276 map->unmap = iris_unmap_tiled_memcpy;
2277 }
2278
2279 static void
2280 iris_map_direct(struct iris_transfer *map)
2281 {
2282 struct pipe_transfer *xfer = &map->base.b;
2283 struct pipe_box *box = &xfer->box;
2284 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2285
2286 void *ptr = res->offset +
2287 iris_bo_map(map->dbg, res->bo, xfer->usage & MAP_FLAGS);
2288
2289 if (res->base.b.target == PIPE_BUFFER) {
2290 xfer->stride = 0;
2291 xfer->layer_stride = 0;
2292
2293 map->ptr = ptr + box->x;
2294 } else {
2295 struct isl_surf *surf = &res->surf;
2296 const struct isl_format_layout *fmtl =
2297 isl_format_get_layout(surf->format);
2298 const unsigned cpp = fmtl->bpb / 8;
2299 unsigned x0_el, y0_el;
2300
2301 assert(box->x % fmtl->bw == 0);
2302 assert(box->y % fmtl->bh == 0);
2303 get_image_offset_el(surf, xfer->level, box->z, &x0_el, &y0_el);
2304
2305 x0_el += box->x / fmtl->bw;
2306 y0_el += box->y / fmtl->bh;
2307
2308 xfer->stride = isl_surf_get_row_pitch_B(surf);
2309 xfer->layer_stride = isl_surf_get_array_pitch(surf);
2310
2311 map->ptr = ptr + y0_el * xfer->stride + x0_el * cpp;
2312 }
2313 }
2314
2315 static bool
2316 can_promote_to_async(const struct iris_resource *res,
2317 const struct pipe_box *box,
2318 enum pipe_map_flags usage)
2319 {
2320 /* If we're writing to a section of the buffer that hasn't even been
2321 * initialized with useful data, then we can safely promote this write
2322 * to be unsynchronized. This helps the common pattern of appending data.
2323 */
2324 return res->base.b.target == PIPE_BUFFER && (usage & PIPE_MAP_WRITE) &&
2325 !(usage & TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED) &&
2326 !util_ranges_intersect(&res->valid_buffer_range, box->x,
2327 box->x + box->width);
2328 }
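/* Hedged example of the append pattern this catches: with
 * valid_buffer_range == [0, 256) and a write at box->x == 256 of width 128,
 * util_ranges_intersect(&range, 256, 384) is false, so the map is promoted
 * to unsynchronized and no stall occurs even while the GPU still reads the
 * first 256 bytes.
 */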
2329
2330 static bool
2331 prefer_cpu_access(const struct iris_resource *res,
2332 const struct pipe_box *box,
2333 enum pipe_map_flags usage,
2334 unsigned level,
2335 bool map_would_stall)
2336 {
2337 const enum iris_mmap_mode mmap_mode = iris_bo_mmap_mode(res->bo);
2338
2339 /* We must be able to map it. */
2340 if (mmap_mode == IRIS_MMAP_NONE)
2341 return false;
2342
2343 const bool write = usage & PIPE_MAP_WRITE;
2344 const bool read = usage & PIPE_MAP_READ;
2345 const bool preserve =
2346 res->base.b.target == PIPE_BUFFER && !(usage & PIPE_MAP_DISCARD_RANGE);
2347
2348 /* We want to avoid uncached reads because they are slow. */
2349 if (read && mmap_mode != IRIS_MMAP_WB)
2350 return false;
2351
2352 /* We want to avoid stalling. We can't avoid stalling for reads, though,
2353 * because the destination of a GPU staging copy would be busy and stall
2354 * in the exact same manner. So don't consider it for those.
2355 *
2356 * For buffer maps which aren't invalidating the destination, the GPU
2357 * staging copy path would have to read the existing buffer contents in
2358 * order to preserve them, effectively making it a read. But a direct
2359 * mapping would be able to just write the necessary parts without the
2360 * overhead of the copy. It may stall, but we would anyway.
2361 */
2362 if (map_would_stall && !read && !preserve)
2363 return false;
2364
2365 /* Use the GPU for writes if it would compress the data. */
2366 if (write && isl_aux_usage_has_compression(res->aux.usage))
2367 return false;
2368
2369 /* Writes & Cached CPU reads are fine as long as the primary is valid. */
2370 return !iris_has_invalid_primary(res, level, 1, box->z, box->depth);
2371 }
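/* Two illustrative outcomes of the heuristic above: an idle, WB-mapped
 * linear buffer being rewritten maps directly on the CPU, while a busy,
 * CCS-compressed texture read goes through a GPU staging blit (avoiding
 * both an uncached read and a manual aux resolve).
 */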
2372
2373 static void *
2374 iris_transfer_map(struct pipe_context *ctx,
2375 struct pipe_resource *resource,
2376 unsigned level,
2377 enum pipe_map_flags usage,
2378 const struct pipe_box *box,
2379 struct pipe_transfer **ptransfer)
2380 {
2381 struct iris_context *ice = (struct iris_context *)ctx;
2382 struct iris_resource *res = (struct iris_resource *)resource;
2383 struct isl_surf *surf = &res->surf;
2384
2385 /* From GL_AMD_pinned_memory issues:
2386 *
2387 * 4) Is glMapBuffer on a shared buffer guaranteed to return the
2388 * same system address which was specified at creation time?
2389 *
2390 * RESOLVED: NO. The GL implementation might return a different
2391 * virtual mapping of that memory, although the same physical
2392 * page will be used.
2393 *
2394 * So don't ever use staging buffers.
2395 */
2396 if (res->base.is_user_ptr)
2397 usage |= PIPE_MAP_PERSISTENT;
2398
2399 /* Promote discarding a range to discarding the entire buffer where
2400 * possible. This may allow us to replace the backing storage entirely
2401 * and let us do an unsynchronized map when we otherwise wouldn't.
2402 */
2403 if (resource->target == PIPE_BUFFER &&
2404 (usage & PIPE_MAP_DISCARD_RANGE) &&
2405 box->x == 0 && box->width == resource->width0) {
2406 usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
2407 }
2408
2409 if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
2410 /* Replace the backing storage with a fresh buffer for non-async maps */
2411 if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))
2412 && iris_invalidate_buffer(ice, res))
2413 usage |= PIPE_MAP_UNSYNCHRONIZED;
2414
2415 /* If we can discard the whole resource, we can discard the range. */
2416 usage |= PIPE_MAP_DISCARD_RANGE;
2417 }
2418
2419 if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
2420 can_promote_to_async(res, box, usage)) {
2421 usage |= PIPE_MAP_UNSYNCHRONIZED;
2422 }
2423
2424 /* If we are dealing with an external-memory-object PIPE_BUFFER, disable
2425 * async mapping because of synchronization issues.
2426 */
2427 if (!res->mod_info &&
2428 res->external_format != PIPE_FORMAT_NONE &&
2429 resource->target == PIPE_BUFFER) {
2430 usage &= ~PIPE_MAP_UNSYNCHRONIZED;
2431 }
2432
2433 /* Avoid using GPU copies for persistent/coherent buffers, as the idea
2434 * there is to access them simultaneously on the CPU & GPU. This also
2435 * avoids trying to use GPU copies for our u_upload_mgr buffers which
2436 * contain state we're constructing for a GPU draw call, which would
2437 * kill us with infinite stack recursion.
2438 */
2439 if (usage & (PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT))
2440 usage |= PIPE_MAP_DIRECTLY;
2441
2442 /* We cannot provide a direct mapping of tiled resources, and we
2443 * may not be able to mmap imported BOs since they may come from
2444 * other devices that I915_GEM_MMAP cannot work with.
2445 */
2446 if ((usage & PIPE_MAP_DIRECTLY) &&
2447 (surf->tiling != ISL_TILING_LINEAR || iris_bo_is_imported(res->bo)))
2448 return NULL;
2449
2450 bool map_would_stall = false;
2451
2452 if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
2453 map_would_stall =
2454 resource_is_busy(ice, res) ||
2455 iris_has_invalid_primary(res, level, 1, box->z, box->depth);
2456
2457 if (map_would_stall && (usage & PIPE_MAP_DONTBLOCK) &&
2458 (usage & PIPE_MAP_DIRECTLY))
2459 return NULL;
2460 }
2461
2462 struct iris_transfer *map;
2463
2464 if (usage & PIPE_MAP_THREAD_SAFE)
2465 map = CALLOC_STRUCT(iris_transfer);
2466 else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
2467 map = slab_zalloc(&ice->transfer_pool_unsync);
2468 else
2469 map = slab_zalloc(&ice->transfer_pool);
2470
2471 if (!map)
2472 return NULL;
2473
2474 struct pipe_transfer *xfer = &map->base.b;
2475
2476 map->dbg = &ice->dbg;
2477
2478 pipe_resource_reference(&xfer->resource, resource);
2479 xfer->level = level;
2480 xfer->usage = usage;
2481 xfer->box = *box;
2482 *ptransfer = xfer;
2483
2484 if (usage & PIPE_MAP_WRITE)
2485 util_range_add(&res->base.b, &res->valid_buffer_range, box->x, box->x + box->width);
2486
2487 if (prefer_cpu_access(res, box, usage, level, map_would_stall))
2488 usage |= PIPE_MAP_DIRECTLY;
2489
2490 /* TODO: Teach iris_map_tiled_memcpy about Tile64... */
2491 if (isl_tiling_is_64(res->surf.tiling))
2492 usage &= ~PIPE_MAP_DIRECTLY;
2493
2494 if (!(usage & PIPE_MAP_DIRECTLY)) {
2495 /* If we need a synchronous mapping and the resource is busy, or needs
2496 * resolving, we copy to/from a linear temporary buffer using the GPU.
2497 */
2498 map->batch = &ice->batches[IRIS_BATCH_RENDER];
2499 map->blorp = &ice->blorp;
2500 iris_map_copy_region(map);
2501 }
2502
2503 /* If we've requested a direct mapping, or iris_map_copy_region failed
2504 * to create a staging resource, then map it directly on the CPU.
2505 */
2506 if (!map->ptr) {
2507 if (resource->target != PIPE_BUFFER) {
2508 iris_resource_access_raw(ice, res, level, box->z, box->depth,
2509 usage & PIPE_MAP_WRITE);
2510 }
2511
2512 if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
2513 iris_foreach_batch(ice, batch) {
2514 if (iris_batch_references(batch, res->bo))
2515 iris_batch_flush(batch);
2516 }
2517 }
2518
2519 if (surf->tiling != ISL_TILING_LINEAR) {
2520 iris_map_tiled_memcpy(map);
2521 } else {
2522 iris_map_direct(map);
2523 }
2524 }
2525
2526 return map->ptr;
2527 }
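/* Minimal usage sketch (illustrative): callers reach this through the
 * u_transfer_helper-wrapped context hooks, e.g.
 *
 *    struct pipe_transfer *xfer;
 *    void *p = ctx->texture_map(ctx, prsc, 0, PIPE_MAP_WRITE, &box, &xfer);
 *    if (p) {
 *       memcpy(p, pixels, nbytes);   // hypothetical payload
 *       ctx->texture_unmap(ctx, xfer);
 *    }
 */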
2528
2529 static void
2530 iris_transfer_flush_region(struct pipe_context *ctx,
2531 struct pipe_transfer *xfer,
2532 const struct pipe_box *box)
2533 {
2534 struct iris_context *ice = (struct iris_context *)ctx;
2535 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2536 struct iris_transfer *map = (void *) xfer;
2537
2538 if (map->staging)
2539 iris_flush_staging_region(xfer, box);
2540
2541 if (res->base.b.target == PIPE_BUFFER) {
2542 util_range_add(&res->base.b, &res->valid_buffer_range, box->x, box->x + box->width);
2543 }
2544
2545 /* Make sure we flag constants dirty even if there's no need to emit
2546 * any PIPE_CONTROLs to a batch.
2547 */
2548 iris_dirty_for_history(ice, res);
2549 }
2550
2551 static void
2552 iris_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *xfer)
2553 {
2554 struct iris_context *ice = (struct iris_context *)ctx;
2555 struct iris_transfer *map = (void *) xfer;
2556
2557 if (!(xfer->usage & (PIPE_MAP_FLUSH_EXPLICIT |
2558 PIPE_MAP_COHERENT))) {
2559 struct pipe_box flush_box = {
2560 .x = 0, .y = 0, .z = 0,
2561 .width = xfer->box.width,
2562 .height = xfer->box.height,
2563 .depth = xfer->box.depth,
2564 };
2565 iris_transfer_flush_region(ctx, xfer, &flush_box);
2566 }
2567
2568 if (map->unmap)
2569 map->unmap(map);
2570
2571 pipe_resource_reference(&xfer->resource, NULL);
2572
2573 if (xfer->usage & PIPE_MAP_THREAD_SAFE) {
2574 free(map);
2575 } else {
2576 /* transfer_unmap is called from the driver thread, so we have to use
2577 * transfer_pool, not transfer_pool_unsync. Freeing an object into a
2578 * different pool is allowed, however.
2579 */
2580 slab_free(&ice->transfer_pool, map);
2581 }
2582 }
2583
2584 /**
2585 * The pipe->texture_subdata() driver hook.
2586 *
2587 * Mesa's state tracker takes this path whenever possible, even with
2588 * pipe_caps.texture_transfer_modes set.
2589 */
2590 static void
2591 iris_texture_subdata(struct pipe_context *ctx,
2592 struct pipe_resource *resource,
2593 unsigned level,
2594 unsigned usage,
2595 const struct pipe_box *box,
2596 const void *data,
2597 unsigned stride,
2598 uintptr_t layer_stride)
2599 {
2600 struct iris_context *ice = (struct iris_context *)ctx;
2601 struct iris_resource *res = (struct iris_resource *)resource;
2602 const struct isl_surf *surf = &res->surf;
2603
2604 assert(resource->target != PIPE_BUFFER);
2605
2606 /* Just use the transfer-based path for linear buffers - it will already
2607 * do a direct mapping, or a simple linear staging buffer.
2608 *
2609 * Linear staging buffers appear to be better than tiled ones, too, so
2610 * take that path if we need the GPU to perform color compression, or
2611 * stall-avoidance blits.
2612 *
2613 * TODO: Teach isl_memcpy_linear_to_tiled about Tile64...
2614 */
2615 if (surf->tiling == ISL_TILING_LINEAR ||
2616 isl_tiling_is_64(res->surf.tiling) ||
2617 isl_aux_usage_has_compression(res->aux.usage) ||
2618 resource_is_busy(ice, res) ||
2619 iris_bo_mmap_mode(res->bo) == IRIS_MMAP_NONE) {
2620 return u_default_texture_subdata(ctx, resource, level, usage, box,
2621 data, stride, layer_stride);
2622 }
2623
2624 /* No state trackers pass any flags other than PIPE_MAP_WRITE */
2625
2626 iris_resource_access_raw(ice, res, level, box->z, box->depth, true);
2627
2628 iris_foreach_batch(ice, batch) {
2629 if (iris_batch_references(batch, res->bo))
2630 iris_batch_flush(batch);
2631 }
2632
2633 uint8_t *dst = iris_bo_map(&ice->dbg, res->bo, MAP_WRITE | MAP_RAW);
2634
2635 for (int s = 0; s < box->depth; s++) {
2636 const uint8_t *src = data + s * layer_stride;
2637
2638 unsigned x1, x2, y1, y2;
2639 tile_extents(surf, box, level, s, &x1, &x2, &y1, &y2);
2640
2641 isl_memcpy_linear_to_tiled(x1, x2, y1, y2,
2642 (void *)dst, (void *)src,
2643 surf->row_pitch_B, stride,
2644 false, surf->tiling, ISL_MEMCPY);
2645 }
2646 }
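/* Hedged example: a glTexSubImage2D-style upload typically arrives as
 *
 *    ctx->texture_subdata(ctx, prsc, 0, PIPE_MAP_WRITE, &box,
 *                         pixels, stride, layer_stride);
 *
 * and only takes the direct linear->tiled memcpy path above when the
 * texture is tiled (but not Tile64), idle, uncompressed, and CPU-mappable.
 */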
2647
2648 /**
2649 * Mark state dirty that needs to be re-emitted when a resource is written.
2650 */
2651 void
2652 iris_dirty_for_history(struct iris_context *ice,
2653 struct iris_resource *res)
2654 {
2655 const uint64_t stages = res->bind_stages;
2656 uint64_t dirty = 0ull;
2657 uint64_t stage_dirty = 0ull;
2658
2659 if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
2660 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
2661 if (stages & (1u << stage)) {
2662 struct iris_shader_state *shs = &ice->state.shaders[stage];
2663 shs->dirty_cbufs |= ~0u;
2664 }
2665 }
2666 dirty |= IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
2667 IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES;
2668 stage_dirty |= (stages << IRIS_SHIFT_FOR_STAGE_DIRTY_CONSTANTS);
2669 }
2670
2671 if (res->bind_history & (PIPE_BIND_SAMPLER_VIEW |
2672 PIPE_BIND_SHADER_IMAGE)) {
2673 dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES |
2674 IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES;
2675 stage_dirty |= (stages << IRIS_SHIFT_FOR_STAGE_DIRTY_BINDINGS);
2676 }
2677
2678 if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
2679 dirty |= IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
2680 IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES;
2681 stage_dirty |= (stages << IRIS_SHIFT_FOR_STAGE_DIRTY_BINDINGS);
2682 }
2683
2684 if (res->bind_history & PIPE_BIND_VERTEX_BUFFER)
2685 dirty |= IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
2686
2687 if (ice->state.streamout_active && (res->bind_history & PIPE_BIND_STREAM_OUTPUT))
2688 dirty |= IRIS_DIRTY_SO_BUFFERS;
2689
2690 ice->state.dirty |= dirty;
2691 ice->state.stage_dirty |= stage_dirty;
2692 }
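/* Illustrative case: a buffer written while bound as a vertex-stage
 * constant buffer (bind_history has PIPE_BIND_CONSTANT_BUFFER and
 * bind_stages has the VS bit) gets its shader state's dirty_cbufs mask set
 * and the VS constants stage-dirty bit raised, so the next draw re-emits
 * its constants.
 */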
2693
2694 bool
2695 iris_resource_set_clear_color(struct iris_context *ice,
2696 struct iris_resource *res,
2697 union isl_color_value color)
2698 {
2699 if (res->aux.clear_color_unknown ||
2700 memcmp(&res->aux.clear_color, &color, sizeof(color)) != 0) {
2701 res->aux.clear_color = color;
2702 res->aux.clear_color_unknown = false;
2703 return true;
2704 }
2705
2706 return false;
2707 }
2708
2709 static enum pipe_format
2710 iris_resource_get_internal_format(struct pipe_resource *p_res)
2711 {
2712 struct iris_resource *res = (void *) p_res;
2713 return res->internal_format;
2714 }
2715
2716 static const struct u_transfer_vtbl transfer_vtbl = {
2717 .resource_create = iris_resource_create,
2718 .resource_destroy = iris_resource_destroy,
2719 .transfer_map = iris_transfer_map,
2720 .transfer_unmap = iris_transfer_unmap,
2721 .transfer_flush_region = iris_transfer_flush_region,
2722 .get_internal_format = iris_resource_get_internal_format,
2723 .set_stencil = iris_resource_set_separate_stencil,
2724 .get_stencil = iris_resource_get_separate_stencil,
2725 };
2726
2727 void
2728 iris_init_screen_resource_functions(struct pipe_screen *pscreen)
2729 {
2730 pscreen->query_dmabuf_modifiers = iris_query_dmabuf_modifiers;
2731 pscreen->is_dmabuf_modifier_supported = iris_is_dmabuf_modifier_supported;
2732 pscreen->get_dmabuf_modifier_planes = iris_get_dmabuf_modifier_planes;
2733 pscreen->resource_create_with_modifiers =
2734 iris_resource_create_with_modifiers;
2735 pscreen->resource_create = u_transfer_helper_resource_create;
2736 pscreen->resource_from_user_memory = iris_resource_from_user_memory;
2737 pscreen->resource_from_handle = iris_resource_from_handle;
2738 pscreen->resource_from_memobj = iris_resource_from_memobj_wrapper;
2739 pscreen->resource_get_handle = iris_resource_get_handle;
2740 pscreen->resource_get_param = iris_resource_get_param;
2741 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
2742 pscreen->memobj_create_from_handle = iris_memobj_create_from_handle;
2743 pscreen->memobj_destroy = iris_memobj_destroy;
2744 pscreen->transfer_helper =
2745 u_transfer_helper_create(&transfer_vtbl,
2746 U_TRANSFER_HELPER_SEPARATE_Z32S8 |
2747 U_TRANSFER_HELPER_SEPARATE_STENCIL |
2748 U_TRANSFER_HELPER_MSAA_MAP);
2749 }
2750
2751 void
2752 iris_init_resource_functions(struct pipe_context *ctx)
2753 {
2754 ctx->flush_resource = iris_flush_resource;
2755 ctx->invalidate_resource = iris_invalidate_resource;
2756 ctx->buffer_map = u_transfer_helper_transfer_map;
2757 ctx->texture_map = u_transfer_helper_transfer_map;
2758 ctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
2759 ctx->buffer_unmap = u_transfer_helper_transfer_unmap;
2760 ctx->texture_unmap = u_transfer_helper_transfer_unmap;
2761 ctx->buffer_subdata = u_default_buffer_subdata;
2762 ctx->clear_buffer = u_default_clear_buffer;
2763 ctx->texture_subdata = iris_texture_subdata;
2764 }
2765