/*
 * Copyright © 2012 Rob Clark <robclark@freedesktop.org>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_RESOURCE_H_
#define FREEDRENO_RESOURCE_H_

#include "util/list.h"
#include "util/simple_mtx.h"
#include "util/u_dump.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"

#include "freedreno/fdl/freedreno_layout.h"
#include "freedreno/fdl/freedreno_lrz_layout.h"
#include "freedreno_batch.h"
#include "freedreno_util.h"

BEGINC;

#define PRSC_FMT                                                               \
   "p: target=%s, format=%s, %ux%ux%u, "                                       \
   "array_size=%u, last_level=%u, "                                            \
   "nr_samples=%u, usage=%u, bind=%x, flags=%x"
#define PRSC_ARGS(p)                                                           \
   (p), util_str_tex_target((p)->target, true),                                \
      util_format_short_name((p)->format), (p)->width0, (p)->height0,          \
      (p)->depth0, (p)->array_size, (p)->last_level, (p)->nr_samples,          \
      (p)->usage, (p)->bind, (p)->flags

enum fd_lrz_direction {
   FD_LRZ_UNKNOWN,
   /* Depth func less/less-than: */
   FD_LRZ_LESS,
   /* Depth func greater/greater-than: */
   FD_LRZ_GREATER,
};

/**
 * State related to batch/resource tracking.
 *
 * With threaded_context we need to support replace_buffer_storage, in
 * which case we can end up in transfer_map with tres->latest, but other
 * pipe_context APIs using the original prsc pointer.  This allows TC to
 * not have to synchronize the front-end thread with the buffer storage
 * replacement called on the driver thread.  But it complicates the
 * batch/resource tracking.
 *
 * To handle this, we split the tracking out into its own ref-counted
 * structure, so that, as needed, both "versions" of the resource can
 * point to the same tracking.
 *
 * We could *almost* just push this down to fd_bo, except for a3xx/a4xx
 * hw queries, where we don't know up-front the size to allocate for
 * per-tile query results.
 */
struct fd_resource_tracking {
   struct pipe_reference reference;

   /* Bitmask of in-flight batches which reference this resource.  Note
    * that the batch doesn't hold a reference to resources (instead the
    * fd_ringbuffer holds refs to the underlying fd_bo), but if the
    * resource is destroyed we need to clean up the batch's weak
    * references to us.
    */
   uint32_t batch_mask;

   /* Reference to the batch that writes this resource: */
   struct fd_batch *write_batch;

   /* Set of batches whose batch-cache key references this resource.
    * We need to track this to know which batch-cache entries to
    * invalidate if, for example, the resource is invalidated or
    * shadowed.
    */
   uint32_t bc_batch_mask;
};

void __fd_resource_tracking_destroy(struct fd_resource_tracking *track);

static inline void
fd_resource_tracking_reference(struct fd_resource_tracking **ptr,
                               struct fd_resource_tracking *track)
{
   struct fd_resource_tracking *old_track = *ptr;

   if (pipe_reference(&(*ptr)->reference, &track->reference)) {
      assert(!old_track->write_batch);
      free(old_track);
   }

   *ptr = track;
}
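
/* A minimal usage sketch (illustrative; "new_rsc"/"old_rsc" are
 * hypothetical locals, not part of this API).  The TC replacement path
 * makes two resource "versions" share one tracking struct, and destroy
 * paths drop their ref by passing NULL:
 *
 *    fd_resource_tracking_reference(&new_rsc->track, old_rsc->track);
 *    ...
 *    fd_resource_tracking_reference(&rsc->track, NULL);
 *
 * The last unref frees the tracking, asserting that no write_batch is
 * still pending.
 */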

/**
 * A resource (any buffer/texture/image/etc)
 */
struct fd_resource {
   struct threaded_resource b;
   struct fd_bo *bo; /* use fd_resource_set_bo() to write */
   enum pipe_format internal_format;
   uint32_t hash; /* _mesa_hash_pointer() on this resource's address. */
   struct fdl_layout layout;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
   bool valid;
   struct renderonly_scanout *scanout;

   /* reference to the resource holding stencil data for a z32_s8 texture */
   /* TODO rename to secondary or auxiliary? */
   struct fd_resource *stencil;

   struct fd_resource_tracking *track;

   simple_mtx_t lock;

   /* bitmask of state this resource could potentially dirty when rebound,
    * see rebind_resource()
    */
   BITMASK_ENUM(fd_dirty_3d_state) dirty;

   /* Sequence # incremented each time bo changes: */
   uint16_t seqno;

   /* Is this buffer a replacement created by threaded_context to avoid
    * a stall in the PIPE_MAP_DISCARD_WHOLE_RESOURCE|PIPE_MAP_WRITE case?
    * If so, it no longer "owns" its rsc->track, and so should not
    * invalidate when the rsc is destroyed.
    */
   bool is_replacement : 1;

   /* Uninitialized resources with a UBWC format need their UBWC flag data
    * cleared before writes, as the UBWC state is read and used during
    * writes, so undefined UBWC flag data results in undefined results.
    */
   bool needs_ubwc_clear : 1;

   /* LRZ */
   struct fdl_lrz_layout lrz_layout;
   bool lrz_valid : 1;
   enum fd_lrz_direction lrz_direction : 2;
   struct fd_bo *lrz;
};

struct fd_memory_object {
   struct pipe_memory_object b;
   struct fd_bo *bo;
};

static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}

static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
   return (struct fd_memory_object *)pmemobj;
}

static inline bool
pending(struct fd_resource *rsc, bool write)
{
   /* if we have a pending GPU write, we are busy in any case: */
   if (rsc->track->write_batch)
      return true;

   /* if CPU wants to write, but we are pending a GPU read, we are busy: */
   if (write && rsc->track->batch_mask)
      return true;

   if (rsc->stencil && pending(rsc->stencil, write))
      return true;

   return false;
}
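
/* Illustrative sketch (hypothetical helper name, not the actual
 * transfer_map logic): pending() lets a CPU mapping decide whether
 * in-flight GPU work must be flushed before access:
 *
 *    if (pending(rsc, !!(usage & PIPE_MAP_WRITE)))
 *       flush_referencing_batches(ctx, rsc);   // hypothetical helper
 */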

static inline bool
resource_busy(struct fd_resource *rsc, unsigned op)
{
   return fd_bo_cpu_prep(rsc->bo, NULL, op | FD_BO_PREP_NOSYNC) != 0;
}

int __fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc,
                       unsigned op, const char *func);
#define fd_resource_wait(ctx, rsc, op) ({                                      \
   MESA_TRACE_FUNC();                                                          \
   __fd_resource_wait(ctx, rsc, op, __func__);                                 \
})
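
/* Example (illustrative): block until the GPU is done before a CPU
 * read, using the FD_BO_PREP_READ op from the fd_bo API:
 *
 *    fd_resource_wait(ctx, rsc, FD_BO_PREP_READ);
 *
 * resource_busy() above is the non-blocking variant of the same check,
 * returning true if the wait would have stalled.
 */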

static inline void
fd_resource_lock(struct fd_resource *rsc)
{
   simple_mtx_lock(&rsc->lock);
}

static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
   simple_mtx_unlock(&rsc->lock);
}

static inline void
fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
{
   if (!prsc)
      return;
   struct fd_resource *rsc = fd_resource(prsc);
   /* Bits are only ever ORed in, and we expect many set_usage() calls
    * per resource, so do the quick check outside of the lock.
    */
   if (likely(rsc->dirty & usage))
      return;
   fd_resource_lock(rsc);
   rsc->dirty |= usage;
   fd_resource_unlock(rsc);
}
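
/* Example (illustrative call site): when binding a buffer as a constant
 * buffer, record which state it could dirty if later rebound:
 *
 *    fd_resource_set_usage(buf->buffer, FD_DIRTY_CONST);
 */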

static inline bool
has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}

static inline bool
is_z32(enum pipe_format format)
{
   switch (format) {
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
   case PIPE_FORMAT_Z32_UNORM:
   case PIPE_FORMAT_Z32_FLOAT:
      return true;
   default:
      return false;
   }
}

struct fd_transfer {
   struct threaded_transfer b;
   struct pipe_resource *staging_prsc;
   struct pipe_box staging_box;
   void *upload_ptr;
};

static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}

static inline struct fdl_slice *
fd_resource_slice(struct fd_resource *rsc, unsigned level)
{
   assert(level <= rsc->b.b.last_level);
   return &rsc->layout.slices[level];
}

static inline uint32_t
fd_resource_layer_stride(struct fd_resource *rsc, unsigned level)
{
   return fdl_layer_stride(&rsc->layout, level);
}

/* get pitch (in bytes) for specified mipmap level */
static inline uint32_t
fd_resource_pitch(struct fd_resource *rsc, unsigned level)
{
   if (is_a2xx(fd_screen(rsc->b.b.screen)))
      return fdl2_pitch(&rsc->layout, level);

   return fdl_pitch(&rsc->layout, level);
}

/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
   assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
   assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

static inline uint32_t
fd_resource_tile_mode(struct pipe_resource *prsc, int level)
{
   return fdl_tile_mode(&fd_resource(prsc)->layout, level);
}

static inline const char *
fd_resource_tile_mode_desc(const struct fd_resource *rsc, int level)
{
   return fdl_tile_mode_desc(&rsc->layout, level);
}

static inline bool
fd_resource_ubwc_enabled(struct fd_resource *rsc, int level)
{
   return fdl_ubwc_enabled(&rsc->layout, level);
}

/* access # of samples, with 0 normalized to 1 (which is what we care about
 * most of the time)
 */
static inline unsigned
fd_resource_nr_samples(const struct pipe_resource *prsc)
{
   return MAX2(1, prsc->nr_samples);
}

void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_replace_buffer_storage(struct pipe_context *ctx,
                               struct pipe_resource *dst,
                               struct pipe_resource *src,
                               unsigned num_rebinds,
                               uint32_t rebind_mask,
                               uint32_t delete_buffer_id) in_dt;
bool fd_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *prsc,
                      unsigned usage);

void fd_resource_uncompress(struct fd_context *ctx,
                            struct fd_resource *rsc,
                            bool linear) assert_dt;
void fd_resource_dump(struct fd_resource *rsc, const char *name);

bool fd_render_condition_check(struct pipe_context *pctx) assert_dt;

static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   return rsc->track->batch_mask & (1 << batch->idx);
}

static inline void
fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc) assert_dt
{
   if (unlikely(rsc->needs_ubwc_clear)) {
      batch->ctx->clear_ubwc(batch, rsc);
      rsc->needs_ubwc_clear = false;
   }
}

static inline void
fd_batch_resource_read(struct fd_batch *batch,
                       struct fd_resource *rsc) assert_dt
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}

static inline bool
needs_dirty_resource(struct fd_context *ctx, struct pipe_resource *prsc, bool write)
   assert_dt
{
   if (!prsc)
      return false;

   struct fd_resource *rsc = fd_resource(prsc);

   /* Switching between draw and non_draw will dirty all state, so if
    * we pick the wrong one, all the bits in the dirty_resource state
    * will be set anyway.. so no harm, no foul.
    */
   struct fd_batch *batch = ctx->batch_nondraw ? ctx->batch_nondraw : ctx->batch;

   if (!batch)
      return false;

   if (write)
      return rsc->track->write_batch != batch;

   return !fd_batch_references_resource(batch, rsc);
}

static inline void
fd_dirty_resource(struct fd_context *ctx, struct pipe_resource *prsc,
                  BITMASK_ENUM(fd_dirty_3d_state) dirty, bool write)
   assert_dt
{
   fd_context_dirty(ctx, dirty);

   if (ctx->dirty_resource & dirty)
      return;

   if (!needs_dirty_resource(ctx, prsc, write))
      return;

   ctx->dirty_resource |= dirty;
}

static inline void
fd_dirty_shader_resource(struct fd_context *ctx, struct pipe_resource *prsc,
                         enum pipe_shader_type shader,
                         BITMASK_ENUM(fd_dirty_shader_state) dirty,
                         bool write)
   assert_dt
{
   fd_context_dirty_shader(ctx, shader, dirty);

   if (ctx->dirty_shader_resource[shader] & dirty)
      return;

   if (!needs_dirty_resource(ctx, prsc, write))
      return;

   ctx->dirty_shader_resource[shader] |= dirty;
   ctx->dirty_resource |= dirty_shader_to_dirty_state(dirty);
}
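
/* Example (illustrative call site): marking a sampler view's texture as
 * read by the fragment shader:
 *
 *    fd_dirty_shader_resource(ctx, view->texture, PIPE_SHADER_FRAGMENT,
 *                             FD_DIRTY_SHADER_TEX, false);
 */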

static inline enum fdl_view_type
fdl_type_from_pipe_target(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      return FDL_VIEW_TYPE_1D;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
   case PIPE_TEXTURE_2D_ARRAY:
      return FDL_VIEW_TYPE_2D;
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return FDL_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_3D:
      return FDL_VIEW_TYPE_3D;
   case PIPE_MAX_TEXTURE_TYPES:
   default:
      unreachable("bad texture type");
   }
}
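
/* Example (illustrative): translating a gallium resource's target while
 * filling out fdl view parameters; the "args.type" field name is an
 * assumption, not quoted from the fdl API:
 *
 *    args.type = fdl_type_from_pipe_target(prsc->target);
 */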

ENDC;

#endif /* FREEDRENO_RESOURCE_H_ */