/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_RESOURCE_H_
#define FREEDRENO_RESOURCE_H_

#include "util/list.h"
#include "util/simple_mtx.h"
#include "util/u_dump.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"

#include "freedreno/fdl/freedreno_layout.h"
#include "freedreno_batch.h"
#include "freedreno_util.h"

#define PRSC_FMT                                                               \
   "p: target=%s, format=%s, %ux%ux%u, "                                       \
   "array_size=%u, last_level=%u, "                                            \
   "nr_samples=%u, usage=%u, bind=%x, flags=%x"
#define PRSC_ARGS(p)                                                           \
   (p), util_str_tex_target((p)->target, true),                                \
      util_format_short_name((p)->format), (p)->width0, (p)->height0,          \
      (p)->depth0, (p)->array_size, (p)->last_level, (p)->nr_samples,          \
      (p)->usage, (p)->bind, (p)->flags

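/* A sketch of typical usage (hypothetical call site; assumes a printf-style
 * debug macro such as DBG() from freedreno_util.h): the caller supplies the
 * leading "%" so the "p: ..." prefix in PRSC_FMT formats the pointer:
 *
 *    DBG("%" PRSC_FMT, PRSC_ARGS(prsc));
 */
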
enum fd_lrz_direction {
   FD_LRZ_UNKNOWN,
   /* Depth func less/less-than: */
   FD_LRZ_LESS,
   /* Depth func greater/greater-than: */
   FD_LRZ_GREATER,
};

/**
 * State related to batch/resource tracking.
 *
 * With threaded_context we need to support replace_buffer_storage, in
 * which case we can end up in transfer_map with tres->latest, but other
 * pipe_context APIs using the original prsc pointer.  This allows TC to
 * avoid synchronizing the front-end thread with the buffer storage
 * replacement called on the driver thread.  But it complicates the batch/
 * resource tracking.
 *
 * To handle this, we need to split the tracking out into its own ref-
 * counted structure, so that as needed both "versions" of the resource
 * can point to the same tracking.
 *
 * We could *almost* just push this down to fd_bo, except for a3xx/a4xx
 * hw queries, where we don't know up-front the size to allocate for
 * per-tile query results.
 */
struct fd_resource_tracking {
   struct pipe_reference reference;

   /* Bitmask of in-flight batches which reference this resource.  Note
    * that the batch doesn't hold references to resources (instead the
    * fd_ringbuffer holds refs to the underlying fd_bo), but in case
    * the resource is destroyed we need to clean up the batch's weak
    * references to us.
    */
   uint32_t batch_mask;

   /* reference to the batch that writes this resource: */
   struct fd_batch *write_batch;

   /* Set of batches whose batch-cache key references this resource.
    * We need to track this to know which batch-cache entries to
    * invalidate if, for example, the resource is invalidated or
    * shadowed.
    */
   uint32_t bc_batch_mask;
};

void __fd_resource_tracking_destroy(struct fd_resource_tracking *track);

static inline void
fd_resource_tracking_reference(struct fd_resource_tracking **ptr,
                               struct fd_resource_tracking *track)
{
   struct fd_resource_tracking *old_track = *ptr;

   if (pipe_reference(&(*ptr)->reference, &track->reference)) {
      assert(!old_track->write_batch);
      free(old_track);
   }

   *ptr = track;
}

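/* For example (a hypothetical sketch, not an API defined in this header):
 * when a resource's storage is replaced or shadowed, the new resource can
 * share the original's tracking so batch/resource tracking stays coherent
 * across both "versions":
 *
 *    fd_resource_tracking_reference(&new_rsc->track, old_rsc->track);
 */
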
/**
 * A resource (any buffer/texture/image/etc)
 */
struct fd_resource {
   struct threaded_resource b;
   struct fd_bo *bo; /* use fd_resource_set_bo() to write */
   enum pipe_format internal_format;
   uint32_t hash; /* _mesa_hash_pointer() on this resource's address. */
   struct fdl_layout layout;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
   bool valid;
   struct renderonly_scanout *scanout;

   /* reference to the resource holding stencil data for a z32_s8 texture */
   /* TODO rename to secondary or auxiliary? */
   struct fd_resource *stencil;

   struct fd_resource_tracking *track;

   simple_mtx_t lock;

   /* bitmask of state this resource could potentially dirty when rebound,
    * see rebind_resource()
    */
   enum fd_dirty_3d_state dirty;

   /* Sequence # incremented each time bo changes: */
   uint16_t seqno;

   /* Is this buffer a replacement created by threaded_context to avoid
    * a stall in the PIPE_MAP_DISCARD_WHOLE_RESOURCE|PIPE_MAP_WRITE case?
    * If so, it no longer "owns" its rsc->track, and so should not
    * invalidate when the rsc is destroyed.
    */
   bool is_replacement : 1;

   /* Uninitialized resources with UBWC format need their UBWC flag data
    * cleared before writes, as the UBWC state is read and used during
    * writes, so undefined UBWC flag data results in undefined results.
    */
   bool needs_ubwc_clear : 1;

   /*
    * LRZ
    *
    * TODO lrz width/height/pitch should probably also move to
    * fdl_layout
    */
   bool lrz_valid : 1;
   enum fd_lrz_direction lrz_direction : 2;
   uint16_t lrz_width; // for lrz clear, does this differ from lrz_pitch?
   uint16_t lrz_height;
   uint16_t lrz_pitch;
   struct fd_bo *lrz;
};

struct fd_memory_object {
   struct pipe_memory_object b;
   struct fd_bo *bo;
};

static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}

static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
   return (const struct fd_resource *)ptex;
}

static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
   return (struct fd_memory_object *)pmemobj;
}

static inline bool
pending(struct fd_resource *rsc, bool write)
{
   /* if we have a pending GPU write, we are busy in any case: */
   if (rsc->track->write_batch)
      return true;

   /* if CPU wants to write, but we are pending a GPU read, we are busy: */
   if (write && rsc->track->batch_mask)
      return true;

   if (rsc->stencil && pending(rsc->stencil, write))
      return true;

   return false;
}

static inline bool
resource_busy(struct fd_resource *rsc, unsigned op)
{
   return fd_bo_cpu_prep(rsc->bo, NULL, op | FD_BO_PREP_NOSYNC) != 0;
}

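/* A sketch of how a transfer-map path might use this (illustrative only;
 * assumes the FD_BO_PREP_READ/FD_BO_PREP_WRITE flags that pair with
 * FD_BO_PREP_NOSYNC above):
 *
 *    unsigned op = (usage & PIPE_MAP_WRITE) ? FD_BO_PREP_WRITE
 *                                           : FD_BO_PREP_READ;
 *    if (resource_busy(rsc, op)) {
 *       // stall via fd_resource_wait(), or shadow the rsc to avoid it
 *    }
 */
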
int __fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc,
                       unsigned op, const char *func);
#define fd_resource_wait(ctx, rsc, op)                                         \
   __fd_resource_wait(ctx, rsc, op, __func__)

static inline void
fd_resource_lock(struct fd_resource *rsc)
{
   simple_mtx_lock(&rsc->lock);
}

static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
   simple_mtx_unlock(&rsc->lock);
}

static inline void
fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
{
   if (!prsc)
      return;
   struct fd_resource *rsc = fd_resource(prsc);
   /* Bits are only ever ORed in, and we expect many set_usage() calls per
    * resource, so do the quick check outside of the lock.
    */
   if (likely(rsc->dirty & usage))
      return;
   fd_resource_lock(rsc);
   rsc->dirty |= usage;
   fd_resource_unlock(rsc);
}

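/* A sketch of a typical call site (FD_DIRTY_TEX is illustrative; the bits
 * come from enum fd_dirty_3d_state): when a sampler view is bound, tag the
 * backing resource with the state it could dirty on rebind:
 *
 *    fd_resource_set_usage(view->texture, FD_DIRTY_TEX);
 */
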
static inline bool
has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}

struct fd_transfer {
   struct threaded_transfer b;
   struct pipe_resource *staging_prsc;
   struct pipe_box staging_box;
};

static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}

static inline struct fdl_slice *
fd_resource_slice(struct fd_resource *rsc, unsigned level)
{
   assert(level <= rsc->b.b.last_level);
   return &rsc->layout.slices[level];
}

static inline uint32_t
fd_resource_layer_stride(struct fd_resource *rsc, unsigned level)
{
   return fdl_layer_stride(&rsc->layout, level);
}

/* get pitch (in bytes) for specified mipmap level */
static inline uint32_t
fd_resource_pitch(struct fd_resource *rsc, unsigned level)
{
   if (is_a2xx(fd_screen(rsc->b.b.screen)))
      return fdl2_pitch(&rsc->layout, level);

   return fdl_pitch(&rsc->layout, level);
}

/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

/* This might be a5xx specific, but higher mipmap levels are always linear: */
static inline bool
fd_resource_level_linear(const struct pipe_resource *prsc, int level)
{
   struct fd_screen *screen = fd_screen(prsc->screen);
   debug_assert(!is_a3xx(screen));

   return fdl_level_linear(&fd_resource_const(prsc)->layout, level);
}

static inline uint32_t
fd_resource_tile_mode(struct pipe_resource *prsc, int level)
{
   return fdl_tile_mode(&fd_resource(prsc)->layout, level);
}

static inline const char *
fd_resource_tile_mode_desc(const struct fd_resource *rsc, int level)
{
   return fdl_tile_mode_desc(&rsc->layout, level);
}

static inline bool
fd_resource_ubwc_enabled(struct fd_resource *rsc, int level)
{
   return fdl_ubwc_enabled(&rsc->layout, level);
}

/* access # of samples, with 0 normalized to 1 (which is what we care about
 * most of the time)
 */
static inline unsigned
fd_resource_nr_samples(struct pipe_resource *prsc)
{
   return MAX2(1, prsc->nr_samples);
}

void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_replace_buffer_storage(struct pipe_context *ctx,
                               struct pipe_resource *dst,
                               struct pipe_resource *src,
                               unsigned num_rebinds,
                               uint32_t rebind_mask,
                               uint32_t delete_buffer_id) in_dt;
bool fd_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *prsc,
                      unsigned usage);

void fd_resource_uncompress(struct fd_context *ctx,
                            struct fd_resource *rsc,
                            bool linear) assert_dt;
void fd_resource_dump(struct fd_resource *rsc, const char *name);

bool fd_render_condition_check(struct pipe_context *pctx) assert_dt;

static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   return rsc->track->batch_mask & (1 << batch->idx);
}

static inline void
fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc) assert_dt
{
   if (unlikely(rsc->needs_ubwc_clear)) {
      batch->ctx->clear_ubwc(batch, rsc);
      rsc->needs_ubwc_clear = false;
   }
}

static inline void
fd_batch_resource_read(struct fd_batch *batch,
                       struct fd_resource *rsc) assert_dt
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}

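/* A sketch of typical draw-time usage (illustrative, not an API defined
 * here): each resource referenced by the draw is tracked as a read so the
 * batch dependency graph stays correct:
 *
 *    fd_batch_resource_read(batch, fd_resource(view->texture));
 */
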
#endif /* FREEDRENO_RESOURCE_H_ */