/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#include <stdint.h>
#include "blorp/blorp.h"
#include "compiler/glsl/list.h"

#ifdef __cplusplus
extern "C" {
#endif
/**
 * Enum for keeping track of the fast clear state of a buffer associated with
 * a miptree.
 *
 * Fast clear works by deferring the memory writes that would be used to clear
 * the buffer, so that instead of performing them at the time of the clear
 * operation, the hardware automatically performs them at the time that the
 * buffer is later accessed for rendering. The MCS buffer keeps track of
 * which regions of the buffer still have pending clear writes.
 *
 * This enum keeps track of the driver's knowledge of pending fast clears in
 * the MCS buffer.
 *
 * MCS buffers only exist on Gen7+.
 */
enum intel_fast_clear_state
{
   /**
    * No deferred clears are pending for this miptree, and the contents of
    * the color buffer are entirely correct. An MCS buffer may or may not
    * exist for this miptree. If it does exist, it is entirely in the "no
    * deferred clears pending" state. If it does not exist, it will be
    * created the first time a fast color clear is executed.
    *
    * In this state, the color buffer can be used for purposes other than
    * rendering without needing a render target resolve.
    *
    * Since there is no such thing as a "fast color clear resolve" for MSAA
    * buffers, an MSAA buffer will never be in this state.
    */
   INTEL_FAST_CLEAR_STATE_RESOLVED,

   /**
    * An MCS buffer exists for this miptree, and deferred clears are pending
    * for some regions of the color buffer, as indicated by the MCS buffer.
    * The contents of the color buffer are only correct for the regions where
    * the MCS buffer doesn't indicate a deferred clear.
    *
    * If a single-sample buffer is in this state, a render target resolve
    * must be performed before it can be used for purposes other than
    * rendering.
    */
   INTEL_FAST_CLEAR_STATE_UNRESOLVED,

   /**
    * An MCS buffer exists for this miptree, and deferred clears are pending
    * for the entire color buffer, and the contents of the MCS buffer reflect
    * this. The contents of the color buffer are undefined.
    *
    * If a single-sample buffer is in this state, a render target resolve
    * must be performed before it can be used for purposes other than
    * rendering.
    *
    * If the client attempts to clear a buffer which is already in this
    * state, the clear can be safely skipped, since the buffer is already
    * clear.
    */
   INTEL_FAST_CLEAR_STATE_CLEAR,
};
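
/*
 * A minimal usage sketch for the states above, covering the "skip a
 * redundant clear" case described for INTEL_FAST_CLEAR_STATE_CLEAR. The
 * per-slice lookup and update helpers are hypothetical names used only for
 * illustration; they are not declared in this header:
 *
 *    enum intel_fast_clear_state state =
 *       get_slice_fast_clear_state(mt, level, layer);
 *
 *    if (state == INTEL_FAST_CLEAR_STATE_CLEAR)
 *       return;
 *
 * The early return is safe because the slice is already entirely clear.
 * Otherwise the driver programs the clear color, emits the fast clear, and
 * records the new state:
 *
 *    set_slice_fast_clear_state(mt, level, layer,
 *                               INTEL_FAST_CLEAR_STATE_CLEAR);
 */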

/**
 * \brief Map of miptree slices to needed resolves.
 *
 * The map is implemented as a linear doubly-linked list.
 *
 * In the intel_resolve_map*() functions, the \c resolve_map list itself is
 * never inspected for data; it only serves as an anchor (sentinel) for the
 * list of entries.
 *
 * \par Design Discussion
 *
 * There are two possible ways to record which miptree slices need
 * resolves: 1) maintain a flag for every miptree slice in the texture,
 * likely in intel_mipmap_level::slice, or 2) maintain a list of only
 * those slices that need a resolve.
 *
 * Immediately before drawing, a full depth resolve is performed on each
 * enabled depth texture. If design 1 were chosen, then at each draw call
 * it would be necessary to iterate over each miptree slice of each
 * enabled depth texture in order to query whether that slice needed a
 * resolve. In the worst case, this would require 2^16 iterations: 16
 * texture units, 16 miplevels, and 256 depth layers (assuming maximums
 * for OpenGL 2.1).
 *
 * By choosing design 2, the number of iterations is exactly the minimum
 * necessary.
 */
struct intel_resolve_map {
   struct exec_node link;

   uint32_t level;
   uint32_t layer;

   union {
      enum blorp_hiz_op need;
      enum intel_fast_clear_state fast_clear_state;
   };
};

void
intel_resolve_map_set(struct exec_list *resolve_map,
                      uint32_t level,
                      uint32_t layer,
                      unsigned new_state);
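
/*
 * Example (sketch): after rendering to a depth slice with HiZ enabled, that
 * slice can be flagged as needing a depth resolve before it is sampled.
 * Here \c mt->hiz_map is an assumed exec_list field on the miptree, and the
 * BLORP_HIZ_OP_* value comes from blorp/blorp.h:
 *
 *    intel_resolve_map_set(&mt->hiz_map, level, layer,
 *                          BLORP_HIZ_OP_DEPTH_RESOLVE);
 *
 * Because intel_resolve_map::need and ::fast_clear_state share a union, the
 * same call pattern can record an enum intel_fast_clear_state value instead
 * when the map tracks color fast-clear state rather than HiZ resolves.
 */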

const struct intel_resolve_map *
intel_resolve_map_find_any(const struct exec_list *resolve_map,
                           uint32_t start_level, uint32_t num_levels,
                           uint32_t start_layer, uint32_t num_layers);

static inline const struct intel_resolve_map *
intel_resolve_map_const_get(const struct exec_list *resolve_map,
                            uint32_t level,
                            uint32_t layer)
{
   return intel_resolve_map_find_any(resolve_map, level, 1, layer, 1);
}

static inline struct intel_resolve_map *
intel_resolve_map_get(struct exec_list *resolve_map,
                      uint32_t level,
                      uint32_t layer)
{
   return (struct intel_resolve_map *)intel_resolve_map_find_any(
                                         resolve_map, level, 1, layer, 1);
}
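
/*
 * Example (sketch): checking whether one slice still has a pending resolve.
 * \c mt->hiz_map is an assumed exec_list field on the miptree:
 *
 *    struct intel_resolve_map *entry =
 *       intel_resolve_map_get(&mt->hiz_map, level, layer);
 *
 *    if (entry && entry->need == BLORP_HIZ_OP_DEPTH_RESOLVE) {
 *       ... perform the depth resolve ...
 *       intel_resolve_map_remove(entry);
 *    }
 *
 * A missing entry means the slice needs no resolve, which is exactly the
 * property that keeps per-draw work proportional to the number of dirty
 * slices.
 */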

void
intel_resolve_map_remove(struct intel_resolve_map *resolve_map);

void
intel_resolve_map_clear(struct exec_list *resolve_map);
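
/*
 * Example (sketch): flushing every pending resolve before a draw. The
 * perform_hiz_resolve() call is a placeholder for the driver's real resolve
 * path; foreach_list_typed_safe() comes from compiler/glsl/list.h and allows
 * removal while iterating:
 *
 *    foreach_list_typed_safe(struct intel_resolve_map, entry, link,
 *                            &mt->hiz_map) {
 *       perform_hiz_resolve(brw, mt, entry->level, entry->layer, entry->need);
 *       intel_resolve_map_remove(entry);
 *    }
 *
 * Alternatively, once all entries have been handled in a single pass, the
 * whole map can be dropped with intel_resolve_map_clear(&mt->hiz_map).
 */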

#ifdef __cplusplus
} /* extern "C" */
#endif