/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#ifndef __PAN_ENCODER_H
#define __PAN_ENCODER_H

#include "util/macros.h"

#include <stdbool.h>
#include "genxml/gen_macros.h"
#include "util/format/u_format.h"

#include "pan_pool.h"

/* Tiler structure size computation */

unsigned panfrost_tiler_header_size(unsigned width, unsigned height,
                                    unsigned mask, bool hierarchy);

unsigned panfrost_tiler_full_size(unsigned width, unsigned height,
                                  unsigned mask, bool hierarchy);

unsigned panfrost_choose_hierarchy_mask(unsigned width, unsigned height,
                                        unsigned vertex_count, bool hierarchy);
#if defined(PAN_ARCH) && PAN_ARCH <= 5
static inline unsigned
panfrost_tiler_get_polygon_list_size(unsigned fb_width, unsigned fb_height,
                                     unsigned vertex_count, bool hierarchy)
{
   if (!vertex_count)
      return MALI_MIDGARD_TILER_MINIMUM_HEADER_SIZE + 4;

   unsigned hierarchy_mask = panfrost_choose_hierarchy_mask(
      fb_width, fb_height, vertex_count, hierarchy);

   return panfrost_tiler_full_size(fb_width, fb_height, hierarchy_mask,
                                   hierarchy) +
          panfrost_tiler_header_size(fb_width, fb_height, hierarchy_mask,
                                     hierarchy);
}
#endif
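
/* Usage sketch (illustrative, not part of the original header): on Midgard,
 * the driver typically calls panfrost_tiler_get_polygon_list_size() with the
 * framebuffer dimensions and the batch's total vertex count, allocates that
 * many bytes of GPU-accessible memory, and points the tiler descriptor's
 * polygon list at the allocation. */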

/* Stack sizes */

unsigned panfrost_get_stack_shift(unsigned stack_size);

unsigned panfrost_get_total_stack_size(unsigned thread_size,
                                       unsigned threads_per_core,
                                       unsigned core_id_range);
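
/* Note (illustrative): panfrost_get_stack_shift() encodes a per-thread stack
 * size as the shift value the hardware descriptors expect, while
 * panfrost_get_total_stack_size() returns the size of the stack buffer to
 * allocate for all threads across all cores; see the implementations for the
 * exact rounding rules. */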

/* Attributes / instancing */

unsigned panfrost_padded_vertex_count(unsigned vertex_count);

unsigned panfrost_compute_magic_divisor(unsigned hw_divisor, unsigned *o_shift,
                                        unsigned *extra_flags);
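
/* Note (illustrative): panfrost_padded_vertex_count() pads an instanced
 * draw's vertex count up to a value the divisor encoding can represent
 * exactly. Non-power-of-two divisors are handled with "magic number"
 * division, i.e. a multiply-high by a precomputed reciprocal followed by a
 * shift: panfrost_compute_magic_divisor() returns that reciprocal and writes
 * the shift and an extra correction flag through its out parameters, as used
 * by panfrost_instance_id() below. */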

#ifdef PAN_ARCH
/* Records for gl_VertexID and gl_InstanceID use special encodings on Midgard */
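
/* In effect, the hardware derives both from its linear invocation index:
 * gl_VertexID is that index modulo the padded vertex count and gl_InstanceID
 * is the corresponding quotient, so each record is programmed purely as a
 * divisor by the helpers below. */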

#if PAN_ARCH <= 5
static inline void
panfrost_vertex_id(unsigned padded_count,
                   struct mali_attribute_vertex_id_packed *attr, bool instanced)
{
   pan_pack(attr, ATTRIBUTE_VERTEX_ID, cfg) {
      if (instanced) {
         cfg.divisor_r = __builtin_ctz(padded_count);
         cfg.divisor_p = padded_count >> (cfg.divisor_r + 1);
      } else {
         /* Large values so the modulo is a no-op */
         cfg.divisor_r = 0x1F;
         cfg.divisor_p = 0x4;
      }
   }
}

static inline void
panfrost_instance_id(unsigned padded_count,
                     struct mali_attribute_instance_id_packed *attr,
                     bool instanced)
{
   pan_pack(attr, ATTRIBUTE_INSTANCE_ID, cfg) {
      if (!instanced || padded_count <= 1) {
         /* Divide by large number to force to 0 */
         cfg.divisor_p = ((1u << 31) - 1);
         cfg.divisor_r = 0x1F;
         cfg.divisor_e = 0x1;
      } else if (util_is_power_of_two_or_zero(padded_count)) {
         /* Can't underflow since padded_count >= 2 */
         cfg.divisor_r = __builtin_ctz(padded_count) - 1;
      } else {
         cfg.divisor_p = panfrost_compute_magic_divisor(
            padded_count, &cfg.divisor_r, &cfg.divisor_e);
      }
   }
}
#endif /* PAN_ARCH <= 5 */

/* Sampler comparison functions in OpenGL are flipped relative to the
 * hardware's convention, so we need to be able to flip them accordingly */

static inline enum mali_func
panfrost_flip_compare_func(enum mali_func f)
{
   switch (f) {
   case MALI_FUNC_LESS:
      return MALI_FUNC_GREATER;
   case MALI_FUNC_GREATER:
      return MALI_FUNC_LESS;
   case MALI_FUNC_LEQUAL:
      return MALI_FUNC_GEQUAL;
   case MALI_FUNC_GEQUAL:
      return MALI_FUNC_LEQUAL;
   default:
      return f;
   }
}

#if PAN_ARCH <= 7
/* Compute shaders are invoked with a gl_NumWorkGroups X/Y/Z triplet. Vertex
 * shaders are invoked as (1, vertex_count, instance_count). Compute shaders
 * also have a gl_WorkGroupSize X/Y/Z triplet. These 6 values are packed
 * together in a dynamic bitfield by this routine. */
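
/* Illustrative example: a direct dispatch of (16, 16, 1) workgroups with an
 * (8, 4, 1) workgroup size packs as 7 | (3 << 3) | (15 << 5) | (15 << 9) =
 * 0x1fff, with size_y_shift = 3, size_z_shift = 5, workgroups_x_shift = 5,
 * workgroups_y_shift = 9 and workgroups_z_shift = 13: each dimension stores
 * (value - 1) in util_logbase2_ceil(value) bits, so a dimension of 1 consumes
 * no bits at all. */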

static inline void
panfrost_pack_work_groups_compute(struct mali_invocation_packed *out,
                                  unsigned num_x, unsigned num_y,
                                  unsigned num_z, unsigned size_x,
                                  unsigned size_y, unsigned size_z,
                                  bool quirk_graphics, bool indirect_dispatch)
{
   /* The values needing packing, in order, and the corresponding shifts.
    * Indices into shifts are off-by-one to make the logic easier */

   unsigned values[6] = {size_x, size_y, size_z, num_x, num_y, num_z};
   unsigned shifts[7] = {0};
   uint32_t packed = 0;

   for (unsigned i = 0; i < 6; ++i) {
      /* Must be positive, otherwise we underflow */
      assert(values[i] >= 1);

      /* OR it in, shifting as required */
      packed |= ((values[i] - 1) << shifts[i]);

      /* How many bits did we use? */
      unsigned bit_count = util_logbase2_ceil(values[i]);

      /* Set the next shift accordingly */
      shifts[i + 1] = shifts[i] + bit_count;
   }

   pan_pack(out, INVOCATION, cfg) {
      cfg.invocations = packed;
      cfg.size_y_shift = shifts[1];
      cfg.size_z_shift = shifts[2];
      cfg.workgroups_x_shift = shifts[3];

      if (!indirect_dispatch) {
         /* Leave zero for the dispatch shader */
         cfg.workgroups_y_shift = shifts[4];
         cfg.workgroups_z_shift = shifts[5];
      }

      /* Quirk: for non-instanced graphics, the blob sets
       * workgroups_z_shift = 32. This doesn't appear to matter to
       * the hardware, but it's good to be bit-identical. */

      if (quirk_graphics && (num_z <= 1))
         cfg.workgroups_z_shift = 32;

      /* For graphics, set to the minimum efficient value. For
       * compute, must equal the workgroup X shift for barriers to
       * function correctly */

      cfg.thread_group_split =
         quirk_graphics ? MALI_SPLIT_MIN_EFFICIENT : cfg.workgroups_x_shift;
   }
}
#endif

#if PAN_ARCH >= 5
/* Format conversion */
static inline enum mali_z_internal_format
panfrost_get_z_internal_format(enum pipe_format fmt)
{
   switch (fmt) {
   case PIPE_FORMAT_Z16_UNORM:
   case PIPE_FORMAT_Z16_UNORM_S8_UINT:
      return MALI_Z_INTERNAL_FORMAT_D16;
   case PIPE_FORMAT_Z24_UNORM_S8_UINT:
   case PIPE_FORMAT_Z24X8_UNORM:
      return MALI_Z_INTERNAL_FORMAT_D24;
   case PIPE_FORMAT_Z32_FLOAT:
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
      return MALI_Z_INTERNAL_FORMAT_D32;
   default:
      unreachable("Unsupported depth/stencil format.");
   }
}
#endif

#endif /* PAN_ARCH */

#if PAN_ARCH >= 9
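/* Writes entry 'index' of the resource table at 'base', pointing it at
 * 'resource_count' buffer descriptors starting at GPU address 'address'
 * (a no-op when the count is zero). */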
static inline void
panfrost_make_resource_table(struct panfrost_ptr base, unsigned index,
                             uint64_t address, unsigned resource_count)
{
   if (resource_count == 0)
      return;

   struct mali_resource_packed *res = base.cpu;
   pan_pack(&res[index], RESOURCE, cfg) {
      cfg.address = address;
      cfg.size = resource_count * pan_size(BUFFER);
   }
}
#endif

#endif