• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2021 Alyssa Rosenzweig
3  * Copyright (C) 2020-2021 Collabora, Ltd.
4  * Copyright (C) 2014 Broadcom
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  */
25 
26 #include "agx_state.h"
27 #include "compiler/nir/nir_builder.h"
28 #include "asahi/compiler/agx_compile.h"
29 #include "gallium/auxiliary/util/u_blitter.h"
30 
31 static void
agx_build_reload_shader(struct agx_device * dev)32 agx_build_reload_shader(struct agx_device *dev)
33 {
34    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
35          &agx_nir_options, "agx_reload");
36 
37    nir_variable *out = nir_variable_create(b.shader, nir_var_shader_out,
38          glsl_vector_type(GLSL_TYPE_FLOAT, 4), "output");
39    out->data.location = FRAG_RESULT_DATA0;
40 
41    nir_ssa_def *fragcoord = nir_load_frag_coord(&b);
42    nir_ssa_def *coord = nir_channels(&b, fragcoord, 0x3);
43 
44    nir_tex_instr *tex = nir_tex_instr_create(b.shader, 1);
45    tex->dest_type = nir_type_float32;
46    tex->sampler_dim = GLSL_SAMPLER_DIM_RECT;
47    tex->op = nir_texop_tex;
48    tex->src[0].src_type = nir_tex_src_coord;
49    tex->src[0].src = nir_src_for_ssa(coord);
50    tex->coord_components = 2;
51    nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
52    nir_builder_instr_insert(&b, &tex->instr);
53    nir_store_var(&b, out, &tex->dest.ssa, 0xFF);
54 
55    unsigned offset = 0;
56    unsigned bo_size = 4096;
57 
58    struct agx_bo *bo = agx_bo_create(dev, bo_size, AGX_MEMORY_TYPE_SHADER);
59    dev->reload.bo = bo;
60 
61    for (unsigned i = 0; i < AGX_NUM_FORMATS; ++i) {
62       struct util_dynarray binary;
63       util_dynarray_init(&binary, NULL);
64 
65       nir_shader *s = nir_shader_clone(NULL, b.shader);
66       struct agx_shader_info info;
67 
68       struct agx_shader_key key = {
69          .fs.tib_formats[0] = i
70       };
71 
72       agx_compile_shader_nir(s, &key, &binary, &info);
73 
74       assert(offset + binary.size < bo_size);
75       memcpy(((uint8_t *) bo->ptr.cpu) + offset, binary.data, binary.size);
76 
77       dev->reload.format[i] = bo->ptr.gpu + offset;
78       offset += ALIGN_POT(binary.size, 128);
79 
80       util_dynarray_fini(&binary);
81    }
82 }
83 
/* Snapshot every piece of context state that util_blitter will clobber, so
 * it can be restored after the blit. Each util_blitter_save_* call records
 * one state group; anything not saved here would leak blitter state into
 * subsequent draws. When render_cond is set, the render condition is left
 * active so the blit itself is conditioned on it. */
static void
agx_blitter_save(struct agx_context *ctx, struct blitter_context *blitter,
                 bool render_cond)
{
   /* Vertex-side state */
   util_blitter_save_vertex_buffer_slot(blitter, ctx->vertex_buffers);
   util_blitter_save_vertex_elements(blitter, ctx->attributes);
   util_blitter_save_vertex_shader(blitter, ctx->stage[PIPE_SHADER_VERTEX].shader);

   /* Rasterizer / output-merger state */
   util_blitter_save_rasterizer(blitter, ctx->rast);
   util_blitter_save_viewport(blitter, &ctx->viewport);
   util_blitter_save_scissor(blitter, &ctx->scissor);
   util_blitter_save_fragment_shader(blitter, ctx->stage[PIPE_SHADER_FRAGMENT].shader);
   util_blitter_save_blend(blitter, ctx->blend);
   util_blitter_save_depth_stencil_alpha(blitter, &ctx->zs);
   util_blitter_save_stencil_ref(blitter, &ctx->stencil_ref);
   util_blitter_save_so_targets(blitter, 0, NULL);
   util_blitter_save_sample_mask(blitter, ctx->sample_mask, 0);

   /* Framebuffer plus fragment-stage resources */
   util_blitter_save_framebuffer(blitter, &ctx->framebuffer);
   util_blitter_save_fragment_sampler_states(blitter,
         ctx->stage[PIPE_SHADER_FRAGMENT].sampler_count,
         (void **)(ctx->stage[PIPE_SHADER_FRAGMENT].samplers));
   util_blitter_save_fragment_sampler_views(blitter,
         ctx->stage[PIPE_SHADER_FRAGMENT].texture_count,
         (struct pipe_sampler_view **)ctx->stage[PIPE_SHADER_FRAGMENT].textures);
   util_blitter_save_fragment_constant_buffer_slot(blitter,
         ctx->stage[PIPE_SHADER_FRAGMENT].cb);

   /* Only disable the render condition when the blit must ignore it. */
   if (!render_cond) {
      util_blitter_save_render_condition(blitter,
            (struct pipe_query *) ctx->cond_query,
            ctx->cond_cond, ctx->cond_mode);
   }
}
117 
118 void
agx_blit(struct pipe_context * pipe,const struct pipe_blit_info * info)119 agx_blit(struct pipe_context *pipe,
120               const struct pipe_blit_info *info)
121 {
122    //if (info->render_condition_enable &&
123    //    !agx_render_condition_check(pan_context(pipe)))
124    //        return;
125 
126    struct agx_context *ctx = agx_context(pipe);
127 
128    if (!util_blitter_is_blit_supported(ctx->blitter, info))
129       unreachable("Unsupported blit\n");
130 
131    agx_blitter_save(ctx, ctx->blitter, info->render_condition_enable);
132    util_blitter_blit(ctx->blitter, info);
133 }
134 
/* We need some fixed shaders for common rendering tasks. When colour buffer
 * reload is not in use, a shader is used to clear a particular colour. At the
 * end of rendering a tile, a shader is used to write it out. These shaders are
 * too trivial to go through the compiler at this stage. */

/* Raw machine code terminating a shader: a stop instruction followed by
 * padding, so execution cannot run off the end of the binary. */
#define AGX_STOP \
	0x88, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, \
	0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00 \

/* Raw machine code for the blend/tilebuffer-write instruction (disassembles
 * as "TODO.blend" below). */
#define AGX_BLEND \
	0x09, 0x00, 0x00, 0x04, 0xf0, 0xfc, 0x80, 0x03
145 
/* Clears the tilebuffer, where u6-u7 are preloaded with the FP16 clear colour

   0: 7e018c098040         bitop_mov        r0, u6
   6: 7e058e098000         bitop_mov        r1, u7
   c: 09000004f0fc8003     TODO.blend
   */

/* Canned binary: move the clear colour from uniforms into r0-r1, write it to
 * the tilebuffer via the blend op, then stop. */
static uint8_t shader_clear[] = {
   0x7e, 0x01, 0x8c, 0x09, 0x80, 0x40,
   0x7e, 0x05, 0x8e, 0x09, 0x80, 0x00,
   AGX_BLEND,
   AGX_STOP
};

/* Canned binary: the end-of-tile store shader that writes the rendered tile
 * back out to memory, then stops. */
static uint8_t shader_store[] = {
   0x7e, 0x00, 0x04, 0x09, 0x80, 0x00,
   0xb1, 0x80, 0x00, 0x80, 0x00, 0x4a, 0x00, 0x00, 0x0a, 0x00,
   AGX_STOP
};
165 
166 void
agx_internal_shaders(struct agx_device * dev)167 agx_internal_shaders(struct agx_device *dev)
168 {
169    unsigned clear_offset = 0;
170    unsigned store_offset = 1024;
171 
172    struct agx_bo *bo = agx_bo_create(dev, 4096, AGX_MEMORY_TYPE_SHADER);
173    memcpy(((uint8_t *) bo->ptr.cpu) + clear_offset, shader_clear, sizeof(shader_clear));
174    memcpy(((uint8_t *) bo->ptr.cpu) + store_offset, shader_store, sizeof(shader_store));
175 
176    dev->internal.bo = bo;
177    dev->internal.clear = bo->ptr.gpu + clear_offset;
178    dev->internal.store = bo->ptr.gpu + store_offset;
179 
180    agx_build_reload_shader(dev);
181 }
182