/*
 * Copyright 2021 Alyssa Rosenzweig
 * SPDX-License-Identifier: MIT
 */
#include <stdio.h>
#include "asahi/genxml/agx_pack.h"
#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/half_float.h"
#include "util/macros.h"
#include "agx_device.h"
#include "agx_state.h"
#include "pool.h"

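/* Return the GPU address of a constant buffer and mark its backing resource as
 * read by the batch. Returns 0 if no buffer is bound.
 */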
static uint64_t
agx_const_buffer_ptr(struct agx_batch *batch, struct pipe_constant_buffer *cb)
{
   if (cb->buffer) {
      struct agx_resource *rsrc = agx_resource(cb->buffer);
      agx_batch_reads(batch, rsrc);

      return rsrc->bo->va->addr + cb->buffer_offset;
   } else {
      return 0;
   }
}

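/* Upload vertex buffer state: gather the GPU address and size of each bound
 * vertex buffer, then fill in the per-attribute base address and clamp
 * uniforms.
 */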
void
agx_upload_vbos(struct agx_batch *batch)
{
   struct agx_context *ctx = batch->ctx;
   struct agx_vertex_elements *attribs = ctx->attributes;
   struct agx_device *dev = agx_device(ctx->base.screen);
   uint64_t buffers[PIPE_MAX_ATTRIBS] = {0};
   size_t buf_sizes[PIPE_MAX_ATTRIBS] = {0};

   u_foreach_bit(vbo, ctx->vb_mask) {
      struct pipe_vertex_buffer vb = ctx->vertex_buffers[vbo];
      assert(!vb.is_user_buffer);

      if (vb.buffer.resource) {
         struct agx_resource *rsrc = agx_resource(vb.buffer.resource);
         agx_batch_reads(batch, rsrc);

         buffers[vbo] = rsrc->bo->va->addr + vb.buffer_offset;
         buf_sizes[vbo] = rsrc->layout.size_B - vb.buffer_offset;
      }
   }

   /* NULL vertex buffers read zeroes from NULL. This depends on soft fault.
    * Without soft fault, we just upload zeroes to read from.
    */
   uint64_t sink = 0;

   if (!agx_has_soft_fault(dev)) {
      uint32_t zeroes[4] = {0};
      sink = agx_pool_upload_aligned(&batch->pool, &zeroes, 16, 16);
   }

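   /* Determine the base address and clamp for each vertex attribute. Unbound
    * vertex buffers have a zero base and are expected to fall back to the
    * sink.
    */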
   for (unsigned i = 0; i < PIPE_MAX_ATTRIBS; ++i) {
      unsigned buf = attribs->buffers[i];
      uint64_t addr;

      batch->uniforms.attrib_clamp[i] = agx_calculate_vbo_clamp(
         buffers[buf], sink, attribs->key[i].format, buf_sizes[buf],
         attribs->key[i].stride, attribs->src_offsets[i], &addr);

      batch->uniforms.attrib_base[i] = addr;
   }
}

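/* Allocate the root uniform table for a draw and upload the CPU-side shadow
 * copy of batch->uniforms into it.
 */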
void
agx_upload_uniforms(struct agx_batch *batch)
{
   struct agx_context *ctx = batch->ctx;

   struct agx_ptr root_ptr = agx_pool_alloc_aligned(
      &batch->pool, sizeof(struct agx_draw_uniforms), 16);

   batch->uniforms.tables[AGX_SYSVAL_TABLE_ROOT] = root_ptr.gpu;
   batch->uniforms.sample_mask = ctx->sample_mask;

   assert(_mesa_float_to_half(0.5) == 0x3800);
   batch->uniforms.clip_z_coeff =
      (ctx->rast && !ctx->rast->base.clip_halfz) ? 0x3800 : 0x0;

   batch->uniforms.sprite_mask =
      (batch->reduced_prim == MESA_PRIM_POINTS && ctx->rast)
         ? ctx->rast->base.sprite_coord_enable
         : 0;

   memcpy(root_ptr.cpu, &batch->uniforms, sizeof(batch->uniforms));
}

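/* Upload per-sampler uniforms: LOD biases for every valid sampler, plus
 * bindless sampler handles allocated from the sampler heap when the bound
 * shader uses bindless samplers.
 */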
void
agx_set_sampler_uniforms(struct agx_batch *batch, enum pipe_shader_type stage)
{
   struct agx_context *ctx = batch->ctx;
   struct agx_stage *st = &ctx->stage[stage];
   struct agx_stage_uniforms *unif = &batch->stage_uniforms[stage];
   struct agx_device *dev = agx_device(ctx->base.screen);

   u_foreach_bit(s, st->valid_samplers) {
      unif->lod_bias[s] = st->samplers[s]->lod_bias_as_fp16;
   }

   /* If we use bindless samplers, insert sampler into the heap */
   if (st->shader && st->shader->uses_bindless_samplers) {
      u_foreach_bit(s, st->valid_samplers) {
         unif->sampler_handle[s] =
            28 +
            agx_sampler_heap_add(dev, &batch->sampler_heap,
                                 &st->samplers[s]->desc_without_custom_border);
      }
   }
}

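/* Upload uniform buffer (UBO) bindings: the base GPU address and size of each
 * bound constant buffer.
 */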
void
agx_set_cbuf_uniforms(struct agx_batch *batch, enum pipe_shader_type stage)
{
   struct agx_stage *st = &batch->ctx->stage[stage];
   struct agx_stage_uniforms *unif = &batch->stage_uniforms[stage];

   u_foreach_bit(cb, st->cb_mask) {
      unif->ubo_base[cb] = agx_const_buffer_ptr(batch, &st->cb[cb]);
      unif->ubo_size[cb] = st->cb[cb].buffer_size;
   }
}

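/* Upload shader storage buffer (SSBO) bindings. Valid buffers get their GPU
 * address and size and are tracked for batch read/write dependencies; invalid
 * bindings point at a small zero-filled sink so stray accesses do not fault.
 */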
void
agx_set_ssbo_uniforms(struct agx_batch *batch, enum pipe_shader_type stage)
{
   struct agx_stage *st = &batch->ctx->stage[stage];
   struct agx_stage_uniforms *unif = &batch->stage_uniforms[stage];

   /* Single element sink. TODO: Optimize with soft fault. */
   uint32_t zeroes[4] = {0};
   uint64_t sink = agx_pool_upload_aligned(&batch->pool, &zeroes, 16, 16);

   /* Consider all shader buffers, needed to avoid faults with
    * e.g. arb_shader_storage_buffer_object-array-ssbo-binding.
    */
   for (unsigned cb = 0; cb < PIPE_MAX_SHADER_BUFFERS; ++cb) {
      struct pipe_shader_buffer *sb = &st->ssbo[cb];

      if (sb->buffer && st->ssbo[cb].buffer_size) {
         struct agx_resource *rsrc = agx_resource(sb->buffer);

         if (st->ssbo_writable_mask & BITFIELD_BIT(cb)) {
            agx_batch_writes_range(batch, rsrc, sb->buffer_offset,
                                   sb->buffer_size);
            batch->incoherent_writes = true;
         } else {
            agx_batch_reads(batch, rsrc);
         }

         unif->ssbo_base[cb] = rsrc->bo->va->addr + sb->buffer_offset;
         unif->ssbo_size[cb] = st->ssbo[cb].buffer_size;
      } else {
         /* Invalid, so use the sink */
         unif->ssbo_base[cb] = sink;
         unif->ssbo_size[cb] = 0;
      }
   }
}