/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vn_ring.h"

#include "vn_cs.h"
#include "vn_renderer.h"

enum vn_ring_status_flag {
   VN_RING_STATUS_IDLE = 1u << 0,
};

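/* Load the head, i.e., the read position that the renderer advances as it
 * consumes command data from the ring buffer.
 */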
static uint32_t
vn_ring_load_head(const struct vn_ring *ring)
{
   /* the renderer is expected to store the head with memory_order_release,
    * forming a release-acquire ordering
    */
   return atomic_load_explicit(ring->shared.head, memory_order_acquire);
}

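/* Publish ring->cur as the new tail, i.e., the write position, making the
 * command data written before this store visible to the renderer.
 */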
static void
vn_ring_store_tail(struct vn_ring *ring)
{
   /* the renderer is expected to load the tail with memory_order_acquire,
    * forming a release-acquire ordering
    */
   atomic_store_explicit(ring->shared.tail, ring->cur,
                         memory_order_release);
}

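/* Load the ring status flags, which are presumably updated by the
 * renderer.  This is checked right after storing the tail to decide
 * whether the renderer needs an explicit notification (see
 * vn_ring_submit).
 */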
static uint32_t
vn_ring_load_status(const struct vn_ring *ring)
{
   /* this must be called and ordered after vn_ring_store_tail */
   return atomic_load_explicit(ring->shared.status, memory_order_seq_cst);
}

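/* Copy size bytes into the ring buffer at ring->cur, splitting the copy in
 * two when it wraps around the end of the buffer.  For example, with a
 * 4096-byte buffer, an offset of 4000 and a 200-byte write, the first 96
 * bytes land at the end of the buffer and the remaining 104 bytes at the
 * start.  The caller must have reserved the space beforehand.
 */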
static void
vn_ring_write_buffer(struct vn_ring *ring, const void *data, uint32_t size)
{
   assert(ring->cur + size - vn_ring_load_head(ring) <= ring->buffer_size);

   const uint32_t offset = ring->cur & ring->buffer_mask;
   if (offset + size <= ring->buffer_size) {
      memcpy(ring->shared.buffer + offset, data, size);
   } else {
      const uint32_t s = ring->buffer_size - offset;
      memcpy(ring->shared.buffer + offset, data, s);
      memcpy(ring->shared.buffer, data + s, size - s);
   }

   ring->cur += size;
}

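/* Return whether seqno a is at least as new as seqno b, using ring->cur as
 * the wrap-around reference point.  For example, with ring->cur == 100,
 * a == 80 and b == 50, this returns true.  The caveat below applies when
 * the 32-bit offsets wrap.
 */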
static bool
vn_ring_ge_seqno(const struct vn_ring *ring, uint32_t a, uint32_t b)
{
   /* this can return a false negative when not called fast enough (e.g.,
    * when called once every couple of hours), but following calls with
    * larger a's will correct it
    *
    * TODO use real seqnos?
    */
   if (a >= b)
      return ring->cur >= a || ring->cur < b;
   else
      return ring->cur >= a && ring->cur < b;
}

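/* Move submits whose seqno has been reached by the given seqno from the
 * pending list to the free list, dropping the shmem references they hold.
 */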
static void
vn_ring_retire_submits(struct vn_ring *ring, uint32_t seqno)
{
   list_for_each_entry_safe(struct vn_ring_submit, submit, &ring->submits,
                            head) {
      if (!vn_ring_ge_seqno(ring, seqno, submit->seqno))
         break;

      for (uint32_t i = 0; i < submit->shmem_count; i++)
         vn_renderer_shmem_unref(ring->renderer, submit->shmems[i]);

      list_del(&submit->head);
      list_add(&submit->head, &ring->free_submits);
   }
}

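/* Poll, via vn_relax, until the head has reached the given seqno, and
 * return the observed head.
 */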
static uint32_t
vn_ring_wait_seqno(const struct vn_ring *ring, uint32_t seqno)
{
   /* A renderer wait incurs several hops and the renderer might poll
    * repeatedly anyway. Let's just poll here.
    */
   uint32_t iter = 0;
   do {
      const uint32_t head = vn_ring_load_head(ring);
      if (vn_ring_ge_seqno(ring, head, seqno))
         return head;
      vn_relax(&iter, "ring seqno");
   } while (true);
}

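/* Check whether the ring buffer has room for size more bytes.
 * ring->cur - head is the amount of data the renderer has not consumed yet
 * (unsigned arithmetic handles wrap-around), so there is room when that
 * plus size does not exceed the buffer size.  On success the observed head
 * is returned through out_head.
 */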
static bool
vn_ring_has_space(const struct vn_ring *ring,
                  uint32_t size,
                  uint32_t *out_head)
{
   const uint32_t head = vn_ring_load_head(ring);
   if (likely(ring->cur + size - head <= ring->buffer_size)) {
      *out_head = head;
      return true;
   }

   return false;
}

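/* Return the observed head once the ring buffer has room for size more
 * bytes, polling via vn_relax when it does not.
 */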
static uint32_t
vn_ring_wait_space(const struct vn_ring *ring, uint32_t size)
{
   assert(size <= ring->buffer_size);

   uint32_t head;
   if (likely(vn_ring_has_space(ring, size, &head)))
      return head;

   {
      VN_TRACE_FUNC();

      /* see the reasoning in vn_ring_wait_seqno */
      uint32_t iter = 0;
      do {
         vn_relax(&iter, "ring space");
         if (vn_ring_has_space(ring, size, &head))
            return head;
      } while (true);
   }
}

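/* Compute the layout of the shared ring memory: three 64-byte-aligned
 * control words (head, tail, status), presumably kept on separate cache
 * lines to avoid false sharing, followed by the power-of-two command
 * buffer and an extra region of extra_size bytes.  With this struct, head
 * lands at offset 0, tail at 64, status at 128 and the buffer at 192.
 */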
void
vn_ring_get_layout(size_t buf_size,
                   size_t extra_size,
                   struct vn_ring_layout *layout)
{
   /* this can be changed/extended quite freely */
   struct layout {
      uint32_t head __attribute__((aligned(64)));
      uint32_t tail __attribute__((aligned(64)));
      uint32_t status __attribute__((aligned(64)));

      uint8_t buffer[] __attribute__((aligned(64)));
   };

   assert(buf_size && util_is_power_of_two_or_zero(buf_size));

   layout->head_offset = offsetof(struct layout, head);
   layout->tail_offset = offsetof(struct layout, tail);
   layout->status_offset = offsetof(struct layout, status);

   layout->buffer_offset = offsetof(struct layout, buffer);
   layout->buffer_size = buf_size;

   layout->extra_offset = layout->buffer_offset + layout->buffer_size;
   layout->extra_size = extra_size;

   layout->shmem_size = layout->extra_offset + layout->extra_size;
}

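/* Initialize the ring over the caller-provided shared memory: clear both
 * the ring and the shared memory, record the buffer size and mask, and
 * point shared.head/tail/status/buffer/extra at the offsets computed by
 * vn_ring_get_layout.
 */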
void
vn_ring_init(struct vn_ring *ring,
             struct vn_renderer *renderer,
             const struct vn_ring_layout *layout,
             void *shared)
{
   memset(ring, 0, sizeof(*ring));
   memset(shared, 0, layout->shmem_size);

   ring->renderer = renderer;

   assert(layout->buffer_size &&
          util_is_power_of_two_or_zero(layout->buffer_size));
   ring->buffer_size = layout->buffer_size;
   ring->buffer_mask = ring->buffer_size - 1;

   ring->shared.head = shared + layout->head_offset;
   ring->shared.tail = shared + layout->tail_offset;
   ring->shared.status = shared + layout->status_offset;
   ring->shared.buffer = shared + layout->buffer_offset;
   ring->shared.extra = shared + layout->extra_offset;

   list_inithead(&ring->submits);
   list_inithead(&ring->free_submits);
}

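/* Retire every pending submit (ring->cur is the last submitted seqno) and
 * free the submit objects accumulated on the free list.
 */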
void
vn_ring_fini(struct vn_ring *ring)
{
   vn_ring_retire_submits(ring, ring->cur);
   assert(list_is_empty(&ring->submits));

   list_for_each_entry_safe(struct vn_ring_submit, submit,
                            &ring->free_submits, head)
      free(submit);
}

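/* Return a vn_ring_submit with room for at least shmem_count shmems.
 * Small requests can reuse an entry from the free list, since free-list
 * entries are always allocated with at least min_shmem_count slots; larger
 * requests allocate a fresh one.
 */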
struct vn_ring_submit *
vn_ring_get_submit(struct vn_ring *ring, uint32_t shmem_count)
{
   const uint32_t min_shmem_count = 2;
   struct vn_ring_submit *submit;

   /* TODO this could be simplified if we could omit shmem_count */
   if (shmem_count <= min_shmem_count &&
       !list_is_empty(&ring->free_submits)) {
      submit =
         list_first_entry(&ring->free_submits, struct vn_ring_submit, head);
      list_del(&submit->head);
   } else {
      shmem_count = MAX2(shmem_count, min_shmem_count);
      submit =
         malloc(sizeof(*submit) + sizeof(submit->shmems[0]) * shmem_count);
   }

   return submit;
}

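/* Write the encoded command stream to the ring, publish the new tail,
 * retire submits the renderer has already consumed, and queue this submit
 * with ring->cur as its seqno.  Returns whether the renderer has reported
 * itself idle via VN_RING_STATUS_IDLE, in which case the caller presumably
 * needs to wake it up explicitly instead of relying on it polling the
 * tail.
 */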
bool
vn_ring_submit(struct vn_ring *ring,
               struct vn_ring_submit *submit,
               const struct vn_cs_encoder *cs,
               uint32_t *seqno)
{
   /* write cs to the ring */
   assert(!vn_cs_encoder_is_empty(cs));
   uint32_t cur_seqno;
   for (uint32_t i = 0; i < cs->buffer_count; i++) {
      const struct vn_cs_encoder_buffer *buf = &cs->buffers[i];
      cur_seqno = vn_ring_wait_space(ring, buf->committed_size);
      vn_ring_write_buffer(ring, buf->base, buf->committed_size);
   }

   vn_ring_store_tail(ring);
   const bool notify = vn_ring_load_status(ring) & VN_RING_STATUS_IDLE;

   vn_ring_retire_submits(ring, cur_seqno);

   submit->seqno = ring->cur;
   list_addtail(&submit->head, &ring->submits);

   *seqno = submit->seqno;
   return notify;
}

/**
 * This is thread-safe.
 */
void
vn_ring_wait(const struct vn_ring *ring, uint32_t seqno)
{
   vn_ring_wait_seqno(ring, seqno);
}

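/* Wait for all submits made so far.  The pending seqno is read from the
 * shared tail rather than ring->cur, since the tail can be loaded
 * atomically while ring->cur may be updated concurrently by a submitting
 * thread.
 */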
void
vn_ring_wait_all(const struct vn_ring *ring)
{
   /* load from tail rather than ring->cur for atomicity */
   const uint32_t pending_seqno =
      atomic_load_explicit(ring->shared.tail, memory_order_relaxed);
   vn_ring_wait(ring, pending_seqno);
}