/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <poll.h>
#include <vulkan/vulkan.h>

#include "pvr_private.h"
#include "pvr_srv.h"
#include "pvr_srv_sync.h"
#include "util/libsync.h"
#include "util/macros.h"
#include "util/timespec.h"
#include "vk_alloc.h"
#include "vk_log.h"
#include "vk_sync.h"
#include "vk_util.h"

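/* This file implements pvr_srv_sync_type: a vk_sync backend in which each
 * sync object is backed by at most one sync-file fd (fd == -1 means no
 * fence is currently attached).
 */

/* Initializes the CPU-side state of a freshly created sync object. A
 * non-zero initial_value creates the sync already signaled; no fence fd is
 * attached at this point.
 */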
static VkResult pvr_srv_sync_init(struct vk_device *device,
                                  struct vk_sync *sync,
                                  uint64_t initial_value)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   srv_sync->signaled = initial_value ? true : false;
   srv_sync->fd = -1;

   return VK_SUCCESS;
}

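/* Releases the sync-file fd, if any, when the vk_sync object is destroyed. */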
void pvr_srv_sync_finish(struct vk_device *device, struct vk_sync *sync)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   if (srv_sync->fd != -1)
      close(srv_sync->fd);
}

/* Note: this function closes the fd. */
static void pvr_set_sync_state(struct pvr_srv_sync *srv_sync, bool signaled)
{
   if (srv_sync->fd != -1) {
      close(srv_sync->fd);
      srv_sync->fd = -1;
   }

   srv_sync->signaled = signaled;
}

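/* Takes ownership of payload: any fd the sync currently holds is closed and
 * replaced. A payload of -1 detaches the fence and marks the sync signaled.
 */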
void pvr_srv_set_sync_payload(struct pvr_srv_sync *srv_sync, int payload)
{
   if (srv_sync->fd != -1)
      close(srv_sync->fd);

   srv_sync->fd = payload;
   srv_sync->signaled = (payload == -1);
}

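/* CPU-side signal: drops any pending fence fd and marks the sync signaled. */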
static VkResult pvr_srv_sync_signal(struct vk_device *device,
                                    struct vk_sync *sync,
                                    UNUSED uint64_t value)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   pvr_set_sync_state(srv_sync, true);

   return VK_SUCCESS;
}

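/* CPU-side reset: drops any pending fence fd and returns the sync to the
 * unsignaled state.
 */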
static VkResult pvr_srv_sync_reset(struct vk_device *device,
                                   struct vk_sync *sync)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   pvr_set_sync_state(srv_sync, false);

   return VK_SUCCESS;
}

/* Careful, the computed end time may overflow for large timeout_ns values. */
static inline void pvr_start_timeout(struct timespec *timeout,
                                     uint64_t timeout_ns)
{
   clock_gettime(CLOCK_MONOTONIC, timeout);
   timespec_add_nsec(timeout, timeout, timeout_ns);
}

/* Careful, a negative value might be returned. */
static inline struct timespec
pvr_get_remaining_time(const struct timespec *timeout)
{
   struct timespec time;

   clock_gettime(CLOCK_MONOTONIC, &time);
   timespec_sub(&time, timeout, &time);

   return time;
}

/* abs_timeout_ns == 0 -> Get status without waiting.
 * abs_timeout_ns == UINT64_MAX -> Wait indefinitely.
 * else -> Wait until the given abs_timeout_ns, in nanoseconds.
 */
static VkResult pvr_srv_sync_wait_many(struct vk_device *device,
                                       uint32_t wait_count,
                                       const struct vk_sync_wait *waits,
                                       enum vk_sync_wait_flags wait_flags,
                                       uint64_t abs_timeout_ns)
{
   uint32_t unsignaled_count = 0U;
   struct timespec end_time;
   VkResult result;
   int ppoll_ret;

   STACK_ARRAY(struct pollfd, poll_fds, wait_count);
   if (!poll_fds)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (abs_timeout_ns != 0U && abs_timeout_ns != UINT64_MAX) {
      /* Syncobj timeouts are signed. */
      abs_timeout_ns = MIN2(abs_timeout_ns, (uint64_t)INT64_MAX);
      pvr_start_timeout(&end_time, abs_timeout_ns);
   }

   for (uint32_t i = 0U; i < wait_count; i++) {
      struct pvr_srv_sync *srv_sync = to_srv_sync(waits[i].sync);

      /* Use -1 if the fence is signaled or uninitialized; ppoll() skips
       * pollfd entries with a negative fd.
       */
      /* FIXME: We don't currently support the wait-for-fd path, so the
       * caller should make sure all the syncs have been assigned before
       * calling this function.
       */
      if (srv_sync->signaled || srv_sync->fd == -1) {
         poll_fds[i].fd = -1;
      } else {
         poll_fds[i].fd = srv_sync->fd;
         unsignaled_count++;
      }

      poll_fds[i].events = POLLIN;
      poll_fds[i].revents = 0U;
   }

   if (unsignaled_count == 0U) {
      result = VK_SUCCESS;
      goto end_wait_for_fences;
   }

   /* FIXME: Fix device loss handling. */
   do {
      if (abs_timeout_ns == UINT64_MAX) {
         ppoll_ret = ppoll(poll_fds, wait_count, NULL, NULL);
      } else {
         struct timespec remaining_time;

         if (abs_timeout_ns == 0U) {
            remaining_time = (struct timespec){ 0UL, 0UL };
         } else {
            /* A negative remaining time makes ppoll() fail with EINVAL;
             * that case is mapped to VK_TIMEOUT below, so there is nothing
             * to worry about here.
             */
            remaining_time = pvr_get_remaining_time(&end_time);
         }

         ppoll_ret = ppoll(poll_fds, wait_count, &remaining_time, NULL);
      }

      if (ppoll_ret > 0U) {
         /* ppoll_ret is the number of pollfd entries updated by ppoll(),
          * i.e. the fences that just signaled or errored out.
          */
         unsignaled_count -= ppoll_ret;

         /* The ppoll_ret > 0 check allows early loop termination. */
         for (uint32_t i = 0; ppoll_ret > 0 && i < wait_count; i++) {
            if (poll_fds[i].revents == 0)
               continue;

            if (poll_fds[i].revents & (POLLNVAL | POLLERR)) {
               result = vk_error(NULL, VK_ERROR_DEVICE_LOST);
               goto end_wait_for_fences;
            }

            pvr_srv_sync_signal(device, waits[i].sync, 0U);

            if (wait_flags & VK_SYNC_WAIT_ANY) {
               result = VK_SUCCESS;
               goto end_wait_for_fences;
            }

            /* Setting the fd to -1 makes ppoll() ignore this entry and
             * leave its revents at 0.
             */
            poll_fds[i].fd = -1;
            ppoll_ret--;
         }

         /* For a zero timeout, just return even if we still have unsignaled
          * syncs.
          */
         if (abs_timeout_ns == 0U && unsignaled_count != 0U) {
            result = VK_TIMEOUT;
            goto end_wait_for_fences;
         }
      } else if (ppoll_ret == 0) {
         result = VK_TIMEOUT;
         goto end_wait_for_fences;
      }

      /* Careful as we might have decremented ppoll_ret to 0. */
   } while ((ppoll_ret != -1 && unsignaled_count != 0) ||
            (ppoll_ret == -1 && (errno == EINTR || errno == EAGAIN)));

   /* We assume device loss in case of an unknown error or invalid fd. */
   if (ppoll_ret != -1)
      result = VK_SUCCESS;
   else if (errno == EINVAL)
      result = VK_TIMEOUT;
   else if (errno == ENOMEM)
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
   else
      result = vk_error(NULL, VK_ERROR_DEVICE_LOST);

end_wait_for_fences:
   STACK_ARRAY_FINISH(poll_fds);

   return result;
}

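/* Transfers src's fence payload to dst and resets src. Only supported for
 * non-shared sync objects.
 */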
static VkResult pvr_srv_sync_move(struct vk_device *device,
                                  struct vk_sync *dst,
                                  struct vk_sync *src)
{
   struct pvr_srv_sync *srv_dst_sync = to_srv_sync(dst);
   struct pvr_srv_sync *srv_src_sync = to_srv_sync(src);

   if (!(dst->flags & VK_SYNC_IS_SHARED) && !(src->flags & VK_SYNC_IS_SHARED)) {
      pvr_srv_set_sync_payload(srv_dst_sync, srv_src_sync->fd);
      srv_src_sync->fd = -1;
      pvr_srv_sync_reset(device, src);
      return VK_SUCCESS;
   }

   unreachable("srv_sync doesn't support move for shared sync objects.");
   return VK_ERROR_UNKNOWN;
}

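/* Attaches a duplicate of the given sync-file fd as the sync's payload; the
 * caller keeps ownership of sync_file itself.
 */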
static VkResult pvr_srv_sync_import_sync_file(struct vk_device *device,
                                              struct vk_sync *sync,
                                              int sync_file)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);
   int fd = -1;

   if (sync_file >= 0) {
      fd = dup(sync_file);
      if (fd < 0)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   pvr_srv_set_sync_payload(srv_sync, fd);

   return VK_SUCCESS;
}

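/* Returns a duplicate of the sync's fence fd in *sync_file, or -1 if no
 * fence is attached; the sync keeps its own fd.
 */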
static VkResult pvr_srv_sync_export_sync_file(struct vk_device *device,
                                              struct vk_sync *sync,
                                              int *sync_file)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);
   int fd;

   if (srv_sync->fd < 0) {
      *sync_file = -1;
      return VK_SUCCESS;
   }

   fd = dup(srv_sync->fd);
   if (fd < 0)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   *sync_file = fd;

   return VK_SUCCESS;
}

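/* vk_sync_type vtable wiring the srv_sync implementation into Mesa's common
 * Vulkan synchronization framework.
 */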
const struct vk_sync_type pvr_srv_sync_type = {
   .size = sizeof(struct pvr_srv_sync),
   /* clang-format off */
   .features = VK_SYNC_FEATURE_BINARY |
               VK_SYNC_FEATURE_GPU_WAIT |
               VK_SYNC_FEATURE_GPU_MULTI_WAIT |
               VK_SYNC_FEATURE_CPU_WAIT |
               VK_SYNC_FEATURE_CPU_RESET |
               VK_SYNC_FEATURE_CPU_SIGNAL |
               VK_SYNC_FEATURE_WAIT_ANY,
   /* clang-format on */
   .init = pvr_srv_sync_init,
   .finish = pvr_srv_sync_finish,
   .signal = pvr_srv_sync_signal,
   .reset = pvr_srv_sync_reset,
   .wait_many = pvr_srv_sync_wait_many,
   .move = pvr_srv_sync_move,
   .import_sync_file = pvr_srv_sync_import_sync_file,
   .export_sync_file = pvr_srv_sync_export_sync_file,
};
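/* Usage sketch (an assumption for illustration, not part of this file): in
 * Mesa's common Vulkan runtime a driver typically advertises its sync types
 * through the physical device's NULL-terminated supported_sync_types list,
 * along the lines of:
 *
 *    static const struct vk_sync_type *const sync_types[] = {
 *       &pvr_srv_sync_type,
 *       NULL,
 *    };
 *
 *    pdevice->vk.supported_sync_types = sync_types;
 *
 * The runtime then selects a suitable type from this list when creating
 * VkFence and VkSemaphore payloads.
 */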