/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/intel_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

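/**
 * Create a new DRM syncobj via DRM_IOCTL_SYNCOBJ_CREATE.
 *
 * Returns the kernel handle, or 0 if the ioctl failed.
 */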
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

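/** Destroy a DRM syncobj handle via DRM_IOCTL_SYNCOBJ_DESTROY. */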
static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_bufmgr *bufmgr)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

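/**
 * Destroy a sync-point: free the kernel syncobj and the wrapper itself.
 */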
void
iris_syncobj_destroy(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   gem_syncobj_destroy(fd, syncobj->handle);
   free(syncobj);
}

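/**
 * Signal a sync-point immediately from the CPU via DRM_IOCTL_SYNCOBJ_SIGNAL.
 */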
void
iris_syncobj_signal(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct drm_syncobj_array args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
   };

   if (intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &args)) {
      fprintf(stderr, "failed to signal syncobj %"PRIu32"\n",
              syncobj->handle);
   }
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen->bufmgr, store, syncobj);
}

/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * The compute batch, for instance, may be seldom used yet accumulate
 * references to stale render batches that are no longer of interest,
 * so this lets us free those up.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 0; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(bufmgr, *syncobj, 0))
         continue;

      /* This sync object has already passed; there's no need to continue
       * marking it as a dependency, so we can stop holding on to the
       * reference.
       */
      iris_syncobj_reference(bufmgr, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

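/**
 * A Gallium fence, wrapping one fine-grained fence per batch.
 *
 * If the fence was created with PIPE_FLUSH_DEFERRED and hasn't been
 * flushed yet, unflushed_ctx records the context that created it.
 */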
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

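/** Release every fine fence held by the fence, then free the fence itself. */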
static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

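/**
 * The pipe_screen::fence_reference hook: repoint *dst at src, destroying
 * the old fence once its reference count drops to zero.
 */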
static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

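/**
 * Wait on a syncobj via DRM_IOCTL_SYNCOBJ_WAIT.
 *
 * Returns false once the syncobj has signalled (or if it is NULL), and
 * true if the ioctl failed or the timeout expired first.
 */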
bool
iris_wait_syncobj(struct iris_bufmgr *bufmgr,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   int fd = iris_bufmgr_get_fd(bufmgr);

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

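/**
 * The pipe_context::flush hook.
 *
 * Flushes all batches (unless a deferred flush was requested and is
 * supported), and optionally returns a pipe_fence_handle covering the
 * outstanding work on every batch.
 */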
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG(DEBUG_SUBMIT)) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 INTEL_DEBUG(DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 INTEL_DEBUG(DEBUG_COLOR) ? NORMAL : "");
      }
   }

   iris_flush_dirty_dmabufs(ice);

   if (!deferred) {
      iris_foreach_batch(ice, batch)
         iris_batch_flush(batch);
   }

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      iris_measure_frame_end(ice);
   }

   u_trace_context_process(&ice->ds.trace_context,
                           flags & PIPE_FLUSH_END_OF_FRAME);

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   iris_foreach_batch(ice, batch) {
      unsigned b = batch->name;

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

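/**
 * The pipe_context::fence_server_sync hook: make all future work in this
 * context's batches wait on the given fence before executing on the GPU.
 */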
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      util_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      iris_foreach_batch(ice, batch) {
         /* We're going to make any future work in this batch wait for our
          * fence to have gone by.  But any currently queued work doesn't
          * need to wait.  Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

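/** Return the current CLOCK_MONOTONIC time, in nanoseconds. */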
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

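/**
 * Convert a relative timeout in nanoseconds to an absolute CLOCK_MONOTONIC
 * deadline, clamping so the result doesn't overflow INT64_MAX.  A zero
 * timeout stays zero (effectively a poll).
 */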
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

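/**
 * The pipe_screen::fence_finish hook: wait (up to \p timeout nanoseconds)
 * for every unsignalled fine fence in \p fence, flushing our own deferred
 * work first if we created the fence.
 */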
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   ctx = threaded_context_unwrap_sync(ctx);

   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      iris_foreach_batch(ice, batch) {
         struct iris_fine_fence *fine = fence->fine[batch->name];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(batch))
            iris_batch_flush(batch);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

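/**
 * Merge two sync_file FDs into one via SYNC_IOC_MERGE, closing both inputs.
 * Either input may be -1, in which case the other is returned unchanged.
 */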
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   intel_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

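/**
 * The pipe_screen::fence_get_fd hook: export the fence as a sync_file FD,
 * merging the sync_files of all unsignalled fine fences into one.
 */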
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

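/**
 * The pipe_context::create_fence_fd hook: import a sync_file or syncobj FD
 * and wrap it in a pipe_fence_handle so the rest of the driver can wait on it.
 */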
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC || type == PIPE_FD_TYPE_SYNCOBJ);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   if (type == PIPE_FD_TYPE_NATIVE_SYNC) {
      args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
   }

   if (intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      if (type == PIPE_FD_TYPE_NATIVE_SYNC)
         gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence.  So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

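/**
 * The pipe_context::fence_server_signal hook: make every batch signal the
 * fence's syncobjs on the GPU, then flush any batch that queued a signal.
 */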
static void
iris_fence_signal(struct pipe_context *ctx,
                  struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (ctx == fence->unflushed_ctx)
      return;

   iris_foreach_batch(ice, batch) {
      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         /* Skip fences that have already signaled. */
         if (iris_fine_fence_signaled(fine))
            continue;

         batch->contains_fence_signal = true;
         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_SIGNAL);
      }
      if (batch->contains_fence_signal)
         iris_batch_flush(batch);
   }
}

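/* Wire the fence implementations above into the Gallium screen and context
 * vtables.
 */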
void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
   ctx->fence_server_signal = iris_fence_signal;
}