/drivers/gpu/drm/v3d/
  v3d_trace.h:
    39  uint64_t seqno,
    41  TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea),
    46  __field(u64, seqno)
    54  __entry->seqno = seqno;
    62  __entry->seqno,
    69  uint64_t seqno),
    70  TP_ARGS(dev, seqno),
    74  __field(u64, seqno)
    79  __entry->seqno = seqno;
    84  __entry->seqno)
    [all …]
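The v3d hits above, and the vc4, radeon, msm, i915, scheduler and amdgpu trace headers further down, are all instances of the same TRACE_EVENT pattern: the seqno is captured into a __field at trace time and rendered by TP_printk. A distilled sketch of that pattern follows; the event name is hypothetical, the dev/seqno assignments mirror the vc4 lines below, and the TRACE_SYSTEM/TRACE_INCLUDE boilerplate a real trace header needs is omitted, so this only builds inside a kernel tree.

    #include <linux/tracepoint.h>

    TRACE_EVENT(foo_fence_signaled,                 /* hypothetical event name */
        TP_PROTO(struct drm_device *dev, u64 seqno),
        TP_ARGS(dev, seqno),

        TP_STRUCT__entry(
            __field(u32, dev)                       /* device index, copied at trace time */
            __field(u64, seqno)                     /* fence sequence number */
        ),

        TP_fast_assign(
            __entry->dev = dev->primary->index;
            __entry->seqno = seqno;
        ),

        TP_printk("dev=%u, seqno=%llu", __entry->dev, __entry->seqno)
    );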
/drivers/media/pci/saa7164/
  saa7164-cmd.c:
    22  ret = dev->cmds[i].seqno;  in saa7164_cmd_alloc_seqno()
    31  static void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)  in saa7164_cmd_free_seqno() argument
    34  if ((dev->cmds[seqno].inuse == 1) &&  in saa7164_cmd_free_seqno()
    35  (dev->cmds[seqno].seqno == seqno)) {  in saa7164_cmd_free_seqno()
    36  dev->cmds[seqno].inuse = 0;  in saa7164_cmd_free_seqno()
    37  dev->cmds[seqno].signalled = 0;  in saa7164_cmd_free_seqno()
    38  dev->cmds[seqno].timeout = 0;  in saa7164_cmd_free_seqno()
    43  static void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)  in saa7164_cmd_timeout_seqno() argument
    46  if ((dev->cmds[seqno].inuse == 1) &&  in saa7164_cmd_timeout_seqno()
    47  (dev->cmds[seqno].seqno == seqno)) {  in saa7164_cmd_timeout_seqno()
    [all …]
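What these snippets show is a small table-based allocator: each command slot carries its own seqno plus inuse/signalled/timeout state, and freeing a seqno sanity-checks that the slot really owns it. A minimal userspace model of that bookkeeping; the init and alloc sides are guesses inferred from the free path above, and all names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define N_CMDS 8

    struct cmd_slot {
        int inuse, signalled, timeout;
        uint8_t seqno;
    };
    static struct cmd_slot cmds[N_CMDS];

    /* Assumed setup: each slot's seqno is simply its index. */
    static void init_slots(void)
    {
        for (int i = 0; i < N_CMDS; i++)
            cmds[i].seqno = (uint8_t)i;
    }

    /* Claim the first free slot and hand back its seqno, or -1 if all busy. */
    static int alloc_seqno(void)
    {
        for (int i = 0; i < N_CMDS; i++) {
            if (!cmds[i].inuse) {
                cmds[i].inuse = 1;
                return cmds[i].seqno;
            }
        }
        return -1;
    }

    /* Release a slot, but only if it is in use and really holds this seqno. */
    static void free_seqno(uint8_t seqno)
    {
        if (cmds[seqno].inuse == 1 && cmds[seqno].seqno == seqno) {
            cmds[seqno].inuse = 0;
            cmds[seqno].signalled = 0;
            cmds[seqno].timeout = 0;
        }
    }

    int main(void)
    {
        init_slots();
        int s = alloc_seqno();
        printf("got seqno %d\n", s);   /* prints 0 */
        free_seqno((uint8_t)s);
        return 0;
    }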
/drivers/gpu/drm/vc4/
  vc4_trace.h:
    18  TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
    19  TP_ARGS(dev, seqno, timeout),
    23  __field(u64, seqno)
    29  __entry->seqno = seqno;
    34  __entry->dev, __entry->seqno, __entry->timeout)
    38  TP_PROTO(struct drm_device *dev, uint64_t seqno),
    39  TP_ARGS(dev, seqno),
    43  __field(u64, seqno)
    48  __entry->seqno = seqno;
    52  __entry->dev, __entry->seqno)
  vc4_gem.c:
    381  vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,  in vc4_wait_for_seqno() argument
    389  if (vc4->finished_seqno >= seqno)  in vc4_wait_for_seqno()
    397  trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);  in vc4_wait_for_seqno()
    408  if (vc4->finished_seqno >= seqno)  in vc4_wait_for_seqno()
    423  trace_vc4_wait_for_seqno_end(dev, seqno);  in vc4_wait_for_seqno()
    537  vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)  in vc4_update_bo_seqnos() argument
    544  bo->seqno = seqno;  in vc4_update_bo_seqnos()
    550  bo->seqno = seqno;  in vc4_update_bo_seqnos()
    555  bo->write_seqno = seqno;  in vc4_update_bo_seqnos()
    671  uint64_t seqno;  in vc4_queue_submit() local
    [all …]
/drivers/gpu/drm/vmwgfx/
  vmwgfx_irq.c:
    111  static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)  in vmw_fifo_idle() argument
    121  uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);  in vmw_update_seqno() local
    123  if (dev_priv->last_read_seqno != seqno) {  in vmw_update_seqno()
    124  dev_priv->last_read_seqno = seqno;  in vmw_update_seqno()
    125  vmw_marker_pull(&fifo_state->marker_queue, seqno);  in vmw_update_seqno()
    131  uint32_t seqno)  in vmw_seqno_passed() argument
    136  if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))  in vmw_seqno_passed()
    141  if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))  in vmw_seqno_passed()
    145  vmw_fifo_idle(dev_priv, seqno))  in vmw_seqno_passed()
    153  ret = ((atomic_read(&dev_priv->marker_seq) - seqno)  in vmw_seqno_passed()
    [all …]
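vmw_seqno_passed relies on the standard wraparound-safe trick: unsigned subtraction is modulo 2^32, so `last_read_seqno - seqno < VMW_FENCE_WRAP` stays correct across the 0xffffffff → 0 wrap as long as the two counters never drift apart by more than the window. A self-contained sketch; the window constant here is illustrative, not necessarily the driver's value:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define FENCE_WRAP (1u << 24)   /* illustrative window size */

    /* True if hardware has reached `seqno`, i.e. last_read is at or past
     * it within the window; modulo-2^32 subtraction handles the wrap. */
    static bool seqno_passed(uint32_t last_read, uint32_t seqno)
    {
        return last_read - seqno < FENCE_WRAP;
    }

    int main(void)
    {
        assert(seqno_passed(10, 5));             /* plainly passed */
        assert(!seqno_passed(5, 10));            /* not yet */
        assert(seqno_passed(3, 0xfffffffeu));    /* passed across the wrap */
        assert(!seqno_passed(0xfffffffeu, 3));   /* still waiting across the wrap */
        return 0;
    }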
  vmwgfx_marker.c:
    33   uint32_t seqno;  member
    57   uint32_t seqno)  in vmw_marker_push() argument
    64   marker->seqno = seqno;  in vmw_marker_push()
    91   if (signaled_seqno - marker->seqno > (1 << 30))  in vmw_marker_pull()
    132  uint32_t seqno;  in vmw_wait_lag() local
    138  seqno = atomic_read(&dev_priv->marker_seq);  in vmw_wait_lag()
    142  seqno = marker->seqno;  in vmw_wait_lag()
    146  ret = vmw_wait_seqno(dev_priv, false, seqno, true,  in vmw_wait_lag()
    152  (void) vmw_marker_pull(queue, seqno);  in vmw_wait_lag()
  vmwgfx_fence.c:
    145  u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);  in vmw_fence_enable_signaling() local
    146  if (seqno - fence->base.seqno < VMW_FENCE_WRAP)  in vmw_fence_enable_signaling()
    340  struct vmw_fence_obj *fence, u32 seqno,  in vmw_fence_obj_init() argument
    346  fman->ctx, seqno);  in vmw_fence_obj_init()
    419  vmw_mmio_write(fence->base.seqno,  in vmw_fence_goal_new_locked()
    456  goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))  in vmw_fence_goal_check_locked()
    459  vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);  in vmw_fence_goal_check_locked()
    470  uint32_t seqno, new_seqno;  in __vmw_fences_update() local
    473  seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);  in __vmw_fences_update()
    476  if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {  in __vmw_fences_update()
    [all …]
/drivers/dma-buf/
  dma-fence-chain.c:
    89   int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)  in dma_fence_chain_find_seqno() argument
    93   if (!seqno)  in dma_fence_chain_find_seqno()
    97   if (!chain || chain->base.seqno < seqno)  in dma_fence_chain_find_seqno()
    102  to_dma_fence_chain(*pfence)->prev_seqno < seqno)  in dma_fence_chain_find_seqno()
    231  uint64_t seqno)  in dma_fence_chain_init() argument
    243  if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {  in dma_fence_chain_init()
    245  chain->prev_seqno = prev->seqno;  in dma_fence_chain_init()
    250  seqno = max(prev->seqno, seqno);  in dma_fence_chain_init()
    254  &chain->lock, context, seqno);  in dma_fence_chain_init()
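dma_fence_chain_find_seqno walks the chain backwards from the newest link: each link records the highest seqno the previous link covered (prev_seqno), so the walk stops at the first link whose range still contains the requested point. A userspace model of just that walk, with all names hypothetical:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct chain_node {
        uint64_t seqno;        /* highest point this link covers */
        uint64_t prev_seqno;   /* highest point the previous link covers */
        struct chain_node *prev;
    };

    /* Find the oldest link whose range (prev_seqno, seqno] contains
     * `seqno`, or NULL if the chain has not reached that point yet. */
    static struct chain_node *chain_find_seqno(struct chain_node *head,
                                               uint64_t seqno)
    {
        struct chain_node *node = head;

        if (!node || node->seqno < seqno)
            return NULL;                /* beyond the head of the chain */
        while (node->prev && node->prev_seqno >= seqno)
            node = node->prev;          /* an older link still covers it */
        return node;
    }

    int main(void)
    {
        struct chain_node a = { 10, 0, NULL };
        struct chain_node b = { 20, 10, &a };
        struct chain_node c = { 30, 20, &b };

        /* Point 15 falls in link b's range (10, 20]; prints 20. */
        printf("%llu\n", (unsigned long long)chain_find_seqno(&c, 15)->seqno);
        return 0;
    }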
/drivers/gpu/drm/i915/selftests/
  i915_syncmap.c:
    146  static int check_seqno(struct i915_syncmap *leaf, unsigned int idx, u32 seqno)  in check_seqno() argument
    154  if (__sync_seqno(leaf)[idx] != seqno) {  in check_seqno()
    156  __func__, idx, __sync_seqno(leaf)[idx], seqno);  in check_seqno()
    163  static int check_one(struct i915_syncmap **sync, u64 context, u32 seqno)  in check_one() argument
    167  err = i915_syncmap_set(sync, context, seqno);  in check_one()
    189  err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno);  in check_one()
    193  if (!i915_syncmap_is_later(sync, context, seqno)) {  in check_one()
    195  context, seqno);  in check_one()
    238  static int check_leaf(struct i915_syncmap **sync, u64 context, u32 seqno)  in check_leaf() argument
    242  err = i915_syncmap_set(sync, context, seqno);  in check_leaf()
    [all …]
  igt_spinner.c:
    41   spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);  in igt_spinner_init()
    138  *batch++ = rq->fence.seqno;  in igt_spinner_create_request()
    173  u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);  in hws_seqno() local
    175  return READ_ONCE(*seqno);  in hws_seqno()
    198  rq->fence.seqno),  in igt_wait_for_spinner()
    201  rq->fence.seqno),  in igt_wait_for_spinner()
/drivers/gpu/drm/radeon/
  radeon_trace.h:
    127  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
    129  TP_ARGS(dev, ring, seqno),
    134  __field(u32, seqno)
    140  __entry->seqno = seqno;
    144  __entry->dev, __entry->ring, __entry->seqno)
    149  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
    151  TP_ARGS(dev, ring, seqno)
    156  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
    158  TP_ARGS(dev, ring, seqno)
    163  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
    [all …]
/drivers/gpu/drm/i915/gt/
  intel_timeline.h:
    56  u64 context, u32 seqno)  in __intel_timeline_sync_set() argument
    58  return i915_syncmap_set(&tl->sync, context, seqno);  in __intel_timeline_sync_set()
    64  return __intel_timeline_sync_set(tl, fence->context, fence->seqno);  in intel_timeline_sync_set()
    68  u64 context, u32 seqno)  in __intel_timeline_sync_is_later() argument
    70  return i915_syncmap_is_later(&tl->sync, context, seqno);  in __intel_timeline_sync_is_later()
    76  return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);  in intel_timeline_sync_is_later()
    83  u32 *seqno);
  selftest_timeline.c:
    161  u32 seqno;  member
    173  if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {  in __igt_sync()
    175  name, p->name, ctx, p->seqno, yesno(p->expected));  in __igt_sync()
    180  ret = __intel_timeline_sync_set(tl, ctx, p->seqno);  in __igt_sync()
    358  u32 seqno = prandom_u32_state(&prng);  in bench_sync() local
    360  if (!__intel_timeline_sync_is_later(&tl, id, seqno))  in bench_sync()
    361  __intel_timeline_sync_set(&tl, id, seqno);  in bench_sync()
    486  if (*tl->hwsp_seqno != tl->seqno) {  in checked_intel_timeline_create()
    488  *tl->hwsp_seqno, tl->seqno);  in checked_intel_timeline_create()
    679  u32 seqno[2];  in live_hwsp_wrap() local
    [all …]
  intel_breadcrumbs.c:
    83   return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);  in __request_completed()
    90   i915_seqno_passed(rq->fence.seqno,  in check_signal_order()
    91   list_next_entry(rq, signal_link)->fence.seqno))  in check_signal_order()
    95   i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,  in check_signal_order()
    96   rq->fence.seqno))  in check_signal_order()
    308  if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))  in i915_request_enable_breadcrumb()
    365  rq->fence.context, rq->fence.seqno,  in intel_engine_print_breadcrumbs()
  intel_timeline.c:
    380  GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);  in timeline_advance()
    382  return tl->seqno += 1 + tl->has_initial_breadcrumb;  in timeline_advance()
    387  tl->seqno -= 1 + tl->has_initial_breadcrumb;  in timeline_rollback()
    393  u32 *seqno)  in __intel_timeline_get_seqno() argument
    467  *seqno = timeline_advance(tl);  in __intel_timeline_get_seqno()
    468  GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));  in __intel_timeline_get_seqno()
    482  u32 *seqno)  in intel_timeline_get_seqno() argument
    484  *seqno = timeline_advance(tl);  in intel_timeline_get_seqno()
    487  if (unlikely(!*seqno && tl->hwsp_cacheline))  in intel_timeline_get_seqno()
    488  return __intel_timeline_get_seqno(tl, rq, seqno);  in intel_timeline_get_seqno()
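timeline_advance and timeline_rollback are an exact pair: a request consumes one seqno, plus one more when the timeline also emits an initial breadcrumb, and the GEM_BUG_ON at line 380 asserts the invariant that keeps the counter even in that mode. A trivial userspace rendering of lines 380-387:

    #include <assert.h>
    #include <stdint.h>

    struct timeline {
        uint32_t seqno;
        unsigned int has_initial_breadcrumb : 1;
    };

    static uint32_t timeline_advance(struct timeline *tl)
    {
        /* With an initial breadcrumb every request takes two slots,
         * so seqno stays even; the kernel asserts this invariant. */
        assert(!(tl->seqno & tl->has_initial_breadcrumb));
        return tl->seqno += 1 + tl->has_initial_breadcrumb;
    }

    static void timeline_rollback(struct timeline *tl)
    {
        tl->seqno -= 1 + tl->has_initial_breadcrumb;
    }

    int main(void)
    {
        struct timeline tl = { 0, 1 };
        assert(timeline_advance(&tl) == 2);   /* breadcrumb slot + payload slot */
        timeline_rollback(&tl);               /* undo when a request is aborted */
        assert(tl.seqno == 0);
        return 0;
    }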
/drivers/gpu/drm/msm/
  msm_gpu_trace.h:
    40  __field(u32, seqno)
    47  __entry->seqno = submit->seqno;
    51  __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
    64  __field(u32, seqno)
    74  __entry->seqno = submit->seqno;
    81  __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
/drivers/net/wireless/mediatek/mt76/
  agg-rx.c:
    77   mt76_rx_aggr_release_frames(tid, frames, status->seqno);  in mt76_rx_aggr_check_release()
    118  u16 seqno;  in mt76_rx_aggr_check_ctl() local
    127  seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));  in mt76_rx_aggr_check_ctl()
    133  mt76_rx_aggr_release_frames(tid, frames, seqno);  in mt76_rx_aggr_check_ctl()
    146  u16 seqno, head, size;  in mt76_rx_aggr_reorder() local
    177  seqno = status->seqno;  in mt76_rx_aggr_reorder()
    179  sn_less = ieee80211_sn_less(seqno, head);  in mt76_rx_aggr_reorder()
    194  if (seqno == head) {  in mt76_rx_aggr_reorder()
    207  if (!ieee80211_sn_less(seqno, head + size)) {  in mt76_rx_aggr_reorder()
    208  head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));  in mt76_rx_aggr_reorder()
    [all …]
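The reorder logic leans on mac80211's 12-bit sequence-number helpers: 802.11 SNs live in [0, 4095], so "less than" means "behind by less than half the sequence space". A self-contained approximation of the ieee80211_sn_less/inc/sub helpers used at lines 179-208 (the real helpers live in include/linux/ieee80211.h; these are illustrative equivalents):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SN_MASK   0xfff              /* 802.11 sequence numbers are 12 bits */
    #define SN_MODULO (SN_MASK + 1)

    static uint16_t sn_sub(uint16_t a, uint16_t b) { return (a - b) & SN_MASK; }
    static uint16_t sn_inc(uint16_t a)             { return (a + 1) & SN_MASK; }

    /* a is "before" b iff it trails by less than half the sequence space. */
    static bool sn_less(uint16_t a, uint16_t b)
    {
        return sn_sub(a, b) > SN_MODULO / 2;
    }

    int main(void)
    {
        assert(sn_less(10, 20));
        assert(sn_less(4090, 3));      /* before, across the 4095 -> 0 wrap */
        assert(!sn_less(3, 4090));
        assert(sn_inc(4095) == 0);
        return 0;
    }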
/drivers/gpu/drm/i915/
  i915_syncmap.c:
    154  bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno)  in i915_syncmap_is_later() argument
    195  return seqno_later(__sync_seqno(p)[idx], seqno);  in i915_syncmap_is_later()
    214  static inline void __sync_set_seqno(struct i915_syncmap *p, u64 id, u32 seqno)  in __sync_set_seqno() argument
    219  __sync_seqno(p)[idx] = seqno;  in __sync_set_seqno()
    230  static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno)  in __sync_set() argument
    335  __sync_set_seqno(p, id, seqno);  in __sync_set()
    353  int i915_syncmap_set(struct i915_syncmap **root, u64 id, u32 seqno)  in i915_syncmap_set() argument
    362  __sync_set_seqno(p, id, seqno);  in i915_syncmap_set()
    366  return __sync_set(root, id, seqno);  in i915_syncmap_set()
  i915_request.h:
    356  u32 seqno;  in hwsp_seqno() local
    359  seqno = __hwsp_seqno(rq);  in hwsp_seqno()
    362  return seqno;  in hwsp_seqno()
    367  return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);  in __i915_request_has_started()
    426  return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);  in i915_request_completed()
    431  rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */  in i915_request_mark_complete()
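i915_seqno_passed at lines 367 and 426 is the signed-difference variant of the windowed test used by vmwgfx above: interpreting the 32-bit difference as signed places "passed" anywhere in the half-space at or behind the current value. A sketch of that comparison; treat it as an illustration of the idiom, not the verbatim kernel helper:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* seq1 has passed seq2 if it is equal to it or ahead of it by less
     * than half the 32-bit space; the signed cast handles wraparound. */
    static bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
        assert(seqno_passed(2, 2));              /* equal counts as passed */
        assert(seqno_passed(5, 0xfffffffbu));    /* passed across the wrap */
        assert(!seqno_passed(0xfffffffbu, 5));   /* still waiting */
        return 0;
    }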
  i915_trace.h:
    672  __field(u32, seqno)
    682  __entry->seqno = rq->fence.seqno;
    688  __entry->hw_id, __entry->ctx, __entry->seqno,
    702  __field(u32, seqno)
    711  __entry->seqno = rq->fence.seqno;
    716  __entry->hw_id, __entry->ctx, __entry->seqno)
    745  __field(u32, seqno)
    756  __entry->seqno = rq->fence.seqno;
    763  __entry->hw_id, __entry->ctx, __entry->seqno,
    777  __field(u32, seqno)
    [all …]
/drivers/gpu/drm/virtio/
  virtgpu_fence.c:
    44   if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)  in virtio_fence_signaled()
    51   snprintf(str, size, "%llu", f->seqno);  in virtio_fence_value_str()
    96   fence->f.seqno = ++drv->sync_seq;  in virtio_gpu_fence_emit()
    104  cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);  in virtio_gpu_fence_emit()
    117  if (last_seq < fence->f.seqno)  in virtio_gpu_fence_event_process()
/drivers/net/ppp/
  ppp_deflate.c:
    25   int seqno;  member
    150  state->seqno = 0;  in z_comp_init()
    170  state->seqno = 0;  in z_comp_reset()
    213  put_unaligned_be16(state->seqno, wptr);  in z_compress()
    218  ++state->seqno;  in z_compress()
    365  state->seqno = 0;  in z_decomp_init()
    386  state->seqno = 0;  in z_decomp_reset()
    428  if (seq != (state->seqno & 0xffff)) {  in z_decompress()
    431  state->unit, seq, state->seqno & 0xffff);  in z_decompress()
    434  ++state->seqno;  in z_decompress()
    [all …]
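Deflate-compressed PPP frames carry a 16-bit sequence number so the decompressor can detect a lost packet, after which its history window no longer matches the compressor's. A userspace sketch of the transmit/receive pairing around lines 213-434; the struct and names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct z_state { int unit; unsigned int seqno; };

    /* Transmit side: emit the low 16 bits big-endian, then advance. */
    static void put_seqno(struct z_state *st, uint8_t *wptr)
    {
        wptr[0] = (st->seqno >> 8) & 0xff;
        wptr[1] = st->seqno & 0xff;
        st->seqno++;
    }

    /* Receive side: a mismatch means a frame was lost and decompression
     * state is out of sync, so this packet cannot be decoded. */
    static int check_seqno(struct z_state *st, uint16_t seq)
    {
        if (seq != (st->seqno & 0xffff)) {
            fprintf(stderr, "z_decompress%d: bad seq # %u, expected %u\n",
                    st->unit, seq, st->seqno & 0xffff);
            return -1;
        }
        st->seqno++;
        return 0;
    }

    int main(void)
    {
        struct z_state tx = { 0, 0 }, rx = { 0, 0 };
        uint8_t hdr[2];

        put_seqno(&tx, hdr);
        return check_seqno(&rx, (uint16_t)(hdr[0] << 8 | hdr[1]));
    }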
/drivers/gpu/drm/scheduler/
  gpu_scheduler_trace.h:
    83  __field(unsigned, seqno)
    91  __entry->seqno = fence->seqno;
    96  __entry->seqno)
/drivers/gpu/drm/amd/amdgpu/
  amdgpu_trace.h:
    171  __field(unsigned int, seqno)
    181  __entry->seqno = job->base.s_fence->finished.seqno;
    187  __entry->seqno, __get_str(ring), __entry->num_ibs)
    197  __field(unsigned int, seqno)
    206  __entry->seqno = job->base.s_fence->finished.seqno;
    212  __entry->seqno, __get_str(ring), __entry->num_ibs)
    475  __field(unsigned, seqno)
    483  __entry->seqno = fence->seqno;
    488  __entry->seqno)
/drivers/tty/hvc/
  hvsi.c:
    75   atomic_t seqno; /* HVSI packet sequence number */  member
    211  header->seqno);  in dump_packet()
    287  packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));  in hvsi_version_respond()
    311  hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));  in hvsi_recv_query()
    547  packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));  in hvsi_query()
    589  packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));  in hvsi_set_mctrl()
    672  packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));  in hvsi_put_chars()
    689  packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));  in hvsi_close_protocol()
    718  atomic_set(&hp->seqno, 0);  in hvsi_open()
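Every outbound HVSI packet stamps its header with the next value of a shared atomic counter, byte-swapped to big-endian for the wire. A minimal C11 rendering of the cpu_to_be16(atomic_inc_return(...)) idiom; htobe16 is the glibc byte-order helper standing in for the kernel's cpu_to_be16, and the struct is illustrative:

    #include <endian.h>      /* htobe16(), glibc */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct hv_port { atomic_uint seqno; };

    /* Take the next counter value atomically, then store it big-endian,
     * mirroring packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno)). */
    static uint16_t next_seqno_be(struct hv_port *hp)
    {
        return htobe16((uint16_t)(atomic_fetch_add(&hp->seqno, 1) + 1));
    }

    int main(void)
    {
        struct hv_port hp = { 0 };
        printf("0x%04x\n", next_seqno_be(&hp));  /* 0x0100 on little-endian hosts */
        return 0;
    }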