/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__

#include "msm_drv.h"

#define rbmemptr(ring, member) \
        ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

#define rbmemptr_stats(ring, index, member) \
        (rbmemptr((ring), stats) + \
        ((index) * sizeof(struct msm_gpu_submit_stats)) + \
        offsetof(struct msm_gpu_submit_stats, member))
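
/*
 * Illustrative usage of the helpers above (a sketch, not code from this
 * file): rbmemptr() yields the GPU-visible (iova) address of a field in
 * the ring's memptrs block, and rbmemptr_stats() the address of one
 * counter slot for a given submit index.  The variable names below are
 * placeholders:
 *
 *      uint64_t fence_iova = rbmemptr(ring, fence);
 *      uint64_t start_iova = rbmemptr_stats(ring, index, alwayson_start);
 *
 * A GPU backend would typically emit such addresses in command packets
 * so the CP can write fence values and profiling counters back to memory.
 */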

struct msm_gpu_submit_stats {
        u64 cpcycles_start;
        u64 cpcycles_end;
        u64 alwayson_start;
        u64 alwayson_end;
};

#define MSM_GPU_SUBMIT_STATS_COUNT 64

/* Shared memory block that the GPU writes back to */
struct msm_rbmemptrs {
        volatile uint32_t rptr;                 /* CP read pointer */
        volatile uint32_t fence;                /* last completed submit seqno */

        volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
        volatile u64 ttbr0;                     /* current pagetable base */
};
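
/*
 * Illustrative CPU-side use (a sketch, not code from this file): the
 * kernel can poll the GPU-written fence value to detect forward progress,
 * roughly along the lines of hangcheck-style code:
 *
 *      uint32_t fence = ring->memptrs->fence;
 *
 *      if (fence != ring->hangcheck_fence) {
 *              ... the GPU is still making progress ...
 *      }
 */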

struct msm_ringbuffer {
        struct msm_gpu *gpu;
        int id;                                 /* ring index */
        struct drm_gem_object *bo;              /* GEM object backing the ring */
        uint32_t *start, *end, *cur, *next;     /* CPU pointers into the ring */
        struct list_head submits;               /* submits queued on this ring */
        uint64_t iova;                          /* GPU address of the ring */
        uint32_t seqno;                         /* last assigned submit seqno */
        uint32_t hangcheck_fence;               /* fence seen at last hangcheck */
        struct msm_rbmemptrs *memptrs;
        uint64_t memptrs_iova;
        struct msm_fence_context *fctx;

        /*
         * preempt_lock protects preemption and serializes wptr updates against
         * preemption. Can be acquired from irq context.
         */
        spinlock_t preempt_lock;
};
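
/*
 * Illustrative locking pattern (a sketch, not code from this file): since
 * preempt_lock can be taken from irq context, code that updates the write
 * pointer would typically use the irqsave variants, e.g.:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&ring->preempt_lock, flags);
 *      ... check preemption state, then write the new wptr ...
 *      spin_unlock_irqrestore(&ring->preempt_lock, flags);
 */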

struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
                void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);

/* ringbuffer helpers (the parts that are the same for a3xx/a2xx/z180..) */

static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
        /*
         * ring->next points to the current command being written - it won't be
         * committed as ring->cur until the flush
         */
        if (ring->next == ring->end)
                ring->next = ring->start;
        *(ring->next++) = data;
}
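
/*
 * Illustrative usage (a sketch, not code from this file): callers emit
 * dwords with OUT_RING() and then publish them via the GPU-specific flush,
 * which advances ring->cur to ring->next and updates the hardware write
 * pointer.  For example, emitting a 64-bit address as two dwords:
 *
 *      OUT_RING(ring, lower_32_bits(iova));
 *      OUT_RING(ring, upper_32_bits(iova));
 */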

#endif /* __MSM_RINGBUFFER_H__ */