/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>

/* max number of rings */
#define AMDGPU_MAX_RINGS		28
#define AMDGPU_MAX_HWIP_RINGS		8
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2

#define AMDGPU_RING_PRIO_DEFAULT	1
#define AMDGPU_RING_PRIO_MAX		AMDGPU_GFX_PIPE_PRIO_MAX

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};
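
/*
 * Illustrative sketch only, not part of this header's API: a pool type
 * is typically passed when allocating an IB, e.g. through the IB
 * allocation helper declared elsewhere in the driver.  Job setup and
 * error paths are elided and the size is an arbitrary placeholder.
 *
 *	struct amdgpu_ib ib;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DELAYED, &ib);
 *	if (r)
 *		return r;
 */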

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;

struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};
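
/*
 * Illustrative note: the fences array is a power-of-two ring of
 * in-flight fences, so a sequence number maps to its slot roughly as
 * sketched below; see amdgpu_fence.c for the real lookup logic.
 *
 *	fence = drv->fences[seq & drv->num_fences_mask];
 */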

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
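
/*
 * Illustrative sketch of the polling fence path, used where sleeping
 * is not allowed (e.g. KIQ-style register access).  Ring space is
 * assumed to have been reserved beforehand; "timeout" is a
 * placeholder.  amdgpu_fence_wait_polling() returns the remaining
 * timeout, so a result below 1 means the wait timed out.
 *
 *	uint32_t seq;
 *	signed long r;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq, timeout);
 *	if (r)
 *		goto failed;
 *	amdgpu_ring_commit(ring);
 *	r = amdgpu_fence_wait_polling(ring, seq, timeout);
 *	if (r < 1)
 *		goto failed;
 */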

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	unsigned		vmhub;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
};
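
/*
 * Illustrative sketch: each IP block provides one of these tables per
 * ring type and points ring->funcs at it.  All names and values below
 * are placeholders and heavily trimmed; see the gfx/sdma/vcn IP files
 * for real instances.
 *
 *	static const struct amdgpu_ring_funcs sdma_vX_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_SDMA,
 *		.align_mask = 0xf,
 *		.support_64bit_ptrs = true,
 *		.get_rptr = sdma_vX_ring_get_rptr,
 *		.get_wptr = sdma_vX_ring_get_wptr,
 *		.set_wptr = sdma_vX_ring_set_wptr,
 *		.emit_ib = sdma_vX_ring_emit_ib,
 *		.emit_fence = sdma_vX_ring_emit_fence,
 *		.insert_nop = sdma_vX_ring_insert_nop,
 *		.pad_ib = sdma_vX_ring_pad_ib,
 *	};
 */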

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	unsigned		fence_offs;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;

	atomic_t		num_jobs[DRM_SCHED_PRIORITY_COUNT];
	struct mutex		priority_mutex;
	/* protected by priority_mutex */
	int			priority;

#if defined(CONFIG_DEBUG_FS)
	struct dentry *ent;
#endif
};

#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
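
/*
 * Illustrative note: the wrappers above dispatch straight through
 * ring->funcs without NULL checks, so callers guard the optional
 * hooks themselves, roughly as the IB scheduling code does:
 *
 *	if (ring->funcs->emit_hdp_flush)
 *		amdgpu_ring_emit_hdp_flush(ring);
 */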

int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int ring_size, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int prio);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);

/* Update the in-memory flag tested by conditional-execution packets. */
static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

/* Fill the whole ring buffer with the IP block's NOP packet. */
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

/* Write a single dword to the ring, wrapping at the buffer mask. */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
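
/*
 * Illustrative sketch of the usual direct-submission pattern: reserve
 * space first so count_dw never underflows, then write and commit.
 * The dword written here is just the IP block's NOP; the count of 16
 * is an arbitrary placeholder.
 *
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ring->funcs->nop);
 *	amdgpu_ring_commit(ring);
 */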

/* Copy count_dw dwords from src into the ring, splitting the copy in
 * two when it wraps past the end of the buffer.
 */
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	occupied = ring->wptr & ring->buf_mask;
	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;	/* dwords to bytes */
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		src += chunk1;
		dst = (void *)ring->ring;
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}
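
/*
 * Illustrative sketch: bulk-copying a prebuilt packet, again assuming
 * amdgpu_ring_alloc() reserved enough dwords beforehand.  The packet
 * contents are placeholders filled in elsewhere.
 *
 *	uint32_t pkt[4];
 *
 *	amdgpu_ring_write_multiple(ring, pkt, ARRAY_SIZE(pkt));
 */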

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);

#endif