/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
#include <drm/drm_suballoc.h>

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_vm;

/* max number of rings */
#define AMDGPU_MAX_RINGS		124
#define AMDGPU_MAX_HWIP_RINGS		64
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_SW_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2
#define AMDGPU_MAX_VPE_RINGS		2

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)
#define AMDGPU_FENCE_FLAG_EXEC		(1 << 3)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
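
/*
 * Illustrative sketch (not part of this file's API): because the
 * drm_gpu_scheduler is embedded in struct amdgpu_ring, scheduler code can
 * recover the owning ring with the container_of() wrapper above, e.g.:
 *
 *	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 */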

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_VPE		= AMDGPU_HW_IP_VPE,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES,
	AMDGPU_RING_TYPE_UMSCH_MM,
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};
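
/*
 * Illustrative sketch (error handling trimmed): callers pick the pool that
 * matches their submission path, e.g. DIRECT for work done while the
 * scheduler is not available during init or reset:
 *
 *	struct amdgpu_ib ib;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 */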

struct amdgpu_ib {
	struct drm_suballoc		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	uint32_t			flags;
};

struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
	ktime_t				start_timestamp;
};
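
/*
 * Illustrative sketch (error handling trimmed): a fence is emitted behind
 * previously queued ring commands and can then be waited on from the CPU
 * through the generic dma_fence API; amdgpu_fence_emit() below must be
 * called while space on the ring is still reserved:
 *
 *	struct dma_fence *f = NULL;
 *
 *	r = amdgpu_fence_emit(ring, &f, NULL, 0);
 *	amdgpu_ring_commit(ring);
 *	dma_fence_wait(f, false);
 *	dma_fence_put(f);
 */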

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
		      unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);

u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
					 ktime_t timestamp);

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	bool			secure_submission_supported;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
				u64 gds_va, bool init_shadow, int vmid);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
	void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
	int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
	void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
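
/*
 * Illustrative sketch (hypothetical names, not from this driver): an IP
 * block exposes its ring by filling in a static vtable like this and
 * pointing ring->funcs at it:
 *
 *	static const struct amdgpu_ring_funcs foo_dma_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_SDMA,
 *		.align_mask = 0xf,
 *		.nop = 0,
 *		.get_rptr = foo_dma_ring_get_rptr,
 *		.get_wptr = foo_dma_ring_get_wptr,
 *		.set_wptr = foo_dma_ring_set_wptr,
 *		.emit_ib = foo_dma_ring_emit_ib,
 *		.emit_fence = foo_dma_ring_emit_fence,
 *	};
 */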

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			rptr_gpu_addr;
	volatile u32		*rptr_cpu_addr;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			xcc_id;
	u32			xcp_id;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	unsigned		mqd_size;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	u64			wptr_gpu_addr;
	volatile u32		*wptr_cpu_addr;
	unsigned		fence_offs;
	u64			fence_gpu_addr;
	volatile u32		*fence_cpu_addr;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned int		set_q_mode_offs;
	volatile u32		*set_q_mode_ptr;
	u64			set_q_mode_token;
	unsigned		vm_hub;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	int			hw_prio;
	unsigned		num_hw_submission;
	atomic_t		*sched_score;

	/* used for mes */
	bool			is_mes_queue;
	uint32_t		hw_queue_id;
	struct amdgpu_mes_ctx_data *mes_ctx;

	bool			is_sw_ring;
	unsigned int		entry_index;
};

#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))

unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);

void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
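
/*
 * Illustrative sketch (error handling trimmed, packet contents hypothetical):
 * space is reserved up front with amdgpu_ring_alloc(), filled with
 * amdgpu_ring_write() and made visible to the GPU with amdgpu_ring_commit():
 *
 *	r = amdgpu_ring_alloc(ring, 2);
 *	amdgpu_ring_write(ring, SOME_PACKET_HEADER);
 *	amdgpu_ring_write(ring, some_payload);
 *	amdgpu_ring_commit(ring);
 */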

static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	/* Split the copy at the end of the ring buffer: chunk1 runs up to
	 * the wrap point, chunk2 is the remainder starting at offset zero.
	 */
	occupied = ring->wptr & ring->buf_mask;
	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;

	/* convert dword counts to bytes for memcpy() */
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		src += chunk1;
		dst = (void *)ring->ring;
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}

/**
 * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
 * @ring: amdgpu_ring structure
 * @offset: offset returned by amdgpu_ring_init_cond_exec
 *
 * Calculate the dw count and patch it into a cond_exec command.
 */
static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
					       unsigned int offset)
{
	unsigned cur;

	if (!ring->funcs->init_cond_exec)
		return;

	WARN_ON(offset > ring->buf_mask);
	WARN_ON(ring->ring[offset] != 0);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur < offset)
		cur += ring->ring_size >> 2;
	ring->ring[offset] = cur - offset;
}
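
/*
 * Worked example (illustrative, assuming a 1024-dword ring, buf_mask 0x3ff):
 * if init_cond_exec() returned offset 1020 and the write pointer has since
 * wrapped around to 4, then cur = 3; since cur < offset, cur += 1024 gives
 * 1027, and ring[1020] is patched to 1027 - 1020 = 7 -- the seven dwords
 * emitted between the cond_exec packet and the current write position.
 */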

#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)

#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 NULL)

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
{
	return ib->ptr[idx];
}

static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
				       uint32_t value)
{
	ib->ptr[idx] = value;
}

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size,
		  enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
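
/*
 * Illustrative sketch (error handling trimmed) of a full IB round trip in
 * the style of the ring tests: allocate, fill, schedule, wait, free:
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 64, AMDGPU_IB_POOL_DIRECT, &ib);
 *	ib.ptr[0] = ring->funcs->nop;
 *	ib.length_dw = 1;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	dma_fence_wait(f, false);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */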
#endif