/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 */

/**
 * This file contains helpers for writing commands to command streams.
 */

#ifndef R600_CS_H
#define R600_CS_H

#include "r600_pipe_common.h"
#include "amd/common/r600d_common.h"

/**
 * Return true if there is enough memory in VRAM and GTT for the buffers
 * added so far.
 *
 * \param vram      VRAM memory size not added to the buffer list yet
 * \param gtt       GTT memory size not added to the buffer list yet
 */
static inline bool
radeon_cs_memory_below_limit(struct r600_common_screen *screen,
			     struct radeon_winsys_cs *cs,
			     uint64_t vram, uint64_t gtt)
{
	vram += cs->used_vram;
	gtt += cs->used_gart;

	/* Anything that goes above the VRAM size should go to GTT. */
	if (vram > screen->info.vram_size)
		gtt += vram - screen->info.vram_size;

	/* Now we just need to check if we have enough GTT. */
	return gtt < screen->info.gart_size * 0.7;
}
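
/*
 * Illustrative usage sketch (not part of the original header): a caller
 * that is about to add a buffer would pass the buffer's memory footprint
 * and flush when the limit is exceeded, roughly like this (using rctx->gfx
 * as the graphics ring is an assumption of this example):
 *
 *	if (!radeon_cs_memory_below_limit(rctx->screen, rctx->gfx.cs,
 *					  rbo->vram_usage, rbo->gart_usage))
 *		rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
 */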

/**
 * Add a buffer to the buffer list for the given command stream (CS).
 *
 * All buffers used by a CS must be added to the list. This tells the kernel
 * driver which buffers are used by GPU commands. Other buffers can
 * be swapped out (not accessible) during execution.
 *
 * The buffer list becomes empty after every context flush and must be
 * rebuilt.
 */
static inline unsigned radeon_add_to_buffer_list(struct r600_common_context *rctx,
						 struct r600_ring *ring,
						 struct r600_resource *rbo,
						 enum radeon_bo_usage usage,
						 enum radeon_bo_priority priority)
{
	assert(usage);
	return rctx->ws->cs_add_buffer(
		ring->cs, rbo->buf,
		(enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
		rbo->domains, priority) * 4;
}
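
/*
 * Illustrative usage sketch (not part of the original header): adding a
 * buffer that the GPU will only read on the graphics ring could look like
 * the call below; the usage/priority enum values and the rctx->gfx ring
 * are assumptions of this example.
 *
 *	radeon_add_to_buffer_list(rctx, &rctx->gfx, rbuffer,
 *				  RADEON_USAGE_READ,
 *				  RADEON_PRIO_VERTEX_BUFFER);
 */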

/**
 * Same as above, but also checks memory usage and flushes the context
 * accordingly.
 *
 * When this SHOULD NOT be used:
 *
 * - if r600_context_add_resource_size has been called for the buffer
 *   followed by *_need_cs_space for checking the memory usage
 *
 * - if r600_need_dma_space has been called for the buffer
 *
 * - when emitting state packets and draw packets (because preceding packets
 *   can't be re-emitted at that point)
 *
 * - if shader resource "enabled_mask" is not up-to-date or there is
 *   a different constraint disallowing a context flush
 */
static inline unsigned
radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
				    struct r600_ring *ring,
				    struct r600_resource *rbo,
				    enum radeon_bo_usage usage,
				    enum radeon_bo_priority priority,
				    bool check_mem)
{
	if (check_mem &&
	    !radeon_cs_memory_below_limit(rctx->screen, ring->cs,
					  rctx->vram + rbo->vram_usage,
					  rctx->gtt + rbo->gart_usage))
		ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);

	return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
}
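
/*
 * Illustrative usage sketch (not part of the original header): this variant
 * is intended for code paths where a context flush is still allowed, e.g.
 * when a buffer is bound from a state-setting entry point rather than while
 * packets are being emitted. The enum values and the rctx->gfx ring are
 * assumptions of this example.
 *
 *	radeon_add_to_buffer_list_check_mem(rctx, &rctx->gfx, rbuffer,
 *					    RADEON_USAGE_READ,
 *					    RADEON_PRIO_CONST_BUFFER, true);
 */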

/**
 * Add the buffer to the buffer list and, on chips without virtual memory,
 * emit a NOP packet carrying the relocation index so the kernel can patch
 * the buffer address.
 */
static inline void r600_emit_reloc(struct r600_common_context *rctx,
				   struct r600_ring *ring, struct r600_resource *rbo,
				   enum radeon_bo_usage usage,
				   enum radeon_bo_priority priority)
{
	struct radeon_winsys_cs *cs = ring->cs;
	bool has_vm = ((struct r600_common_screen*)rctx->b.screen)->info.has_virtual_memory;
	unsigned reloc = radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);

	if (!has_vm) {
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);
	}
}
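
/*
 * Illustrative usage sketch (not part of the original header): a packet that
 * references rbuffer's GPU address would be followed by a relocation like
 * the call below; the usage/priority values are assumptions of this example.
 *
 *	r600_emit_reloc(rctx, &rctx->gfx, rbuffer,
 *			RADEON_USAGE_READWRITE, RADEON_PRIO_COLOR_BUFFER);
 */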

static inline void radeon_set_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	assert(reg < R600_CONTEXT_REG_OFFSET);
	assert(cs->current.cdw + 2 + num <= cs->current.max_dw);
	radeon_emit(cs, PKT3(PKT3_SET_CONFIG_REG, num, 0));
	radeon_emit(cs, (reg - R600_CONFIG_REG_OFFSET) >> 2);
}

static inline void radeon_set_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	radeon_set_config_reg_seq(cs, reg, 1);
	radeon_emit(cs, value);
}

static inline void radeon_set_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	assert(reg >= R600_CONTEXT_REG_OFFSET);
	assert(cs->current.cdw + 2 + num <= cs->current.max_dw);
	radeon_emit(cs, PKT3(PKT3_SET_CONTEXT_REG, num, 0));
	radeon_emit(cs, (reg - R600_CONTEXT_REG_OFFSET) >> 2);
}

static inline void radeon_set_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	radeon_set_context_reg_seq(cs, reg, 1);
	radeon_emit(cs, value);
}
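
/*
 * Illustrative usage sketch (not part of the original header): the *_seq
 * helpers emit only the packet header and the starting register offset;
 * the caller then emits one value per consecutive register. REG_A and the
 * value names below are hypothetical and used only for this example.
 *
 *	radeon_set_context_reg_seq(cs, REG_A, 2);
 *	radeon_emit(cs, value_for_REG_A);
 *	radeon_emit(cs, value_for_the_next_register);
 */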

static inline void radeon_set_context_reg_idx(struct radeon_winsys_cs *cs,
					      unsigned reg, unsigned idx,
					      unsigned value)
{
	assert(reg >= R600_CONTEXT_REG_OFFSET);
	assert(cs->current.cdw + 3 <= cs->current.max_dw);
	radeon_emit(cs, PKT3(PKT3_SET_CONTEXT_REG, 1, 0));
	radeon_emit(cs, (reg - R600_CONTEXT_REG_OFFSET) >> 2 | (idx << 28));
	radeon_emit(cs, value);
}

static inline void radeon_set_sh_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	assert(reg >= SI_SH_REG_OFFSET && reg < SI_SH_REG_END);
	assert(cs->current.cdw + 2 + num <= cs->current.max_dw);
	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, num, 0));
	radeon_emit(cs, (reg - SI_SH_REG_OFFSET) >> 2);
}

static inline void radeon_set_sh_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	radeon_set_sh_reg_seq(cs, reg, 1);
	radeon_emit(cs, value);
}

static inline void radeon_set_uconfig_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	assert(reg >= CIK_UCONFIG_REG_OFFSET && reg < CIK_UCONFIG_REG_END);
	assert(cs->current.cdw + 2 + num <= cs->current.max_dw);
	radeon_emit(cs, PKT3(PKT3_SET_UCONFIG_REG, num, 0));
	radeon_emit(cs, (reg - CIK_UCONFIG_REG_OFFSET) >> 2);
}

static inline void radeon_set_uconfig_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	radeon_set_uconfig_reg_seq(cs, reg, 1);
	radeon_emit(cs, value);
}

static inline void radeon_set_uconfig_reg_idx(struct radeon_winsys_cs *cs,
					      unsigned reg, unsigned idx,
					      unsigned value)
{
	assert(reg >= CIK_UCONFIG_REG_OFFSET && reg < CIK_UCONFIG_REG_END);
	assert(cs->current.cdw + 3 <= cs->current.max_dw);
	radeon_emit(cs, PKT3(PKT3_SET_UCONFIG_REG, 1, 0));
	radeon_emit(cs, (reg - CIK_UCONFIG_REG_OFFSET) >> 2 | (idx << 28));
	radeon_emit(cs, value);
}
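
/*
 * Illustrative usage sketch (not part of the original header): the *_idx
 * variants shift an extra index into bit 28 of the register-offset dword,
 * selecting among multiple instances of the same register where the packet
 * supports it. REG_B and value are hypothetical names for this example.
 *
 *	radeon_set_uconfig_reg_idx(cs, REG_B, 1, value);
 */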

#endif