• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "sid.h"
25 #include "si_pipe.h"
26 
27 #include "util/u_format.h"
28 
/* Emit an SDMA buffer-to-buffer copy, split into as many packets as the
 * per-packet size limit requires.  Offsets are relative to the start of
 * each resource; size is in bytes.
 */
static void si_dma_copy_buffer(struct si_context *ctx,
				struct pipe_resource *dst,
				struct pipe_resource *src,
				uint64_t dst_offset,
				uint64_t src_offset,
				uint64_t size)
{
	struct radeon_winsys_cs *cs = ctx->b.dma.cs;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;
	unsigned sub_cmd, shift, max_size;
	unsigned npackets, i;

	/* Mark the destination range as valid (initialized) so that
	 * transfer_map knows to wait for the GPU before mapping it. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += rdst->gpu_address;
	src_offset += rsrc->gpu_address;

	/* Use the dword-aligned variant when offsets and size are all
	 * 4-byte aligned; otherwise fall back to the byte-aligned one. */
	if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
		sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
		shift = 2;
		max_size = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE;
	} else {
		sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
		shift = 0;
		max_size = SI_DMA_COPY_MAX_BYTE_ALIGNED_SIZE;
	}

	npackets = DIV_ROUND_UP(size, max_size);
	si_need_dma_space(&ctx->b, npackets * 5, rdst, rsrc);

	for (i = 0; i < npackets; i++) {
		unsigned count = MIN2(size, max_size);

		/* 5 dwords per packet: header, low addrs, high addr bytes. */
		radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd,
					      count >> shift));
		radeon_emit(cs, dst_offset);
		radeon_emit(cs, src_offset);
		radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
		radeon_emit(cs, (src_offset >> 32UL) & 0xff);
		dst_offset += count;
		src_offset += count;
		size -= count;
	}
}
77 
/* Fill a buffer range with a 32-bit constant using the SDMA engine.
 * Falls back to the generic pipe_context::clear_buffer path when SDMA
 * cannot be used (no DMA ring, misaligned range, or sparse resource).
 */
static void si_dma_clear_buffer(struct pipe_context *ctx,
				struct pipe_resource *dst,
				uint64_t offset,
				uint64_t size,
				unsigned clear_value)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct radeon_winsys_cs *cs = sctx->b.dma.cs;
	struct r600_resource *rdst = r600_resource(dst);
	unsigned npackets, i;

	/* SDMA constant fill needs dword alignment and no sparse pages. */
	if (!cs || offset % 4 != 0 || size % 4 != 0 ||
	    dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
		ctx->clear_buffer(ctx, dst, offset, size, &clear_value, 4);
		return;
	}

	/* Mark the destination range as valid (initialized) so that
	 * transfer_map knows to wait for the GPU before mapping it. */
	util_range_add(&rdst->valid_buffer_range, offset, offset + size);

	offset += rdst->gpu_address;

	/* Split using the same maximum packet size as for copying. */
	npackets = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
	si_need_dma_space(&sctx->b, npackets * 4, rdst, NULL);

	for (i = 0; i < npackets; i++) {
		unsigned csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);

		radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_CONSTANT_FILL, 0,
					      csize / 4));
		radeon_emit(cs, offset);
		radeon_emit(cs, clear_value);
		radeon_emit(cs, (offset >> 32) << 16);
		offset += csize;
		size -= csize;
	}
}
117 
/* Copy a 2D region between a tiled and a linear-aligned texture
 * subresource using the SDMA tiled-copy packet.  Exactly one of src/dst
 * must be tiled and the other linear (asserted below); "detile" selects
 * the direction.
 *
 * NOTE(review): pitch appears to be the linear pitch in bytes and
 * copy_height a block count (callers pass height / blk_h) — confirm
 * against si_dma_copy().
 */
static void si_dma_copy_tile(struct si_context *ctx,
			     struct pipe_resource *dst,
			     unsigned dst_level,
			     unsigned dst_x,
			     unsigned dst_y,
			     unsigned dst_z,
			     struct pipe_resource *src,
			     unsigned src_level,
			     unsigned src_x,
			     unsigned src_y,
			     unsigned src_z,
			     unsigned copy_height,
			     unsigned pitch,
			     unsigned bpp)
{
	struct radeon_winsys_cs *cs = ctx->b.dma.cs;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	/* detile == true means tiled source -> linear destination. */
	bool detile = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
	struct r600_texture *rlinear = detile ? rdst : rsrc;
	struct r600_texture *rtiled = detile ? rsrc : rdst;
	unsigned linear_lvl = detile ? dst_level : src_level;
	unsigned tiled_lvl = detile ? src_level : dst_level;
	struct radeon_info *info = &ctx->screen->info;
	unsigned index = rtiled->surface.u.legacy.tiling_index[tiled_lvl];
	unsigned tile_mode = info->si_tile_mode_array[index];
	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
	unsigned ncopy, height, cheight, i;
	unsigned linear_x, linear_y, linear_z,  tiled_x, tiled_y, tiled_z;
	unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, mt;
	uint64_t base, addr;
	unsigned pipe_config;

	/* Equal-mode copies are handled by si_dma_copy_buffer() instead. */
	assert(dst_mode != rsrc->surface.u.legacy.level[src_level].mode);

	sub_cmd = SI_DMA_COPY_TILED;
	lbpp = util_logbase2(bpp);
	/* Pitch in units of 8-texel tiles, encoded as max index. */
	pitch_tile_max = ((pitch / bpp) / 8) - 1;

	/* Route the x/y/z coordinates to the linear or tiled side. */
	linear_x = detile ? dst_x : src_x;
	linear_y = detile ? dst_y : src_y;
	linear_z = detile ? dst_z : src_z;
	tiled_x = detile ? src_x : dst_x;
	tiled_y = detile ? src_y : dst_y;
	tiled_z = detile ? src_z : dst_z;

	assert(!util_format_is_depth_and_stencil(rtiled->resource.b.b.format));

	array_mode = G_009910_ARRAY_MODE(tile_mode);
	/* Slice size in 8x8 tiles, encoded as max index. */
	slice_tile_max = (rtiled->surface.u.legacy.level[tiled_lvl].nblk_x *
			  rtiled->surface.u.legacy.level[tiled_lvl].nblk_y) / (8*8) - 1;
	/* linear height must be the same as the slice tile max height, it's ok even
	 * if the linear destination/source have smaller heigh as the size of the
	 * dma packet will be using the copy_height which is always smaller or equal
	 * to the linear height
	 */
	height = rtiled->surface.u.legacy.level[tiled_lvl].nblk_y;
	base = rtiled->surface.u.legacy.level[tiled_lvl].offset;
	addr = rlinear->surface.u.legacy.level[linear_lvl].offset;
	addr += (uint64_t)rlinear->surface.u.legacy.level[linear_lvl].slice_size_dw * 4 * linear_z;
	addr += linear_y * pitch + linear_x * bpp;
	bank_h = G_009910_BANK_HEIGHT(tile_mode);
	bank_w = G_009910_BANK_WIDTH(tile_mode);
	mt_aspect = G_009910_MACRO_TILE_ASPECT(tile_mode);
	/* Non-depth modes don't have TILE_SPLIT set. */
	tile_split = util_logbase2(rtiled->surface.u.legacy.tile_split >> 6);
	nbanks = G_009910_NUM_BANKS(tile_mode);
	base += rtiled->resource.gpu_address;
	addr += rlinear->resource.gpu_address;

	pipe_config = G_009910_PIPE_CONFIG(tile_mode);
	mt = G_009910_MICRO_TILE_MODE(tile_mode);
	/* Split the copy into packets limited by the max dword-aligned size;
	 * each chunk copies "cheight" rows of "pitch" bytes. */
	size = copy_height * pitch;
	ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
	si_need_dma_space(&ctx->b, ncopy * 9, &rdst->resource, &rsrc->resource);

	for (i = 0; i < ncopy; i++) {
		cheight = copy_height;
		if (cheight * pitch > SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE) {
			cheight = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE / pitch;
		}
		size = cheight * pitch;
		/* 9-dword tiled-copy packet; bit 31 of dword 2 selects the
		 * tiled->linear (detile) direction. */
		radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, size / 4));
		radeon_emit(cs, base >> 8);
		radeon_emit(cs, (detile << 31) | (array_mode << 27) |
				(lbpp << 24) | (bank_h << 21) |
				(bank_w << 18) | (mt_aspect << 16));
		radeon_emit(cs, (pitch_tile_max << 0) | ((height - 1) << 16));
		radeon_emit(cs, (slice_tile_max << 0) | (pipe_config << 26));
		radeon_emit(cs, (tiled_x << 0) | (tiled_z << 18));
		radeon_emit(cs, (tiled_y << 0) | (tile_split << 21) | (nbanks << 25) | (mt << 27));
		radeon_emit(cs, addr & 0xfffffffc);
		radeon_emit(cs, (addr >> 32UL) & 0xff);
		copy_height -= cheight;
		addr += cheight * pitch;
		tiled_y += cheight;
	}
}
217 
/* pipe_context::dma_copy entry point.
 *
 * Buffer-to-buffer copies go through the SDMA engine; all texture copies
 * currently fall back to si_resource_copy_region because of the GPU
 * lockups described in the XXX comment below.  The tiled/linear paths
 * after the unconditional "goto fallback" are therefore intentionally
 * unreachable, kept around for future re-enabling.
 */
static void si_dma_copy(struct pipe_context *ctx,
			struct pipe_resource *dst,
			unsigned dst_level,
			unsigned dstx, unsigned dsty, unsigned dstz,
			struct pipe_resource *src,
			unsigned src_level,
			const struct pipe_box *src_box)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	/* No DMA ring, or sparse resources (unsupported by SDMA):
	 * use the generic copy path. */
	if (sctx->b.dma.cs == NULL ||
	    src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
	    dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
		goto fallback;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		si_dma_copy_buffer(sctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	/* XXX: Using the asynchronous DMA engine for multi-dimensional
	 * operations seems to cause random GPU lockups for various people.
	 * While the root cause for this might need to be fixed in the kernel,
	 * let's disable it for now.
	 *
	 * Before re-enabling this, please make sure you can hit all newly
	 * enabled paths in your testing, preferably with both piglit and real
	 * world apps, and get in touch with people on the bug reports below
	 * for stability testing.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=85647
	 * https://bugs.freedesktop.org/show_bug.cgi?id=83500
	 */
	goto fallback;

	/* --- Everything below is dead code until the goto above goes. --- */
	if (src_box->depth > 1 ||
	    !si_prepare_for_dma_blit(&sctx->b, rdst, dst_level, dstx, dsty,
					dstz, rsrc, src_level, src_box))
		goto fallback;

	/* Convert texel coordinates to block coordinates. */
	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
	src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
	src_w = u_minify(rsrc->resource.b.b.width0, src_level);
	dst_w = u_minify(rdst->resource.b.b.width0, dst_level);

	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	src_mode = rsrc->surface.u.legacy.level[src_level].mode;

	/* Only whole-surface copies are supported here. */
	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
	    src_box->width != src_w ||
	    src_box->height != u_minify(rsrc->resource.b.b.height0, src_level) ||
	    src_box->height != u_minify(rdst->resource.b.b.height0, dst_level) ||
	    rsrc->surface.u.legacy.level[src_level].nblk_y !=
	    rdst->surface.u.legacy.level[dst_level].nblk_y) {
		/* FIXME si can do partial blit */
		goto fallback;
	}
	/* the x test here are currently useless (because we don't support partial blit)
	 * but keep them around so we don't forget about those
	 */
	if ((src_pitch % 8) || (src_box->x % 8) || (dst_x % 8) ||
	    (src_box->y % 8) || (dst_y % 8) || (src_box->height % 8)) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset;
		/* simple dma blit would do NOTE code here assume :
		 *   src_box.x/y == 0
		 *   dst_x/y == 0
		 *   dst_pitch == src_pitch
		 */
		src_offset= rsrc->surface.u.legacy.level[src_level].offset;
		src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = rdst->surface.u.legacy.level[dst_level].offset;
		dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		si_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset,
				   (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4);
	} else {
		/* One side tiled, one linear: use the tiled-copy packet. */
		si_dma_copy_tile(sctx, dst, dst_level, dst_x, dst_y, dst_z,
				 src, src_level, src_x, src_y, src_box->z,
				 src_box->height / rsrc->surface.blk_h,
				 dst_pitch, bpp);
	}
	return;

fallback:
	si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}
323 
si_init_dma_functions(struct si_context * sctx)324 void si_init_dma_functions(struct si_context *sctx)
325 {
326 	sctx->b.dma_copy = si_dma_copy;
327 	sctx->b.dma_clear_buffer = si_dma_clear_buffer;
328 }
329