/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák
 */

#include "r600_cs.h"
#include "evergreen_compute.h"
#include "compute_memory_pool.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

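/* Return whether the buffer is referenced by the current GFX or DMA
 * command stream. */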
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
				     struct pb_buffer *buf,
				     unsigned usage)
{
	if (ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}

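/* Map a buffer for CPU access. Unless the map is unsynchronized, flush any
 * command stream that references the buffer and wait for the GPU to stop
 * using it before returning the pointer. Returns NULL if PIPE_MAP_DONTBLOCK
 * is set and the map would have to wait. */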
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                      struct r600_resource *resource,
                                      unsigned usage)
{
	unsigned rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_MAP_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_MAP_WRITE)) {
		/* Reads only have to wait for the last write. */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(ctx->ws, resource->buf, 0, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(&ctx->gfx.cs);
			if (ctx->dma.cs.priv)
				ctx->ws->cs_sync_flush(&ctx->dma.cs);
		}
	}

	/* Passing NULL as the CS avoids repeating the checks we have already done. */
	return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
}

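/* Initialize the size, alignment, memory domains and flags of a resource
 * based on its pipe usage and bind flags. The backing buffer is not
 * allocated here; see r600_alloc_resource. */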
void r600_init_resource_fields(struct r600_common_screen *rscreen,
			       struct r600_resource *res,
			       uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = (struct r600_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		FALLTHROUGH;
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
	    res->flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* Displayable and shareable surfaces are not suballocated. */
	if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
		res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
	else
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (rscreen->debug_flags & DBG_NO_WC)
		res->flags &= ~RADEON_FLAG_GTT_WC;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;

	if (res->domains & RADEON_DOMAIN_VRAM)
		res->vram_usage = size;
	else if (res->domains & RADEON_DOMAIN_GTT)
		res->gart_usage = size;
}

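/* Allocate (or reallocate) the winsys buffer backing a resource, using the
 * fields set up by r600_init_resource_fields. Returns false on allocation
 * failure. */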
bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.r600_has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

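/* Release all storage associated with a buffer resource. */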
void r600_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&rbuffer->valid_buffer_range);
	pipe_resource_reference((struct pipe_resource**)&rbuffer->immed_buffer, NULL);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

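/* Discard the current contents of a buffer. If the GPU is still using the
 * old storage, ask the context to reallocate it so the CPU does not have to
 * wait; otherwise just mark the valid range as empty. Returns false if the
 * buffer cannot be reallocated (shared, sparse and user-pointer buffers). */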
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (rbuffer->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rbuffer->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

/* Replace the storage of dst with src. */
void r600_replace_buffer_storage(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);
	uint64_t old_gpu_address = rdst->gpu_address;

	pb_reference(&rdst->buf, rsrc->buf);
	rdst->gpu_address = rsrc->gpu_address;
	rdst->b.b.bind = rsrc->b.b.bind;
	rdst->flags = rsrc->flags;

	assert(rdst->vram_usage == rsrc->vram_usage);
	assert(rdst->gart_usage == rsrc->gart_usage);
	assert(rdst->bo_size == rsrc->bo_size);
	assert(rdst->bo_alignment == rsrc->bo_alignment);
	assert(rdst->domains == rsrc->domains);

	rctx->rebind_buffer(ctx, dst, old_gpu_address);
}

void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

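/* Allocate and fill a pipe_transfer object for a buffer map and return the
 * mapped pointer unchanged. "staging" is the temporary buffer backing the
 * mapping, or NULL if the buffer itself is mapped. */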
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_zalloc(&rctx->pool_transfers_unsync);
	else
		transfer = slab_zalloc(&rctx->pool_transfers);

	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}

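/* Return whether a GPU copy can be used for this buffer range: either via
 * CP DMA, or, when the offsets and size are dword-aligned, via the DMA ring
 * or streamout. */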
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs.priv ||
				  rctx->screen->has_streamout));
}

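/* Buffer transfer_map. Tries hard to avoid stalling on the GPU:
 * never-written ranges are mapped unsynchronized, whole-resource discards
 * reallocate the storage, discarded ranges are written through a temporary
 * upload buffer, and reads from VRAM or write-combined memory go through a
 * staging buffer in cached GTT. */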
void *r600_buffer_transfer_map(struct pipe_context *ctx,
                               struct pipe_resource *resource,
                               unsigned level,
                               unsigned usage,
                               const struct pipe_box *box,
                               struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	if (r600_resource(resource)->compute_global_bo) {
		if ((data = r600_compute_global_transfer_map(ctx, resource, level, usage, box, ptransfer)))
			return data;
	}

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (rbuffer->b.is_user_ptr)
		usage |= PIPE_MAP_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_MAP_WRITE &&
	    !rbuffer->b.is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_MAP_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_MAP_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_MAP_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_MAP_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_MAP_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_MAP_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_MAP_DISCARD_RANGE) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    ((!(usage & (PIPE_MAP_UNSYNCHRONIZED |
			 PIPE_MAP_PERSISTENT)) &&
	      r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_MAP_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
		    r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(ctx->stream_uploader, 0,
				       box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       rctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_MAP_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_MAP_READ) &&
		  !(usage & PIPE_MAP_PERSISTENT) &&
		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		   rbuffer->flags & RADEON_FLAG_GTT_WC) &&
		  r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
		struct r600_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging,
							       usage & ~PIPE_MAP_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}

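/* Write back a mapped region: if a staging buffer was used, copy it into the
 * real buffer, then mark the region as valid. */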
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->b.b.offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->b.b, &rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

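/* Buffer transfer_flush_region: flush a sub-range of a mapping created with
 * PIPE_MAP_FLUSH_EXPLICIT. "rel_box" is relative to the mapped box. */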
void r600_buffer_flush_region(struct pipe_context *ctx,
			      struct pipe_transfer *transfer,
			      const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_MAP_WRITE |
				  PIPE_MAP_FLUSH_EXPLICIT;

	if (r600_resource(transfer->resource)->compute_global_bo)
		return;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

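/* Buffer transfer_unmap: flush the written range (unless it was flushed
 * explicitly), release any staging buffer and free the transfer object. */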
void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rbuffer->compute_global_bo && !rbuffer->b.is_user_ptr) {
		r600_compute_global_transfer_unmap(ctx, transfer);
		return;
	}

	if (transfer->usage & PIPE_MAP_WRITE &&
	    !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	r600_resource_reference(&rtransfer->staging, NULL);
	assert(rtransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&rctx->pool_transfers, transfer);
}

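/* Buffer subdata: upload "size" bytes of "data" at "offset" through a
 * transfer map; the range is discarded unless PIPE_MAP_DIRECTLY is set. */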
void r600_buffer_subdata(struct pipe_context *ctx,
			 struct pipe_resource *buffer,
			 unsigned usage, unsigned offset,
			 unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	usage |= PIPE_MAP_WRITE;

	if (!(usage & PIPE_MAP_DIRECTLY))
		usage |= PIPE_MAP_DISCARD_RANGE;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}

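/* Allocate and initialize the CPU-side r600_resource struct for a buffer;
 * the GPU buffer itself is allocated separately. */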
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;

	threaded_resource_init(&rbuffer->b.b, false);

	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->immed_buffer = NULL;
	rbuffer->compute_global_bo = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->flags |= RADEON_FLAG_SPARSE;

	if (!r600_alloc_resource(rscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

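/* Convenience wrapper around r600_buffer_create for internal allocations:
 * builds a buffer template from the given flags, usage, size and
 * alignment. */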
struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}

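/* Create a buffer that wraps user memory (GL_AMD_pinned_memory). The user
 * pointer is registered with the winsys and the whole buffer is placed in
 * GTT and marked as valid. */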
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer;

	if ((templ->bind & PIPE_BIND_GLOBAL) &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE)) {
		rbuffer = r600_resource(r600_compute_global_buffer_create(screen, templ));
		((struct r600_resource_global *)rbuffer)->chunk->real_buffer = rbuffer;
	} else {
		rbuffer = r600_alloc_buffer_struct(screen, templ);
	}

	rbuffer->domains = RADEON_DOMAIN_GTT;
	rbuffer->flags = 0;
	rbuffer->b.is_user_ptr = true;
	util_range_add(&rbuffer->b.b, &rbuffer->valid_buffer_range, 0, templ->width0);
	util_range_add(&rbuffer->b.b, &rbuffer->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0,
					   templ->usage == PIPE_USAGE_IMMUTABLE ? RADEON_FLAG_READ_ONLY : 0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.r600_has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	rbuffer->vram_usage = 0;
	rbuffer->gart_usage = templ->width0;

	return &rbuffer->b.b;
}