/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák
 */

#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

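/* Return true if the buffer is referenced with the given usage by the GFX
 * command stream, or by a DMA command stream that has unflushed commands. */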
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
				     struct pb_buffer *buf,
				     enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}

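/* Map the buffer for CPU access, synchronizing with GPU work that still uses
 * it: command streams referencing the buffer are flushed and the buffer is
 * waited on unless the map is unsynchronized. Returns NULL when
 * PIPE_TRANSFER_DONTBLOCK is set and the map would have to wait. */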
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				      struct r600_resource *resource,
				      unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* Read-only maps only have to wait for the last write. */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will have to wait for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(ctx->gfx.cs);
			if (ctx->dma.cs)
				ctx->ws->cs_sync_flush(ctx->dma.cs);
		}
	}

	/* Passing NULL as the CS skips the synchronization checks we have
	 * already done above. */
	return ctx->ws->buffer_map(resource->buf, NULL, usage);
}

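/* Initialize the size, alignment, memory domains, and winsys flags of a
 * resource based on its pipe usage, the kernel version, and debug options,
 * and record the expected VRAM/GART usage. */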
void r600_init_resource_fields(struct r600_common_screen *rscreen,
			       struct r600_resource *res,
			       uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = (struct r600_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		res->flags |= RADEON_FLAG_CPU_ACCESS;
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
		else if (res->domains & RADEON_DOMAIN_VRAM)
			res->flags |= RADEON_FLAG_CPU_ACCESS;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if (res->b.b.target != PIPE_BUFFER &&
	    !rtex->surface.is_linear) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags &= ~RADEON_FLAG_CPU_ACCESS;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* If VRAM is just stolen system memory, allow both VRAM and
	 * GTT, whichever has free space. If a buffer is evicted from
	 * VRAM to GTT, it will stay there.
	 */
	if (!rscreen->info.has_dedicated_vram &&
	    res->domains == RADEON_DOMAIN_VRAM)
		res->domains = RADEON_DOMAIN_VRAM_GTT;

	if (rscreen->debug_flags & DBG_NO_WC)
		res->flags &= ~RADEON_FLAG_GTT_WC;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;

	if (res->domains & RADEON_DOMAIN_VRAM)
		res->vram_usage = size;
	else if (res->domains & RADEON_DOMAIN_GTT)
		res->gart_usage = size;
}

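/* Allocate (or reallocate) the winsys buffer for a resource using the fields
 * set up by r600_init_resource_fields, then refresh the GPU address and
 * per-resource bookkeeping. */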
bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

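/* Reallocate the buffer storage if it is still in use by the GPU; otherwise
 * just reset the valid range. Returns false when invalidation is not allowed
 * (shared buffers and user-pointer buffers). */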
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->is_shared)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

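/* Allocate a transfer object from the per-context slab, fill in the mapping
 * parameters, and return the pointer the caller should expose. */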
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer = slab_alloc(&rctx->pool_transfers);

	transfer->transfer.resource = resource;
	transfer->transfer.level = level;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->transfer;
	return data;
}

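/* Check whether a buffer copy with the given offsets and size can be done on
 * the GPU: CP DMA handles it directly, otherwise the copy must be
 * dword-aligned and go through the DMA ring or streamout. */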
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs ||
				  rctx->screen->has_streamout));
}

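/* Map a range of a buffer for CPU access (the transfer_map implementation for
 * buffers). Several paths avoid stalling on the GPU: never-written ranges are
 * mapped unsynchronized, discarding the whole resource reallocates its
 * storage, discarded ranges of busy buffers are written through a temporary
 * upload buffer, and reads from VRAM or write-combined memory go through a
 * staging buffer in cached GTT. */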
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !rbuffer->is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       PIPE_TRANSFER_PERSISTENT)) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       256, &offset, (struct pipe_resource**)&staging, (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, level, usage, box,
								ptransfer, data, staging, offset);
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if ((usage & PIPE_TRANSFER_READ) &&
		 !(usage & PIPE_TRANSFER_PERSISTENT) &&
		 (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		  rbuffer->flags & RADEON_FLAG_GTT_WC) &&
		 r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) {
		struct r600_resource *staging;

		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging,
							       usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, level, usage, box,
							ptransfer, data, staging, 0);
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL, 0);
}

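/* Make the CPU writes in the given range visible in the real buffer: copy
 * back from the staging buffer if one was used, and mark the range as
 * initialized. */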
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

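/* transfer_flush_region implementation for buffers; rel_box is relative to
 * the originally mapped box. */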
static void r600_buffer_flush_region(struct pipe_context *ctx,
				     struct pipe_transfer *transfer,
				     const struct pipe_box *rel_box)
{
	if (transfer->usage & (PIPE_TRANSFER_WRITE |
			       PIPE_TRANSFER_FLUSH_EXPLICIT)) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

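/* Unmap a buffer transfer: flush implicit writes (unless FLUSH_EXPLICIT was
 * used), drop the staging buffer reference, and return the transfer object
 * to the slab. */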
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	if (rtransfer->staging)
		r600_resource_reference(&rtransfer->staging, NULL);

	slab_free(&rctx->pool_transfers, transfer);
}

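/* Upload a range of CPU data into a buffer by mapping it with
 * PIPE_TRANSFER_DISCARD_RANGE plus the caller's usage flags and copying. */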
void r600_buffer_subdata(struct pipe_context *ctx,
			 struct pipe_resource *buffer,
			 unsigned usage, unsigned offset,
			 unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0,
				       PIPE_TRANSFER_WRITE |
				       PIPE_TRANSFER_DISCARD_RANGE |
				       usage,
				       &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}

static const struct u_resource_vtbl r600_buffer_vtbl =
{
	NULL,				/* get_handle */
	r600_buffer_destroy,		/* resource_destroy */
	r600_buffer_transfer_map,	/* transfer_map */
	r600_buffer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,	/* transfer_unmap */
};

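/* Allocate and initialize the CPU-side r600_resource structure; the winsys
 * buffer itself is allocated separately by r600_alloc_resource. */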
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
	rbuffer->b.vtbl = &r600_buffer_vtbl;
	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->TC_L2_dirty = false;
	rbuffer->is_shared = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

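/* Create a buffer resource with at least the given alignment. Buffers with
 * PIPE_BIND_SHARED get the RADEON_FLAG_HANDLE winsys flag. */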
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);

	if (templ->bind & PIPE_BIND_SHARED)
		rbuffer->flags |= RADEON_FLAG_HANDLE;

	if (!r600_alloc_resource(rscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

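/* Convenience wrapper that fills out a pipe_resource template for an internal
 * PIPE_BUFFER of the given size and creates it with the requested alignment. */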
struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned bind,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = bind;
	buffer.usage = usage;
	buffer.flags = 0;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}

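/* Create a GTT buffer backed by application-provided memory (user pointers,
 * as used by AMD_pinned_memory). The whole range is marked valid because the
 * user memory already contains the data. */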
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	return &rbuffer->b.b;
}
594