/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include <errno.h>
#include "pipe/p_screen.h"
#include "util/u_format.h"
#include "util/u_format_s3tc.h"
#include "util/u_math.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "pipebuffer/pb_buffer.h"
#include "radeonsi_pipe.h"
#include "r600_resource.h"
#include "sid.h"

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;

	ctx->resource_copy_region(ctx, rtransfer->staging_texture,
				0, 0, 0, 0, texture, transfer->level,
				&transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;
	struct pipe_box sbox;

	sbox.x = sbox.y = sbox.z = 0;
	sbox.width = transfer->box.width;
	sbox.height = transfer->box.height;
	/* XXX that might be wrong */
	sbox.depth = 1;
	ctx->resource_copy_region(ctx, texture, transfer->level,
				  transfer->box.x, transfer->box.y, transfer->box.z,
				  rtransfer->staging_texture,
				  0, &sbox);
}

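/* Return the byte offset of the given mipmap level and array layer
 * within the texture's backing buffer. */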
static unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
					unsigned level, unsigned layer)
{
	return rtex->surface.level[level].offset +
	       layer * rtex->surface.level[level].slice_size;
}

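/* Translate a pipe_resource template into the winsys radeon_surface
 * description: dimensions, block size, tiling mode and surface type. */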
static int r600_init_surface(struct radeon_surface *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode)
{
	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;
	surface->bpe = util_format_get_blocksize(ptex->format);
	/* Round the bytes-per-element of 3-byte formats up to a dword. */
	if (surface->bpe == 3) {
		surface->bpe = 4;
	}
	surface->nsamples = 1;
	surface->flags = 0;
	switch (array_mode) {
	case V_009910_ARRAY_1D_TILED_THIN1:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
		break;
	case V_009910_ARRAY_2D_TILED_THIN1:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
		break;
	case V_009910_ARRAY_LINEAR_ALIGNED:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR_ALIGNED, MODE);
		break;
	case V_009910_ARRAY_LINEAR_GENERAL:
	default:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR, MODE);
		break;
	}
	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_2D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	case PIPE_BUFFER:
	default:
		return -EINVAL;
	}
	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}
	if (util_format_is_depth_and_stencil(ptex->format)) {
		surface->flags |= RADEON_SURF_ZBUFFER;
		surface->flags |= RADEON_SURF_SBUFFER;
	}

	return 0;
}

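/* Let the winsys compute the final surface layout, then apply any pitch
 * override requested by the caller (e.g. for imported buffers). */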
static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_resource_texture *rtex,
			      unsigned array_mode,
			      unsigned pitch_in_bytes_override)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	int r;

	if (util_format_is_depth_or_stencil(rtex->real_format)) {
		rtex->surface.flags |= RADEON_SURF_ZBUFFER;
		rtex->surface.flags |= RADEON_SURF_SBUFFER;
	}

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r) {
		return r;
	}
	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* The old DDX on Evergreen overestimates the alignment for
		 * 1D-tiled surfaces; it only allocates one level for those.
		 */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
			rtex->surface.stencil_offset = rtex->surface.level[0].slice_size;
		}
	}
	return 0;
}

/* Figure out whether u_blitter will fall back to a transfer operation.
 * If so, don't use a staging resource.
 */
static boolean permit_hardware_blit(struct pipe_screen *screen,
				    const struct pipe_resource *res)
{
	unsigned bind;

	if (util_format_is_depth_or_stencil(res->format))
		bind = PIPE_BIND_DEPTH_STENCIL;
	else
		bind = PIPE_BIND_RENDER_TARGET;

	/* hackaround for S3TC */
	if (util_format_is_compressed(res->format))
		return TRUE;

	if (!screen->is_format_supported(screen,
				res->format,
				res->target,
				res->nr_samples,
				bind))
		return FALSE;

	if (!screen->is_format_supported(screen,
				res->format,
				res->target,
				res->nr_samples,
				PIPE_BIND_SAMPLER_VIEW))
		return FALSE;

	switch (res->usage) {
	case PIPE_USAGE_STREAM:
	case PIPE_USAGE_STAGING:
		return FALSE;

	default:
		return TRUE;
	}
}

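/* Export the texture's tiling parameters to the winsys and return a
 * handle through which the buffer can be shared. */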
static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *ptex,
				       struct winsys_handle *whandle)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct si_resource *resource = &rtex->resource;
	struct radeon_surface *surface = &rtex->surface;
	struct r600_screen *rscreen = (struct r600_screen*)screen;

	rscreen->ws->buffer_set_tiling(resource->buf,
				       NULL,
				       surface->level[0].mode >= RADEON_SURF_MODE_1D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->level[0].mode >= RADEON_SURF_MODE_2D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->bankw, surface->bankh,
				       surface->tile_split,
				       surface->stencil_tile_split,
				       surface->mtilea,
				       surface->level[0].pitch_bytes);

	return rscreen->ws->buffer_get_handle(resource->buf,
					      surface->level[0].pitch_bytes, whandle);
}

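/* Release the flushed depth copy (if any) and the backing buffer. */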
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct si_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		si_resource_reference((struct si_resource **)&rtex->flushed_depth_texture, NULL);

	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}

/* Note: this counts texels, so it needs adjustment for block-compressed
 * pixel formats.
 */
static INLINE unsigned u_box_volume(const struct pipe_box *box)
{
	return box->width * box->depth * box->height;
}

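/* Begin a CPU mapping of a texture region.  Decide here whether the
 * transfer can map the resource directly or must go through a staging
 * texture (detiling blit and/or copy to GTT). */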
static struct pipe_transfer* si_texture_get_transfer(struct pipe_context *ctx,
						     struct pipe_resource *texture,
						     unsigned level,
						     unsigned usage,
						     const struct pipe_box *box)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;
	struct r600_transfer *trans;
	int r;
	boolean use_staging_texture = FALSE;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR)
		use_staging_texture = TRUE;

	if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024)
		use_staging_texture = TRUE;

	/* XXX: Use a staging texture for uploads if the underlying BO
	 * is busy.  There is currently no interface for checking that,
	 * so do it eagerly whenever the transfer doesn't require a
	 * readback and might block.
	 */
	if ((usage & PIPE_TRANSFER_WRITE) &&
	    !(usage & (PIPE_TRANSFER_READ |
		       PIPE_TRANSFER_DONTBLOCK |
		       PIPE_TRANSFER_UNSYNCHRONIZED)))
		use_staging_texture = TRUE;

	if (!permit_hardware_blit(ctx->screen, texture) ||
	    (texture->flags & R600_RESOURCE_FLAG_TRANSFER))
		use_staging_texture = FALSE;

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
		return NULL;

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->depth) {
		/* XXX: only read back the rectangle which is being mapped? */
		/* XXX: when discard is true, there is no need to read back
		 * from the depth texture. */
		r = r600_texture_depth_flush(ctx, texture, FALSE);
		if (r < 0) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}
		trans->transfer.stride = rtex->flushed_depth_texture->surface.level[level].pitch_bytes;
		trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z);
		return &trans->transfer;
	} else if (use_staging_texture) {
		resource.target = PIPE_TEXTURE_2D;
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.array_size = 1;
		resource.last_level = 0;
		resource.nr_samples = 0;
		resource.usage = PIPE_USAGE_STAGING;
		resource.bind = 0;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;
		/* For texture reading, the temporary (detiled) texture is used as
		 * a render target when blitting from a tiled texture. */
		if (usage & PIPE_TRANSFER_READ) {
			resource.bind |= PIPE_BIND_RENDER_TARGET;
		}
		/* For texture writing, the temporary texture is used as a sampler
		 * when blitting into a tiled texture. */
		if (usage & PIPE_TRANSFER_WRITE) {
			resource.bind |= PIPE_BIND_SAMPLER_VIEW;
		}
		/* Create the temporary texture. */
		trans->staging_texture = ctx->screen->resource_create(ctx->screen, &resource);
		if (trans->staging_texture == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->transfer.stride = ((struct r600_resource_texture *)trans->staging_texture)
					->surface.level[0].pitch_bytes;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			radeonsi_flush(ctx, NULL, 0);
		}
		return &trans->transfer;
	}
	trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
	trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
	trans->offset = r600_texture_get_offset(rtex, level, box->z);
	return &trans->transfer;
}

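/* End a transfer: write back the staging copy (or flushed depth copy)
 * if the mapping was writable, then drop the references. */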
static void si_texture_transfer_destroy(struct pipe_context *ctx,
					struct pipe_transfer *transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;

	if (rtransfer->staging_texture) {
		if (transfer->usage & PIPE_TRANSFER_WRITE) {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
		pipe_resource_reference(&rtransfer->staging_texture, NULL);
	}

	if (rtex->depth && !rtex->is_flushing_texture) {
		if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture)
			r600_blit_push_depth(ctx, rtex);
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}

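/* Map the buffer backing the transfer: the staging texture if one was
 * created, the flushed depth copy for depth textures, or the texture
 * itself otherwise. */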
static void* si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_transfer* transfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon_winsys_cs_handle *buf;
	enum pipe_format format = transfer->resource->format;
	unsigned offset = 0;
	char *map;

	if (rtransfer->staging_texture) {
		buf = si_resource(rtransfer->staging_texture)->cs_buf;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture)
			buf = rtex->flushed_depth_texture->resource.cs_buf;
		else
			buf = si_resource(transfer->resource)->cs_buf;

		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
	}

	if (!(map = rctx->ws->buffer_map(buf, rctx->cs, transfer->usage))) {
		return NULL;
	}

	return map + offset;
}

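/* Unmap the buffer that was mapped in si_texture_transfer_map. */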
static void si_texture_transfer_unmap(struct pipe_context *ctx,
				      struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct radeon_winsys_cs_handle *buf;

	if (rtransfer->staging_texture) {
		buf = si_resource(rtransfer->staging_texture)->cs_buf;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture) {
			buf = rtex->flushed_depth_texture->resource.cs_buf;
		} else {
			buf = si_resource(transfer->resource)->cs_buf;
		}
	}
	rctx->ws->buffer_unmap(buf);
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
	r600_texture_get_handle,	/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	si_texture_get_transfer,	/* get_transfer */
	si_texture_transfer_destroy,	/* transfer_destroy */
	si_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region,/* transfer_flush_region */
	si_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};

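/* Common texture constructor: fill in the r600_resource_texture, compute
 * the surface layout, and either allocate a new backing buffer or wrap
 * the one passed in by the caller. */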
static struct r600_resource_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned max_buffer_size,
			   struct pb_buffer *buf,
			   boolean alloc_bo,
			   struct radeon_surface *surface)
{
	struct r600_resource_texture *rtex;
	struct si_resource *resource;
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	int r;

	rtex = CALLOC_STRUCT(r600_resource_texture);
	if (rtex == NULL)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;
	rtex->pitch_override = pitch_in_bytes_override;
	rtex->real_format = base->format;

	/* only mark depth textures the HW can hit as depth textures */
	if (util_format_is_depth_or_stencil(rtex->real_format) && permit_hardware_blit(screen, base))
		rtex->depth = 1;

	rtex->surface = *surface;
	r = r600_setup_surface(screen, rtex, array_mode, pitch_in_bytes_override);
	if (r) {
		FREE(rtex);
		return NULL;
	}

	/* Now create the backing buffer. */
	if (!buf && alloc_bo) {
		unsigned base_align = rtex->surface.bo_alignment;
		unsigned size = rtex->surface.bo_size;

		if (!si_init_resource(rscreen, resource, size, base_align, base->bind, base->usage)) {
			FREE(rtex);
			return NULL;
		}
	} else if (buf) {
		resource->buf = buf;
		resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
		resource->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
	}

	return rtex;
}

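/* Create a brand-new texture from a template.  Tiling selection is
 * currently disabled (see the #if 0 block below), so new textures
 * default to a linear array mode. */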
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
					const struct pipe_resource *templ)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct radeon_surface surface;
	unsigned array_mode = 0;
	int r;

#if 0
	if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
	    !(templ->bind & PIPE_BIND_SCANOUT)) {
		if (permit_hardware_blit(screen, templ)) {
			array_mode = V_009910_ARRAY_2D_TILED_THIN1;
		}
	}
#endif

	r = r600_init_surface(&surface, templ, array_mode);
	if (r) {
		return NULL;
	}
	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}
	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								  0, 0, NULL, TRUE, &surface);
}

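/* Create a pipe_surface (render-target/depth-buffer view) for a single
 * level and layer of a texture. */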
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *surf_tmpl)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
	unsigned level = surf_tmpl->u.tex.level;

	assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);
	if (surface == NULL)
		return NULL;
	/* XXX no offset */
/*	offset = r600_texture_get_offset(rtex, level, surf_tmpl->u.tex.first_layer);*/
	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = surf_tmpl->format;
	surface->base.width = rtex->surface.level[level].npix_x;
	surface->base.height = rtex->surface.level[level].npix_y;
	surface->base.usage = surf_tmpl->usage;
	surface->base.texture = texture;
	surface->base.u.tex.first_layer = surf_tmpl->u.tex.first_layer;
	surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer;
	surface->base.u.tex.level = level;

	return &surface->base;
}

static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}

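/* Import a texture from a winsys handle (e.g. a buffer shared by another
 * process).  The tiling parameters are queried from the winsys rather
 * than computed. */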
struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
					     const struct pipe_resource *templ,
					     struct winsys_handle *whandle)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0;
	unsigned array_mode = 0;
	enum radeon_bo_layout micro, macro;
	struct radeon_surface surface;
	int r;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
				       &surface.bankw, &surface.bankh,
				       &surface.tile_split,
				       &surface.stencil_tile_split,
				       &surface.mtilea);

	if (macro == RADEON_LAYOUT_TILED)
		array_mode = V_009910_ARRAY_2D_TILED_THIN1;
	else if (micro == RADEON_LAYOUT_TILED)
		array_mode = V_009910_ARRAY_1D_TILED_THIN1;
	else
		array_mode = 0;

	r = r600_init_surface(&surface, templ, array_mode);
	if (r) {
		return NULL;
	}
	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								  stride, 0, buf, FALSE, &surface);
}

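/* Ensure a CPU-readable (untiled) copy of a depth texture exists,
 * creating it on first use, and unless just_create is set, decompress
 * the depth data into it. */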
int r600_texture_depth_flush(struct pipe_context *ctx,
			     struct pipe_resource *texture, boolean just_create)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;

	if (rtex->flushed_depth_texture)
		goto out;

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = PIPE_USAGE_DYNAMIC;
	resource.bind = texture->bind | PIPE_BIND_DEPTH_STENCIL;
	resource.flags = R600_RESOURCE_FLAG_TRANSFER | texture->flags;

	rtex->flushed_depth_texture = (struct r600_resource_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (rtex->flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold untiled copy\n");
		return -ENOMEM;
	}

	rtex->flushed_depth_texture->is_flushing_texture = TRUE;
out:
	if (just_create)
		return 0;

	/* XXX: only do this if the depth texture has actually changed */
	si_blit_uncompress_depth(ctx, rtex);
	return 0;
}

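/* Hook the surface creation/destruction callbacks into the context. */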
void si_init_surface_functions(struct r600_context *r600)
{
	r600->context.create_surface = r600_create_surface;
	r600->context.surface_destroy = r600_surface_destroy;
}
670