• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2010 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Dave Airlie
24  */
25 
26 #include <stdio.h>
27 
28 #include "util/u_inlines.h"
29 #include "util/u_memory.h"
30 #include "util/u_upload_mgr.h"
31 #include "util/u_math.h"
32 
33 #include "r300_screen_buffer.h"
34 
r300_upload_index_buffer(struct r300_context * r300,struct pipe_resource ** index_buffer,unsigned index_size,unsigned * start,unsigned count,const uint8_t * ptr)35 void r300_upload_index_buffer(struct r300_context *r300,
36 			      struct pipe_resource **index_buffer,
37 			      unsigned index_size, unsigned *start,
38 			      unsigned count, const uint8_t *ptr)
39 {
40     unsigned index_offset;
41 
42     *index_buffer = NULL;
43 
44     u_upload_data(r300->uploader,
45                   0, count * index_size, 4,
46                   ptr + (*start * index_size),
47                   &index_offset,
48                   index_buffer);
49 
50     *start = index_offset / index_size;
51 }
52 
r300_buffer_destroy(struct pipe_screen * screen,struct pipe_resource * buf)53 static void r300_buffer_destroy(struct pipe_screen *screen,
54 				struct pipe_resource *buf)
55 {
56     struct r300_resource *rbuf = r300_resource(buf);
57 
58     align_free(rbuf->malloced_buffer);
59 
60     if (rbuf->buf)
61         pb_reference(&rbuf->buf, NULL);
62 
63     FREE(rbuf);
64 }
65 
66 static void *
r300_buffer_transfer_map(struct pipe_context * context,struct pipe_resource * resource,unsigned level,unsigned usage,const struct pipe_box * box,struct pipe_transfer ** ptransfer)67 r300_buffer_transfer_map( struct pipe_context *context,
68                           struct pipe_resource *resource,
69                           unsigned level,
70                           unsigned usage,
71                           const struct pipe_box *box,
72                           struct pipe_transfer **ptransfer )
73 {
74     struct r300_context *r300 = r300_context(context);
75     struct radeon_winsys *rws = r300->screen->rws;
76     struct r300_resource *rbuf = r300_resource(resource);
77     struct pipe_transfer *transfer;
78     uint8_t *map;
79 
80     transfer = slab_alloc(&r300->pool_transfers);
81     transfer->resource = resource;
82     transfer->level = level;
83     transfer->usage = usage;
84     transfer->box = *box;
85     transfer->stride = 0;
86     transfer->layer_stride = 0;
87 
88     if (rbuf->malloced_buffer) {
89         *ptransfer = transfer;
90         return rbuf->malloced_buffer + box->x;
91     }
92 
93     if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
94         !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
95         assert(usage & PIPE_TRANSFER_WRITE);
96 
97         /* Check if mapping this buffer would cause waiting for the GPU. */
98         if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
99             !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
100             unsigned i;
101             struct pb_buffer *new_buf;
102 
103             /* Create a new one in the same pipe_resource. */
104             new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
105                                                R300_BUFFER_ALIGNMENT,
106                                                rbuf->domain, 0);
107             if (new_buf) {
108                 /* Discard the old buffer. */
109                 pb_reference(&rbuf->buf, NULL);
110                 rbuf->buf = new_buf;
111 
112                 /* We changed the buffer, now we need to bind it where the old one was bound. */
113                 for (i = 0; i < r300->nr_vertex_buffers; i++) {
114                     if (r300->vertex_buffer[i].buffer == &rbuf->b.b) {
115                         r300->vertex_arrays_dirty = TRUE;
116                         break;
117                     }
118                 }
119             }
120         }
121     }
122 
123     /* Buffers are never used for write, therefore mapping for read can be
124      * unsynchronized. */
125     if (!(usage & PIPE_TRANSFER_WRITE)) {
126        usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
127     }
128 
129     map = rws->buffer_map(rbuf->buf, r300->cs, usage);
130 
131     if (!map) {
132         slab_free(&r300->pool_transfers, transfer);
133         return NULL;
134     }
135 
136     *ptransfer = transfer;
137     return map + box->x;
138 }
139 
r300_buffer_transfer_unmap(struct pipe_context * pipe,struct pipe_transfer * transfer)140 static void r300_buffer_transfer_unmap( struct pipe_context *pipe,
141                                         struct pipe_transfer *transfer )
142 {
143     struct r300_context *r300 = r300_context(pipe);
144 
145     slab_free(&r300->pool_transfers, transfer);
146 }
147 
/* Resource vtable used for buffer resources (see r300_buffer_create).
 * NULL entries are operations not supported for buffers. */
static const struct u_resource_vtbl r300_buffer_vtbl =
{
   NULL,                               /* get_handle */
   r300_buffer_destroy,                /* resource_destroy */
   r300_buffer_transfer_map,           /* transfer_map */
   NULL,                               /* transfer_flush_region */
   r300_buffer_transfer_unmap,         /* transfer_unmap */
};
156 
r300_buffer_create(struct pipe_screen * screen,const struct pipe_resource * templ)157 struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
158 					 const struct pipe_resource *templ)
159 {
160     struct r300_screen *r300screen = r300_screen(screen);
161     struct r300_resource *rbuf;
162 
163     rbuf = MALLOC_STRUCT(r300_resource);
164 
165     rbuf->b.b = *templ;
166     rbuf->b.vtbl = &r300_buffer_vtbl;
167     pipe_reference_init(&rbuf->b.b.reference, 1);
168     rbuf->b.b.screen = screen;
169     rbuf->domain = RADEON_DOMAIN_GTT;
170     rbuf->buf = NULL;
171     rbuf->malloced_buffer = NULL;
172 
173     /* Allocate constant buffers and SWTCL vertex and index buffers in RAM.
174      * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that
175      * we can distinguish them from user-created buffers.
176      */
177     if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
178         (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) {
179         rbuf->malloced_buffer = align_malloc(templ->width0, 64);
180         return &rbuf->b.b;
181     }
182 
183     rbuf->buf =
184         r300screen->rws->buffer_create(r300screen->rws, rbuf->b.b.width0,
185                                        R300_BUFFER_ALIGNMENT,
186                                        rbuf->domain, 0);
187     if (!rbuf->buf) {
188         FREE(rbuf);
189         return NULL;
190     }
191     return &rbuf->b.b;
192 }
193