• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2 
3 /*
4  * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Rob Clark <robclark@freedesktop.org>
27  */
28 
29 #ifdef HAVE_CONFIG_H
30 # include <config.h>
31 #endif
32 
33 #include <assert.h>
34 
35 #include "freedreno_ringbuffer.h"
36 #include "kgsl_priv.h"
37 
38 
39 /* because kgsl tries to validate the gpuaddr on kernel side in ISSUEIBCMDS,
40  * we can't use normal gem bo's for ringbuffer..  someday the kernel part
41  * needs to be reworked into a single sane drm driver :-/
42  */
/* Backing storage for a kgsl ringbuffer (see comment above: ISSUEIBCMDS
 * validates gpuaddr kernel-side, so a normal gem bo can't be used).
 */
struct kgsl_rb_bo {
	struct kgsl_pipe *pipe;     /* owning pipe; fd used for ioctl/munmap */
	void    *hostptr;           /* CPU mapping of the buffer (drm_mmap) */
	uint32_t gpuaddr;           /* GPU address returned by GPUMEM_ALLOC */
	uint32_t size;              /* requested (unaligned) size in bytes */
};
49 
/* kgsl backend ringbuffer: generic fd_ringbuffer plus its backing bo.
 * 'base' must be first so the to_kgsl_ringbuffer() downcast is valid.
 */
struct kgsl_ringbuffer {
	struct fd_ringbuffer base;
	struct kgsl_rb_bo *bo;
};
54 
/* Downcast from the generic ringbuffer to the kgsl backend type.  Safe
 * because 'base' is the first member of struct kgsl_ringbuffer.
 */
static inline struct kgsl_ringbuffer * to_kgsl_ringbuffer(struct fd_ringbuffer *x)
{
	return (struct kgsl_ringbuffer *)x;
}
59 
kgsl_rb_bo_del(struct kgsl_rb_bo * bo)60 static void kgsl_rb_bo_del(struct kgsl_rb_bo *bo)
61 {
62 	struct kgsl_sharedmem_free req = {
63 			.gpuaddr = bo->gpuaddr,
64 	};
65 	int ret;
66 
67 	drm_munmap(bo->hostptr, bo->size);
68 
69 	ret = ioctl(bo->pipe->fd, IOCTL_KGSL_SHAREDMEM_FREE, &req);
70 	if (ret) {
71 		ERROR_MSG("sharedmem free failed: %s", strerror(errno));
72 	}
73 
74 	free(bo);
75 }
76 
/* Allocate a ringbuffer bo: GPU memory via GPUMEM_ALLOC (page-aligned,
 * GPU-read-only) plus a writable CPU mapping of it through the kgsl fd.
 *
 * Returns NULL on failure; on success the caller owns the bo and must
 * release it with kgsl_rb_bo_del().
 */
static struct kgsl_rb_bo * kgsl_rb_bo_new(struct kgsl_pipe *pipe, uint32_t size)
{
	struct kgsl_rb_bo *bo;
	struct kgsl_gpumem_alloc req = {
			.size = ALIGN(size, 4096),
			.flags = KGSL_MEMFLAGS_GPUREADONLY,
	};
	int ret;

	bo = calloc(1, sizeof(*bo));
	if (!bo) {
		ERROR_MSG("allocation failed");
		return NULL;
	}
	ret = ioctl(pipe->fd, IOCTL_KGSL_GPUMEM_ALLOC, &req);
	if (ret) {
		ERROR_MSG("gpumem allocation failed: %s", strerror(errno));
		goto fail;
	}

	bo->pipe = pipe;
	bo->gpuaddr = req.gpuaddr;
	bo->size = size;
	bo->hostptr = drm_mmap(NULL, size, PROT_WRITE|PROT_READ,
				MAP_SHARED, pipe->fd, req.gpuaddr);
	/* original code never checked the mmap result, handing MAP_FAILED
	 * back to the caller on error:
	 */
	if (bo->hostptr == MAP_FAILED) {
		ERROR_MSG("ringbuffer mmap failed: %s", strerror(errno));
		bo->hostptr = NULL;   /* so cleanup doesn't munmap(MAP_FAILED) */
		goto fail;
	}

	return bo;
fail:
	if (bo)
		kgsl_rb_bo_del(bo);
	return NULL;
}
109 
kgsl_ringbuffer_hostptr(struct fd_ringbuffer * ring)110 static void * kgsl_ringbuffer_hostptr(struct fd_ringbuffer *ring)
111 {
112 	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
113 	return kgsl_ring->bo->hostptr;
114 }
115 
kgsl_ringbuffer_flush(struct fd_ringbuffer * ring,uint32_t * last_start)116 static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start)
117 {
118 	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
119 	struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(ring->pipe);
120 	uint32_t offset = (uint8_t *)last_start - (uint8_t *)ring->start;
121 	struct kgsl_ibdesc ibdesc = {
122 			.gpuaddr     = kgsl_ring->bo->gpuaddr + offset,
123 			.hostptr     = last_start,
124 			.sizedwords  = ring->cur - last_start,
125 	};
126 	struct kgsl_ringbuffer_issueibcmds req = {
127 			.drawctxt_id = kgsl_pipe->drawctxt_id,
128 			.ibdesc_addr = (unsigned long)&ibdesc,
129 			.numibs      = 1,
130 			.flags       = KGSL_CONTEXT_SUBMIT_IB_LIST,
131 	};
132 	int ret;
133 
134 	kgsl_pipe_pre_submit(kgsl_pipe);
135 
136 	/* z180_cmdstream_issueibcmds() is made of fail: */
137 	if (ring->pipe->id == FD_PIPE_2D) {
138 		/* fix up size field in last cmd packet */
139 		uint32_t last_size = (uint32_t)(ring->cur - last_start);
140 		/* 5 is length of first packet, 2 for the two 7f000000's */
141 		last_start[2] = last_size - (5 + 2);
142 		ibdesc.gpuaddr = kgsl_ring->bo->gpuaddr;
143 		ibdesc.hostptr = kgsl_ring->bo->hostptr;
144 		ibdesc.sizedwords = 0x145;
145 		req.timestamp = (uint32_t)kgsl_ring->bo->hostptr;
146 	}
147 
148 	do {
149 		ret = ioctl(kgsl_pipe->fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &req);
150 	} while ((ret == -1) && ((errno == EINTR) || (errno == EAGAIN)));
151 	if (ret)
152 		ERROR_MSG("issueibcmds failed!  %d (%s)", ret, strerror(errno));
153 
154 	ring->last_timestamp = req.timestamp;
155 	ring->last_start = ring->cur;
156 
157 	kgsl_pipe_post_submit(kgsl_pipe, req.timestamp);
158 
159 	return ret;
160 }
161 
kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer * ring,const struct fd_reloc * r)162 static void kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
163 		const struct fd_reloc *r)
164 {
165 	struct kgsl_bo *kgsl_bo = to_kgsl_bo(r->bo);
166 	uint32_t addr = kgsl_bo_gpuaddr(kgsl_bo, r->offset);
167 	assert(addr);
168 	if (r->shift < 0)
169 		addr >>= -r->shift;
170 	else
171 		addr <<= r->shift;
172 	(*ring->cur++) = addr | r->or;
173 	kgsl_pipe_add_submit(to_kgsl_pipe(ring->pipe), kgsl_bo);
174 }
175 
kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer * ring,struct fd_ringmarker * target,struct fd_ringmarker * end)176 static void kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
177 		struct fd_ringmarker *target, struct fd_ringmarker *end)
178 {
179 	struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target->ring);
180 	(*ring->cur++) = target_ring->bo->gpuaddr +
181 			(uint8_t *)target->cur - (uint8_t *)target->ring->start;
182 }
183 
kgsl_ringbuffer_destroy(struct fd_ringbuffer * ring)184 static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
185 {
186 	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
187 	if (ring->last_timestamp)
188 		fd_pipe_wait(ring->pipe, ring->last_timestamp);
189 	if (kgsl_ring->bo)
190 		kgsl_rb_bo_del(kgsl_ring->bo);
191 	free(kgsl_ring);
192 }
193 
194 static const struct fd_ringbuffer_funcs funcs = {
195 		.hostptr = kgsl_ringbuffer_hostptr,
196 		.flush = kgsl_ringbuffer_flush,
197 		.emit_reloc = kgsl_ringbuffer_emit_reloc,
198 		.emit_reloc_ring = kgsl_ringbuffer_emit_reloc_ring,
199 		.destroy = kgsl_ringbuffer_destroy,
200 };
201 
kgsl_ringbuffer_new(struct fd_pipe * pipe,uint32_t size)202 drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
203 		uint32_t size)
204 {
205 	struct kgsl_ringbuffer *kgsl_ring;
206 	struct fd_ringbuffer *ring = NULL;
207 
208 	kgsl_ring = calloc(1, sizeof(*kgsl_ring));
209 	if (!kgsl_ring) {
210 		ERROR_MSG("allocation failed");
211 		goto fail;
212 	}
213 
214 	ring = &kgsl_ring->base;
215 	ring->funcs = &funcs;
216 
217 	kgsl_ring->bo = kgsl_rb_bo_new(to_kgsl_pipe(pipe), size);
218 	if (!kgsl_ring->bo) {
219 		ERROR_MSG("ringbuffer allocation failed");
220 		goto fail;
221 	}
222 
223 	return ring;
224 fail:
225 	if (ring)
226 		fd_ringbuffer_del(ring);
227 	return NULL;
228 }
229