/*
 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Helper lib to track gpu buffer contents/addresses, and to map between gpu
 * and host addresses while decoding cmdstreams/crashdumps.
 */
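
/*
 * A minimal usage sketch (illustrative only; actual call sites live in the
 * cmdstream/crashdump decoders that use this lib):
 *
 *   add_buffer(gpuaddr, len, hostptr);  // record a buffer snapshot (takes ownership of hostptr)
 *   void *p = hostptr(addr);            // map a gpu address to host memory
 *   uint64_t a = gpuaddr(p);            // map a host pointer back to its gpu address
 *   reset_buffers();                    // free all recorded snapshots
 */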

#include <assert.h>
#include <stdlib.h>

#include "buffers.h"

struct buffer {
	void *hostptr;
	unsigned int len;
	uint64_t gpuaddr;

	/* for 'once' mode: for buffers containing cmdstream, track per offset
	 * into the buffer which modes it has already been dumped in:
	 */
	struct {
		unsigned offset;
		unsigned dumped_mask;
	} offsets[64];
	unsigned noffsets;
};

static struct buffer buffers[512];
static int nbuffers;

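/* Return true if gpuaddr falls within buf's gpu address range.  Note that
 * the 'len' argument is currently unused; only the start address is checked.
 */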
static int
buffer_contains_gpuaddr(struct buffer *buf, uint64_t gpuaddr, uint32_t len)
{
	return (buf->gpuaddr <= gpuaddr) && (gpuaddr < (buf->gpuaddr + buf->len));
}

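/* Return true if hostptr falls within buf's mapped host address range. */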
static int
buffer_contains_hostptr(struct buffer *buf, void *hostptr)
{
	return (buf->hostptr <= hostptr) && (hostptr < (buf->hostptr + buf->len));
}


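/**
 * Translate a host pointer into its gpu address, or return 0 if it does not
 * fall within any tracked buffer.
 */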
uint64_t
gpuaddr(void *hostptr)
{
	int i;
	for (i = 0; i < nbuffers; i++)
		if (buffer_contains_hostptr(&buffers[i], hostptr))
			return buffers[i].gpuaddr + (hostptr - buffers[i].hostptr);
	return 0;
}

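/**
 * Return the base (start) gpu address of the buffer containing gpuaddr, or 0
 * if gpuaddr is not within any tracked buffer.
 */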
uint64_t
gpubaseaddr(uint64_t gpuaddr)
{
	int i;
	if (!gpuaddr)
		return 0;
	for (i = 0; i < nbuffers; i++)
		if (buffer_contains_gpuaddr(&buffers[i], gpuaddr, 0))
			return buffers[i].gpuaddr;
	return 0;
}

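/**
 * Translate a gpu address into the corresponding host pointer, or return
 * NULL if it does not fall within any tracked buffer.
 */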
void *
hostptr(uint64_t gpuaddr)
{
	int i;
	if (!gpuaddr)
		return 0;
	for (i = 0; i < nbuffers; i++)
		if (buffer_contains_gpuaddr(&buffers[i], gpuaddr, 0))
			return buffers[i].hostptr + (gpuaddr - buffers[i].gpuaddr);
	return 0;
}

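/**
 * Return the number of bytes from gpuaddr to the end of its containing
 * buffer, or 0 if gpuaddr is not within any tracked buffer.
 */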
unsigned
hostlen(uint64_t gpuaddr)
{
	int i;
	if (!gpuaddr)
		return 0;
	for (i = 0; i < nbuffers; i++)
		if (buffer_contains_gpuaddr(&buffers[i], gpuaddr, 0))
			return buffers[i].len + buffers[i].gpuaddr - gpuaddr;
	return 0;
}

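/**
 * Check whether the cmdstream at gpuaddr has already been dumped for all of
 * the modes in enable_mask.  Returns true if so; otherwise records those
 * modes as dumped for this offset and returns false.
 */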
bool
has_dumped(uint64_t gpuaddr, unsigned enable_mask)
{
	if (!gpuaddr)
		return false;

	for (int i = 0; i < nbuffers; i++) {
		if (buffer_contains_gpuaddr(&buffers[i], gpuaddr, 0)) {
			struct buffer *b = &buffers[i];
			assert(gpuaddr >= b->gpuaddr);
			unsigned offset = gpuaddr - b->gpuaddr;

			unsigned n = 0;
			while (n < b->noffsets) {
				if (offset == b->offsets[n].offset)
					break;
				n++;
			}

			/* if needed, allocate a new offset entry: */
			if (n == b->noffsets) {
				b->noffsets++;
				assert(b->noffsets < ARRAY_SIZE(b->offsets));
				b->offsets[n].dumped_mask = 0;
				b->offsets[n].offset = offset;
			}

			if ((b->offsets[n].dumped_mask & enable_mask) == enable_mask)
				return true;

			b->offsets[n].dumped_mask |= enable_mask;

			return false;
		}
	}

	return false;
}

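/**
 * Free the contents of all tracked buffers and clear the table.
 */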
void
reset_buffers(void)
{
	for (int i = 0; i < nbuffers; i++) {
		free(buffers[i].hostptr);
		buffers[i].hostptr = NULL;
		buffers[i].len = 0;
		buffers[i].noffsets = 0;
	}
	nbuffers = 0;
}

/**
 * Record buffer contents; takes ownership of hostptr (freed in
 * reset_buffers()).
 */
void
add_buffer(uint64_t gpuaddr, unsigned int len, void *hostptr)
{
	int i;

	for (i = 0; i < nbuffers; i++) {
		if (buffers[i].gpuaddr == gpuaddr)
			break;
	}

	if (i == nbuffers) {
		/* some traces (e.g. test-perf with some blob versions) seem to
		 * generate an unreasonable number of gpu buffers (a leak?), so
		 * just ignore any buffers beyond the table size.
		 */
		if (nbuffers >= ARRAY_SIZE(buffers)) {
			free(hostptr);
			return;
		}
		nbuffers++;
	}

	buffers[i].hostptr = hostptr;
	buffers[i].len     = len;
	buffers[i].gpuaddr = gpuaddr;
}