1 /*
2 * Copyright © 2015-2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #undef _FILE_OFFSET_BITS /* prevent #define open open64 */
25
26 #include <string.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdint.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <unistd.h>
33 #include <sys/ioctl.h>
34 #include <sys/stat.h>
35 #include <sys/mman.h>
36 #include <sys/sysmacros.h>
37 #include <dlfcn.h>
38 #include <pthread.h>
39 #include "drm-uapi/i915_drm.h"
40
41 #include "util/hash_table.h"
42 #include "util/u_math.h"
43
44 #define MESA_LOG_TAG "INTEL-SANITIZE-GPU"
45 #include "util/log.h"
46 #include "common/intel_mem.h"
47
/* Pointers to the real libc entry points, resolved via dlsym(RTLD_NEXT)
 * in the constructor init() below.  Every interposed wrapper in this file
 * forwards through these. */
static int (*libc_open)(const char *pathname, int flags, mode_t mode);
static int (*libc_close)(int fd);
static int (*libc_ioctl)(int fd, unsigned long request, void *argp);
static int (*libc_fcntl)(int fd, int cmd, int param);

/* Linux major device number for DRM character devices (used by is_i915()
 * to cheaply reject non-DRM fds before issuing any ioctl). */
#define DRM_MAJOR 226

/* TODO: we want to make sure that the padding forces
 * the BO to take another page on the (PP)GTT; 4KB
 * may or may not be the page size for the BO. Indeed,
 * depending on GPU, kernel version and GEM size, the
 * page size can be one of 4KB, 64KB or 2M.
 */
#define PADDING_SIZE 4096
62
/* A bo-size hash table shared (refcounted) between dup'd fds. */
struct refcnt_hash_table {
   struct hash_table *t;
   int refcnt;
};

/* Protects fds_to_bo_sizes and every per-fd bo-size table.
 * Made static: exporting a global literally named "mutex" from an
 * LD_PRELOADed library could shadow an unrelated symbol of the same
 * name in the host application or other libraries. */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
#define MUTEX_LOCK() do {                        \
   if (unlikely(pthread_mutex_lock(&mutex))) {   \
      mesa_loge("mutex_lock failed");            \
      abort();                                   \
   }                                             \
} while (0)
#define MUTEX_UNLOCK() do {                      \
   if (unlikely(pthread_mutex_unlock(&mutex))) { \
      mesa_loge("mutex_unlock failed");          \
      abort();                                   \
   }                                             \
} while (0)

/* fd -> struct refcnt_hash_table* mapping bo handle -> client-visible size. */
static struct hash_table *fds_to_bo_sizes = NULL;
83
84 static inline struct hash_table*
bo_size_table(int fd)85 bo_size_table(int fd)
86 {
87 struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
88 (void*)(uintptr_t)fd);
89 return e ? ((struct refcnt_hash_table*)e->data)->t : NULL;
90 }
91
92 static inline uint64_t
bo_size(int fd,uint32_t handle)93 bo_size(int fd, uint32_t handle)
94 {
95 struct hash_table *t = bo_size_table(fd);
96 if (!t)
97 return UINT64_MAX;
98 struct hash_entry *e = _mesa_hash_table_search(t, (void*)(uintptr_t)handle);
99 return e ? (uint64_t)(uintptr_t)e->data : UINT64_MAX;
100 }
101
102 static inline bool
is_drm_fd(int fd)103 is_drm_fd(int fd)
104 {
105 return !!bo_size_table(fd);
106 }
107
108 static inline void
add_drm_fd(int fd)109 add_drm_fd(int fd)
110 {
111 struct refcnt_hash_table *r = malloc(sizeof(*r));
112 r->refcnt = 1;
113 r->t = _mesa_pointer_hash_table_create(NULL);
114 _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)fd,
115 (void*)(uintptr_t)r);
116 }
117
118 static inline void
dup_drm_fd(int old_fd,int new_fd)119 dup_drm_fd(int old_fd, int new_fd)
120 {
121 struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
122 (void*)(uintptr_t)old_fd);
123 struct refcnt_hash_table *r = e->data;
124 r->refcnt++;
125 _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)new_fd,
126 (void*)(uintptr_t)r);
127 }
128
129 static inline void
del_drm_fd(int fd)130 del_drm_fd(int fd)
131 {
132 struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
133 (void*)(uintptr_t)fd);
134 struct refcnt_hash_table *r = e->data;
135 if (!--r->refcnt) {
136 _mesa_hash_table_remove(fds_to_bo_sizes, e);
137 _mesa_hash_table_destroy(r->t, NULL);
138 free(r);
139 }
140 }
141
/* Our goal is not to have noise good enough for crypto,
 * but instead values that are unique-ish enough that
 * it is incredibly unlikely that a buffer overwrite
 * will produce the exact same values.
 */
static uint8_t
next_noise_value(uint8_t prev_noise)
{
   /* One step of a byte-wide linear congruential generator. */
   return (uint8_t)(prev_noise * 103u + 227u);
}

static void
fill_noise_buffer(uint8_t *dst, uint8_t start, uint32_t length)
{
   /* Emit the LCG stream seeded with 'start' into dst[0..length). */
   uint8_t value = start;
   uint8_t *out = dst;
   uint32_t remaining = length;

   while (remaining--) {
      *out++ = value;
      value = next_noise_value(value);
   }
}
162
/* Verify the noise pattern in the guard page that create_with_padding()
 * appended past the end of a bo's client-visible size.  Returns false on
 * any mismatch (or if the pad cannot be mapped); callers treat false as an
 * out-of-bounds write. */
static bool
padding_is_good(int fd, uint32_t handle)
{
   /* The pad starts at the original size rounded up to 4KB — the same
    * offset create_with_padding() used when seeding it. */
   struct drm_i915_gem_mmap mmap_arg = {
      .handle = handle,
      .offset = align64(bo_size(fd, handle), 4096),
      .size = PADDING_SIZE,
      .flags = 0,
   };

   /* Unknown bo, maybe prime or userptr. Ignore */
   if (mmap_arg.offset == UINT64_MAX)
      return true;

   uint8_t *mapped;
   int ret;
   uint8_t expected_value;

   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      /* NOTE(review): a failed map is reported the same as bad padding,
       * which makes exec_and_check_padding() abort — confirm that severity
       * is intended. */
      mesa_logd("Unable to map buffer %d for pad checking.", handle);
      return false;
   }

   mapped = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
   /* bah-humbug, we need to see the latest contents and
    * if the bo is not cache coherent we likely need to
    * invalidate the cache lines to get it.
    */
   intel_invalidate_range(mapped, PADDING_SIZE);
#endif

   /* Regenerate the noise stream from the same seed (handle & 0xFF) used
    * at creation time and compare byte-by-byte. */
   expected_value = handle & 0xFF;
   for (uint32_t i = 0; i < PADDING_SIZE; ++i) {
      if (expected_value != mapped[i]) {
         munmap(mapped, PADDING_SIZE);
         return false;
      }
      expected_value = next_noise_value(expected_value);
   }
   munmap(mapped, PADDING_SIZE);

   return true;
}
208
/* Intercepted DRM_IOCTL_I915_GEM_CREATE: allocate PADDING_SIZE extra bytes
 * past the 4KB-aligned requested size and seed them with a deterministic
 * noise pattern, so later out-of-bounds GPU writes can be detected by
 * padding_is_good().  Returns the kernel's result code. */
static int
create_with_padding(int fd, struct drm_i915_gem_create *create)
{
   uint64_t original_size = create->size;

   create->size = align64(original_size, 4096) + PADDING_SIZE;
   int ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, create);
   /* Hide the padding from the caller: report the size it asked for. */
   create->size = original_size;

   if (ret != 0)
      return ret;

   uint8_t *noise_values;
   struct drm_i915_gem_mmap mmap_arg = {
      .handle = create->handle,
      .offset = align64(create->size, 4096),
      .size = PADDING_SIZE,
      .flags = 0,
   };

   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      /* Creation succeeded but we can't seed the pad; leave the bo
       * untracked (bo_size() returns UINT64_MAX, padding_is_good() skips
       * it) and report success to the caller. */
      mesa_logd("Unable to map buffer %d for pad creation.\n", create->handle);
      return 0;
   }

   noise_values = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
   fill_noise_buffer(noise_values, create->handle & 0xFF,
                     PADDING_SIZE);
   munmap(noise_values, PADDING_SIZE);

   /* Remember the client-visible size so the pad offset can be recomputed.
    * NOTE(review): the size is stored via a (void*) cast — truncates on a
    * 32-bit build for sizes >= 4GB; confirm that is out of scope. */
   _mesa_hash_table_insert(bo_size_table(fd), (void*)(uintptr_t)create->handle,
                           (void*)(uintptr_t)create->size);

   return 0;
}
245
/* Intercepted EXECBUFFER2[_WR]: run the real submission, wait for the
 * batch to retire, then verify the guard page of every bo in the exec
 * list.  Aborts the process when any bo's padding was overwritten. */
static int
exec_and_check_padding(int fd, unsigned long request,
                       struct drm_i915_gem_execbuffer2 *exec)
{
   int ret = libc_ioctl(fd, request, exec);
   if (ret != 0)
      return ret;

   struct drm_i915_gem_exec_object2 *objects =
      (void*)(uintptr_t)exec->buffers_ptr;
   /* The batch bo is first or last in the object list depending on the
    * I915_EXEC_BATCH_FIRST flag. */
   uint32_t batch_bo = exec->flags & I915_EXEC_BATCH_FIRST ? objects[0].handle :
                       objects[exec->buffer_count - 1].handle;

   /* timeout_ns == -1 waits indefinitely, so the GPU is done writing
    * before we inspect any padding. */
   struct drm_i915_gem_wait wait = {
      .bo_handle = batch_bo,
      .timeout_ns = -1,
   };
   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return ret;

   bool detected_out_of_bounds_write = false;

   /* Check every bo referenced by the submission, not just the batch. */
   for (int i = 0; i < exec->buffer_count; i++) {
      uint32_t handle = objects[i].handle;

      if (!padding_is_good(fd, handle)) {
         detected_out_of_bounds_write = true;
         mesa_loge("Detected buffer out-of-bounds write in bo %d", handle);
      }
   }

   if (unlikely(detected_out_of_bounds_write)) {
      abort();
   }

   return 0;
}
284
285 static int
gem_close(int fd,struct drm_gem_close * close)286 gem_close(int fd, struct drm_gem_close *close)
287 {
288 int ret = libc_ioctl(fd, DRM_IOCTL_GEM_CLOSE, close);
289 if (ret != 0)
290 return ret;
291
292 struct hash_table *t = bo_size_table(fd);
293 struct hash_entry *e =
294 _mesa_hash_table_search(t, (void*)(uintptr_t)close->handle);
295
296 if (e)
297 _mesa_hash_table_remove(t, e);
298
299 return 0;
300 }
301
/* Return true when fd refers to a DRM character device whose kernel driver
 * reports its name as "i915". */
static bool
is_i915(int fd) {
   struct stat stat;
   if (fstat(fd, &stat))
      return false;

   /* Cheap pre-filter: must be a char device with the DRM major number. */
   if (!S_ISCHR(stat.st_mode) || major(stat.st_rdev) != DRM_MAJOR)
      return false;

   /* Ask the kernel for the driver name; four characters plus the
    * zero-initialization of the array is enough to match "i915". */
   char name[5] = "";
   drm_version_t version = {
      .name = name,
      .name_len = sizeof(name) - 1,
   };
   if (libc_ioctl(fd, DRM_IOCTL_VERSION, &version))
      return false;

   return strcmp("i915", name) == 0;
}
321
/* Interposed open(): forward to libc, then start tracking the fd if it is
 * an i915 DRM device. */
__attribute__ ((visibility ("default"))) int
open(const char *path, int flags, ...)
{
   va_list args;
   mode_t mode;

   /* Always pull a mode argument; callers without O_CREAT didn't pass one
    * and the value is then ignored by the real open().
    * NOTE(review): reading a va_arg that was never passed is technically
    * undefined behavior — common interposer practice, but worth noting. */
   va_start(args, flags);
   mode = va_arg(args, int);
   va_end(args);

   int fd = libc_open(path, flags, mode);

   MUTEX_LOCK();

   if (fd >= 0 && is_i915(fd))
      add_drm_fd(fd);

   MUTEX_UNLOCK();

   return fd;
}
343
/* open64() resolves to the same interposer as open(); the
 * "#undef _FILE_OFFSET_BITS" at the top of the file keeps libc headers
 * from macro-renaming open to open64 and defeating this alias. */
__attribute__ ((visibility ("default"), alias ("open"))) int
open64(const char *path, int flags, ...);
346
/* Interposed close(): drop our bookkeeping for the fd, then close it.
 * Bookkeeping is removed first, under the lock, so a racing open() cannot
 * see a stale entry for a reused fd number. */
__attribute__ ((visibility ("default"))) int
close(int fd)
{
   MUTEX_LOCK();

   if (is_drm_fd(fd))
      del_drm_fd(fd);

   MUTEX_UNLOCK();

   return libc_close(fd);
}
359
360 __attribute__ ((visibility ("default"))) int
fcntl(int fd,int cmd,...)361 fcntl(int fd, int cmd, ...)
362 {
363 va_list args;
364 int param;
365
366 va_start(args, cmd);
367 param = va_arg(args, int);
368 va_end(args);
369
370 int res = libc_fcntl(fd, cmd, param);
371
372 MUTEX_LOCK();
373
374 if (is_drm_fd(fd) && cmd == F_DUPFD_CLOEXEC)
375 dup_drm_fd(fd, res);
376
377 MUTEX_UNLOCK();
378
379 return res;
380 }
381
/* Interposed ioctl(): dispatch the GEM requests this tool cares about to
 * the padding machinery; everything else goes straight to the real ioctl.
 * The whole body runs under the global mutex, including the forwarded
 * kernel call. */
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   int res;
   va_list args;
   void *argp;

   MUTEX_LOCK();

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Safety net: catch i915 fds that were opened behind our back (e.g. a
    * path we do not interpose) the first time a DRM ioctl appears on them. */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE && !is_drm_fd(fd) && is_i915(fd)) {
      mesa_loge("missed drm fd %d", fd);
      add_drm_fd(fd);
   }

   if (is_drm_fd(fd)) {
      switch (request) {
      case DRM_IOCTL_GEM_CLOSE:
         res = gem_close(fd, (struct drm_gem_close*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_CREATE:
         res = create_with_padding(fd, (struct drm_i915_gem_create*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR:
         res = exec_and_check_padding(fd, request,
                                      (struct drm_i915_gem_execbuffer2*)argp);
         goto out;

      default:
         break;
      }
   }
   /* Not a request we wrap: forward unchanged. */
   res = libc_ioctl(fd, request, argp);

out:
   MUTEX_UNLOCK();
   return res;
}
426
/* Library constructor: runs when the shim is loaded (LD_PRELOAD/dlopen),
 * before the application can reach any interposed entry point. */
static void __attribute__ ((constructor))
init(void)
{
   fds_to_bo_sizes = _mesa_pointer_hash_table_create(NULL);
   /* NOTE(review): dlsym() results are unchecked; if resolution ever
    * failed, the wrappers would call through NULL pointers. */
   libc_open = dlsym(RTLD_NEXT, "open");
   libc_close = dlsym(RTLD_NEXT, "close");
   libc_fcntl = dlsym(RTLD_NEXT, "fcntl");
   libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
}
436