/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/mman.h>
#include <sys/syscall.h>

#include "util/anon_file.h"
#include "anv_private.h"

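/* Stub kernel-mode-driver backend.
 *
 * Every "GEM handle" handed out here is really a file descriptor to an
 * anonymous file, so buffer allocation, mapping and closing all work without
 * talking to a real kernel driver; anything that would need an actual GPU
 * (batch submission, VM binds) is either a no-op or reports VK_ERROR_UNKNOWN.
 * Presumably this is what lets the driver come up in no-hardware
 * configurations, e.g. for testing.
 */
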
static void
stub_gem_close(struct anv_device *device, struct anv_bo *bo)
{
   close(bo->gem_handle);
}

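/* Back the BO with an anonymous file and return its fd as the "GEM handle".
 * Handle 0 is reserved as the failure value, hence the assert that the fd we
 * got is never 0.
 */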
static uint32_t
stub_gem_create(struct anv_device *device,
                const struct intel_memory_class_instance **regions,
                uint16_t num_regions, uint64_t size,
                enum anv_bo_alloc_flags alloc_flags,
                uint64_t *actual_size)
{
   int fd = os_create_anonymous_file(size, "fake bo");
   if (fd == -1)
      return 0;

   assert(fd != 0);

   *actual_size = size;
   return fd;
}

static void *
stub_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
              uint64_t size)
{
   return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->gem_handle,
               offset);
}

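/* Nothing can actually be submitted without a kernel driver, so every
 * execution entry point simply reports VK_ERROR_UNKNOWN.
 */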
static VkResult
stub_execute_simple_batch(struct anv_queue *queue, struct anv_bo *batch_bo,
                          uint32_t batch_bo_size, bool is_companion_rcs_batch)
{
   return VK_ERROR_UNKNOWN;
}

static VkResult
stub_execute_trtt_batch(struct anv_sparse_submission *submit,
                        struct anv_trtt_batch_bo *trtt_bbo)
{
   return VK_ERROR_UNKNOWN;
}

static VkResult
stub_queue_exec_locked(struct anv_queue *queue,
                       uint32_t wait_count,
                       const struct vk_sync_wait *waits,
                       uint32_t cmd_buffer_count,
                       struct anv_cmd_buffer **cmd_buffers,
                       uint32_t signal_count,
                       const struct vk_sync_signal *signals,
                       struct anv_query_pool *perf_query_pool,
                       uint32_t perf_query_pass,
                       struct anv_utrace_submit *utrace_submit)
{
   return VK_ERROR_UNKNOWN;
}

static VkResult
stub_queue_exec_trace(struct anv_queue *queue, struct anv_utrace_submit *submit)
{
   return VK_ERROR_UNKNOWN;
}

static uint32_t
stub_bo_alloc_flags_to_bo_flags(struct anv_device *device,
                                enum anv_bo_alloc_flags alloc_flags)
{
   return 0;
}

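/* Map a BO through whichever KMD backend is active and, when the map
 * succeeds, register it with valgrind so accesses to the mapping are tracked
 * like a heap allocation.
 */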
void *
anv_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
             uint64_t size)
{
   void *map = device->kmd_backend->gem_mmap(device, bo, offset, size);

   if (map != MAP_FAILED)
      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));

   return map;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid.  Pair this with gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   /* Mirror the VALGRIND_MALLOCLIKE_BLOCK registration done in anv_gem_mmap()
    * so valgrind stops tracking the mapping.
    */
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

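/* Note that the stub ignores the caller's pointer entirely: the "userptr" BO
 * is backed by a fresh anonymous file of the requested size rather than by
 * `mem` itself.
 */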
static uint32_t
stub_gem_create_userptr(struct anv_device *device, void *mem, uint64_t size)
{
   int fd = os_create_anonymous_file(size, "fake bo");
   if (fd == -1)
      return 0;

   assert(fd != 0);

   return fd;
}

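/* There is nothing to wait on and no tiling state to manage without a kernel
 * driver, so these just report success.
 */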
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   return 0;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   return 0;
}

int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   return 0;
}

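/* Prime import/export is never exercised on the stub backend, so reaching
 * these would be a driver bug.
 */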
int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   unreachable("Unused");
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   unreachable("Unused");
}

VkResult
anv_gem_import_bo_alloc_flags_to_bo_flags(struct anv_device *device,
                                          struct anv_bo *bo,
                                          enum anv_bo_alloc_flags alloc_flags,
                                          uint32_t *bo_flags)
{
   return VK_SUCCESS;
}

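/* With no GPU VM to manage, (un)binding is a no-op that always succeeds. */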
static int
stub_vm_bind(struct anv_device *device, struct anv_sparse_submission *submit)
{
   return 0;
}

static int
stub_vm_bind_bo(struct anv_device *device, struct anv_bo *bo)
{
   return 0;
}

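/* The vtable reuses stub_vm_bind_bo for both vm_bind_bo and vm_unbind_bo,
 * since both are no-ops here.
 */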
const struct anv_kmd_backend *
anv_stub_kmd_backend_get(void)
{
   static const struct anv_kmd_backend stub_backend = {
      .gem_create = stub_gem_create,
      .gem_create_userptr = stub_gem_create_userptr,
      .gem_close = stub_gem_close,
      .gem_mmap = stub_gem_mmap,
      .vm_bind = stub_vm_bind,
      .vm_bind_bo = stub_vm_bind_bo,
      .vm_unbind_bo = stub_vm_bind_bo,
      .execute_simple_batch = stub_execute_simple_batch,
      .execute_trtt_batch = stub_execute_trtt_batch,
      .queue_exec_locked = stub_queue_exec_locked,
      .queue_exec_trace = stub_queue_exec_trace,
      .bo_alloc_flags_to_bo_flags = stub_bo_alloc_flags_to_bo_flags,
   };
   return &stub_backend;
}