• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <sys/mman.h>
25 #include <sys/syscall.h>
26 
27 #include "util/anon_file.h"
28 #include "anv_private.h"
29 
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   /* No real GEM on this backend: back the "BO" with an anonymous file and
    * hand its file descriptor back as the GEM handle.  0 is the invalid
    * handle value; the assert catches the (pathological) case where fd 0
    * happened to be free and was returned.
    */
   (void) device;

   int handle = os_create_anonymous_file(size, "fake bo");
   if (handle == -1)
      return 0;

   assert(handle != 0);
   return handle;
}
41 
void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   /* GEM handles are plain file descriptors on this backend (see
    * anv_gem_create()), so releasing one is just close(2).
    */
   close(gem_handle);
}
47 
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   /* The flags argument carries I915_GEM_MMAP-specific bits that have no
    * meaning for a plain file mapping, so it is deliberately ignored.
    * The handle really is a file descriptor here (see anv_gem_create()),
    * which lets us map it directly.  On failure this returns MAP_FAILED,
    * exactly like mmap(2) itself.
    */
   (void) device;
   (void) flags;

   return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
               gem_handle, offset);
}
58 
/* Thin wrapper around munmap(), the counterpart of anv_gem_mmap().  Unlike
 * the real i915 implementation this stub has no valgrind bookkeeping to do,
 * since the mapping is backed by an ordinary anonymous file.
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   munmap(p, size);
}
67 
uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   /* There is no real userptr support: the caller's memory is ignored and a
    * fresh anonymous file of the requested size stands in for the BO, same
    * as anv_gem_create().  0 is the invalid handle value; the assert
    * catches the (pathological) case where fd 0 happened to be free.
    */
   (void) device;
   (void) mem;

   int handle = os_create_anonymous_file(size, "fake bo");
   if (handle == -1)
      return 0;

   assert(handle != 0);
   return handle;
}
79 
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   /* Nothing ever executes on the stub backend, so a BO is never busy. */
   (void) device;
   (void) gem_handle;
   return 0;
}
85 
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   /* No GPU to wait on: report immediate completion and leave the
    * remaining-timeout value untouched.
    */
   (void) device;
   (void) gem_handle;
   (void) timeout_ns;
   return 0;
}
91 
int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   /* Submission is a no-op without hardware; pretend it succeeded. */
   (void) device;
   (void) execbuf;
   return 0;
}
98 
int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   /* Tiling state is meaningless without hardware; accept and ignore. */
   (void) device;
   (void) gem_handle;
   (void) stride;
   (void) tiling;
   return 0;
}
105 
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   /* Every stub BO reports as linear (tiling mode 0). */
   (void) device;
   (void) gem_handle;
   return 0;
}
111 
int
anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle,
                    uint32_t caching)
{
   /* Caching mode is irrelevant without hardware; accept and ignore. */
   (void) device;
   (void) gem_handle;
   (void) caching;
   return 0;
}
118 
int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   /* Domain tracking is irrelevant without hardware; accept and ignore. */
   (void) device;
   (void) gem_handle;
   (void) read_domains;
   (void) write_domain;
   return 0;
}
125 
int
anv_gem_get_param(int fd, uint32_t param)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
131 
uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
   /* Report every DRM capability as absent/zero on the stub backend. */
   (void) fd;
   (void) capability;
   return 0;
}
137 
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
143 
int
anv_gem_create_context(struct anv_device *device)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
149 
int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
155 
int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
161 
int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
167 
bool
anv_gem_has_context_priority(int fd)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
173 
int
anv_gem_gpu_get_reset_stats(struct anv_device *device,
                            uint32_t *active, uint32_t *pending)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
180 
int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
186 
uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
192 
int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
198 
int
anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
204 
int
anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                 uint32_t handle, int fd)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
211 
uint32_t
anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
217 
void
anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
223 
int
anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
229 
uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
235 
void
anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
241 
bool
anv_gem_supports_syncobj_wait(int fd)
{
   /* Syncobj waiting needs a real DRM device, which the stub lacks. */
   (void) fd;
   return false;
}
247 
int
anv_gem_syncobj_wait(struct anv_device *device,
                     const uint32_t *handles, uint32_t num_handles,
                     int64_t abs_timeout_ns, bool wait_all)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
255 
int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
261 
int
anv_gem_syncobj_timeline_wait(struct anv_device *device,
                              const uint32_t *handles, const uint64_t *points,
                              uint32_t num_items, int64_t abs_timeout_ns,
                              bool wait_all, bool wait_materialize)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
270 
int
anv_gem_syncobj_timeline_signal(struct anv_device *device,
                                const uint32_t *handles, const uint64_t *points,
                                uint32_t num_items)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
278 
int
anv_gem_syncobj_timeline_query(struct anv_device *device,
                               const uint32_t *handles, uint64_t *points,
                               uint32_t num_items)
{
   /* Not implemented for the stub backend; must never be reached. */
   unreachable("Unused");
}
286