/*
 * Copyright © 2016-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "aub_mem.h"
#include "util/anon_file.h"

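/* One buffer-object mapping handed back to the batch decoder.  Mappings
 * created locally with mmap() (rather than pointing at caller-provided data)
 * have unmap_after_use set so aub_mem_clear_bo_maps() knows to munmap() them.
 */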
struct bo_map {
   struct list_head link;
   struct gen_batch_decode_bo bo;
   bool unmap_after_use;
   bool ppgtt;
};

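/* A single GGTT page-table entry, keyed by the GPU virtual address it maps. */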
struct ggtt_entry {
   struct rb_node node;
   uint64_t virt_addr;
   uint64_t phys_addr;
};

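/* One 4KiB page of reconstructed physical memory.  Each page lives at
 * fd_offset in the anonymous mem_fd file so it can also be mmap'ed into
 * larger, contiguous BO views; aub_data points back at the source buffer of
 * the last write that touched the page.
 */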
struct phys_mem {
   struct rb_node node;
   uint64_t fd_offset;
   uint64_t phys_addr;
   uint8_t *data;
   const uint8_t *aub_data;
};

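/* Remember a BO so later decoder lookups can return it without rebuilding it. */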
static void
add_gtt_bo_map(struct aub_mem *mem, struct gen_batch_decode_bo bo, bool ppgtt, bool unmap_after_use)
{
   struct bo_map *m = calloc(1, sizeof(*m));

   m->ppgtt = ppgtt;
   m->bo = bo;
   m->unmap_after_use = unmap_after_use;
   list_add(&m->link, &mem->maps);
}

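/* Drop every cached BO mapping, unmapping the ones created by this file. */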
void
aub_mem_clear_bo_maps(struct aub_mem *mem)
{
   list_for_each_entry_safe(struct bo_map, i, &mem->maps, link) {
      if (i->unmap_after_use)
         munmap((void *)i->bo.map, i->bo.size);
      list_del(&i->link);
      free(i);
   }
}

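/* In-order successor of a GGTT entry, or NULL at the end of the tree. */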
static inline struct ggtt_entry *
ggtt_entry_next(struct ggtt_entry *entry)
{
   if (!entry)
      return NULL;
   struct rb_node *node = rb_node_next(&entry->node);
   if (!node)
      return NULL;
   return rb_node_data(struct ggtt_entry, node, node);
}

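/* Ordering helper shared by the GGTT and physical-memory red-black trees. */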
static inline int
cmp_uint64(uint64_t a, uint64_t b)
{
   if (a < b)
      return 1;
   if (a > b)
      return -1;
   return 0;
}

static inline int
cmp_ggtt_entry(const struct rb_node *node, const void *addr)
{
   struct ggtt_entry *entry = rb_node_data(struct ggtt_entry, node, node);
   return cmp_uint64(entry->virt_addr, *(const uint64_t *)addr);
}

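/* Return the GGTT entry for virt_addr, inserting a fresh one if none exists. */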
static struct ggtt_entry *
ensure_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&mem->ggtt, &virt_addr,
                                                cmp_ggtt_entry);
   int cmp = 0;
   if (!node || (cmp = cmp_ggtt_entry(node, &virt_addr))) {
      struct ggtt_entry *new_entry = calloc(1, sizeof(*new_entry));
      new_entry->virt_addr = virt_addr;
      rb_tree_insert_at(&mem->ggtt, node, &new_entry->node, cmp < 0);
      node = &new_entry->node;
   }

   return rb_node_data(struct ggtt_entry, node, node);
}

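/* Look up the GGTT entry for the page containing virt_addr, or NULL. */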
static struct ggtt_entry *
search_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
{
   virt_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&mem->ggtt, &virt_addr, cmp_ggtt_entry);

   if (!node)
      return NULL;

   return rb_node_data(struct ggtt_entry, node, node);
}

static inline int
cmp_phys_mem(const struct rb_node *node, const void *addr)
{
   struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
   return cmp_uint64(mem->phys_addr, *(uint64_t *)addr);
}

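/* Return the backing page for phys_addr, growing mem_fd by one 4KiB page and
 * mapping it read/write if the page has not been seen before.
 */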
static struct phys_mem *
ensure_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&mem->mem, &phys_addr, cmp_phys_mem);
   int cmp = 0;
   if (!node || (cmp = cmp_phys_mem(node, &phys_addr))) {
      struct phys_mem *new_mem = calloc(1, sizeof(*new_mem));
      new_mem->phys_addr = phys_addr;
      new_mem->fd_offset = mem->mem_fd_len;

      ASSERTED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
      assert(ftruncate_res == 0);

      new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                           mem->mem_fd, new_mem->fd_offset);
      assert(new_mem->data != MAP_FAILED);

      rb_tree_insert_at(&mem->mem, node, &new_mem->node, cmp < 0);
      node = &new_mem->node;
   }

   return rb_node_data(struct phys_mem, node, node);
}

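/* Look up an already-allocated physical page, or NULL if it was never written. */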
static struct phys_mem *
search_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
{
   phys_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&mem->mem, &phys_addr, cmp_phys_mem);

   if (!node)
      return NULL;

   return rb_node_data(struct phys_mem, node, node);
}

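/* Record a memory range whose contents the caller keeps mapped; the data
 * pointer is stored as-is (not copied), so it must stay valid while decoding.
 */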
void
aub_mem_local_write(void *_mem, uint64_t address,
                    const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {
      .map = data,
      .addr = address,
      .size = size,
   };
   add_gtt_bo_map(mem, bo, false, false);
}

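/* Write GGTT page-table entries.  'address' is a byte offset into the GGTT
 * (an array of 64-bit PTEs), so each entry written maps the next 4KiB of GPU
 * virtual address space.
 */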
void
aub_mem_ggtt_entry_write(void *_mem, uint64_t address,
                         const void *_data, uint32_t _size)
{
   struct aub_mem *mem = _mem;
   uint64_t virt_addr = (address / sizeof(uint64_t)) << 12;
   const uint64_t *data = _data;
   size_t size = _size / sizeof(*data);
   for (const uint64_t *entry = data;
        entry < data + size;
        entry++, virt_addr += 4096) {
      struct ggtt_entry *pt = ensure_ggtt_entry(mem, virt_addr);
      pt->phys_addr = *entry;
   }
}

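/* Copy data into the physical backing store, splitting the write at 4KiB page
 * boundaries and remembering the source pointer for each page touched.
 */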
void
aub_mem_phys_write(void *_mem, uint64_t phys_address,
                   const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   uint32_t to_write = size;
   for (uint64_t page = phys_address & ~0xfff; page < phys_address + size; page += 4096) {
      struct phys_mem *pmem = ensure_phys_mem(mem, page);
      uint64_t offset = MAX2(page, phys_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;
      memcpy(pmem->data + offset, data, size_this_page);
      pmem->aub_data = data - offset;
      data = (const uint8_t *)data + size_this_page;
   }
}

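/* Write through the GGTT: each page must have a valid (bit 0 set) entry; the
 * data is forwarded to aub_mem_phys_write() at the translated address.
 */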
void
aub_mem_ggtt_write(void *_mem, uint64_t virt_address,
                   const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   uint32_t to_write = size;
   for (uint64_t page = virt_address & ~0xfff; page < virt_address + size; page += 4096) {
      struct ggtt_entry *entry = search_ggtt_entry(mem, page);
      assert(entry && entry->phys_addr & 0x1);

      uint64_t offset = MAX2(page, virt_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;

      uint64_t phys_page = entry->phys_addr & ~0xfff; /* Clear the validity bits. */
      aub_mem_phys_write(mem, phys_page + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}

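/* Return a BO containing 'address' in the GGTT.  If no cached map covers it,
 * find the contiguous run of GGTT entries around the address, reserve an
 * anonymous range, and MAP_FIXED each known physical page into it.
 */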
struct gen_batch_decode_bo
aub_mem_get_ggtt_bo(void *_mem, uint64_t address)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {0};

   list_for_each_entry(struct bo_map, i, &mem->maps, link)
      if (!i->ppgtt && i->bo.addr <= address && i->bo.addr + i->bo.size > address)
         return i->bo;

   address &= ~0xfff;

   struct ggtt_entry *start =
      (struct ggtt_entry *)rb_tree_search_sloppy(&mem->ggtt, &address,
                                                 cmp_ggtt_entry);
   if (start && start->virt_addr < address)
      start = ggtt_entry_next(start);
   if (!start)
      return bo;

   struct ggtt_entry *last = start;
   for (struct ggtt_entry *i = ggtt_entry_next(last);
        i && last->virt_addr + 4096 == i->virt_addr;
        last = i, i = ggtt_entry_next(last))
      ;

   bo.addr = MIN2(address, start->virt_addr);
   bo.size = last->virt_addr - bo.addr + 4096;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (struct ggtt_entry *i = start;
        i;
        i = i == last ? NULL : ggtt_entry_next(i)) {
      uint64_t phys_addr = i->phys_addr & ~0xfff;
      struct phys_mem *phys_mem = search_phys_mem(mem, phys_addr);

      if (!phys_mem)
         continue;

      uint32_t map_offset = i->virt_addr - address;
      ASSERTED void *res =
         mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
              MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(mem, bo, false, true);

   return bo;
}

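/* Walk the 4-level PPGTT (PML4 -> PDP -> PD -> PT), consuming 9 address bits
 * per level, and return the backing page for 'address', or NULL if any level
 * is missing or its entry is not present.
 */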
static struct phys_mem *
ppgtt_walk(struct aub_mem *mem, uint64_t pml4, uint64_t address)
{
   uint64_t shift = 39;
   uint64_t addr = pml4;
   for (int level = 4; level > 0; level--) {
      struct phys_mem *table = search_phys_mem(mem, addr);
      if (!table)
         return NULL;
      int index = (address >> shift) & 0x1ff;
      uint64_t entry = ((uint64_t *)table->data)[index];
      if (!(entry & 1))
         return NULL;
      addr = entry & ~0xfff;
      shift -= 9;
   }
   return search_phys_mem(mem, addr);
}

static bool
ppgtt_mapped(struct aub_mem *mem, uint64_t pml4, uint64_t address)
{
   return ppgtt_walk(mem, pml4, address) != NULL;
}

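/* Return a BO containing 'address' in the PPGTT, reusing a cached map when
 * one already covers the address.
 */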
struct gen_batch_decode_bo
aub_mem_get_ppgtt_bo(void *_mem, uint64_t address)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {0};

   list_for_each_entry(struct bo_map, i, &mem->maps, link)
      if (i->ppgtt && i->bo.addr <= address && i->bo.addr + i->bo.size > address)
         return i->bo;

   address &= ~0xfff;

   if (!ppgtt_mapped(mem, mem->pml4, address))
      return bo;

   /* Map everything until the first gap since we don't know how much the
    * decoder actually needs.
    */
   uint64_t end = address;
   while (ppgtt_mapped(mem, mem->pml4, end))
      end += 4096;

   bo.addr = address;
   bo.size = end - address;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (uint64_t page = address; page < end; page += 4096) {
      struct phys_mem *phys_mem = ppgtt_walk(mem, mem->pml4, page);

      ASSERTED void *res =
         mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
              MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(mem, bo, true, true);

   return bo;
}

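/* Initialize an aub_mem: empty map list, empty translation trees, and an
 * anonymous file to back reconstructed physical memory.
 */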
bool
aub_mem_init(struct aub_mem *mem)
{
   memset(mem, 0, sizeof(*mem));

   list_inithead(&mem->maps);

   mem->mem_fd = os_create_anonymous_file(0, "phys memory");

   return mem->mem_fd != -1;
}

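/* Tear down an aub_mem: release BO maps, free both trees, close the file. */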
void
aub_mem_fini(struct aub_mem *mem)
{
   if (mem->mem_fd == -1)
      return;

   aub_mem_clear_bo_maps(mem);

   rb_tree_foreach_safe(struct ggtt_entry, entry, &mem->ggtt, node) {
      rb_tree_remove(&mem->ggtt, &entry->node);
      free(entry);
   }
   rb_tree_foreach_safe(struct phys_mem, entry, &mem->mem, node) {
      rb_tree_remove(&mem->mem, &entry->node);
      free(entry);
   }

   close(mem->mem_fd);
   mem->mem_fd = -1;
}

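/* Return the 4KiB page of reconstructed physical memory containing phys_addr. */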
struct gen_batch_decode_bo
aub_mem_get_phys_addr_data(struct aub_mem *mem, uint64_t phys_addr)
{
   struct phys_mem *page = search_phys_mem(mem, phys_addr);
   return page ?
          (struct gen_batch_decode_bo) { .map = page->data, .addr = page->phys_addr, .size = 4096 } :
          (struct gen_batch_decode_bo) {};
}

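/* Return the 4KiB page mapped at virt_addr in the PPGTT. */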
struct gen_batch_decode_bo
aub_mem_get_ppgtt_addr_data(struct aub_mem *mem, uint64_t virt_addr)
{
   struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
   return page ?
          (struct gen_batch_decode_bo) { .map = page->data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
          (struct gen_batch_decode_bo) {};
}

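/* Same as above, but return the source buffer recorded at write time
 * (aub_data) rather than the reconstructed copy of the page.
 */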
struct gen_batch_decode_bo
aub_mem_get_ppgtt_addr_aub_data(struct aub_mem *mem, uint64_t virt_addr)
{
   struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
   return page ?
          (struct gen_batch_decode_bo) { .map = page->aub_data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
          (struct gen_batch_decode_bo) {};
}