/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

#include "pvr_private.h"
#include "pvr_srv.h"
#include "pvr_srv_bo.h"
#include "pvr_srv_bridge.h"
#include "pvr_types.h"
#include "pvr_winsys_helper.h"
#include "util/u_atomic.h"
#include "util/bitscan.h"
#include "util/macros.h"
#include "util/u_math.h"
#include "vk_log.h"

/* Note: there is no matching pvr_srv_free_display_pmr() function; use
 * pvr_srv_free_pmr() instead.
 */
static VkResult pvr_srv_alloc_display_pmr(struct pvr_srv_winsys *srv_ws,
                                          uint64_t size,
                                          uint64_t srv_flags,
                                          void **const pmr_out,
                                          uint32_t *const handle_out)
{
   uint64_t alignment_out;
   uint64_t size_out;
   VkResult result;
   uint32_t handle;
   int ret;
   int fd;

   ret =
      pvr_winsys_helper_display_buffer_create(srv_ws->master_fd, size, &handle);
   if (ret)
      return vk_error(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   ret = drmPrimeHandleToFD(srv_ws->master_fd, handle, O_CLOEXEC, &fd);
   if (ret) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_display_buffer_destroy;
   }

   result = pvr_srv_physmem_import_dmabuf(srv_ws->render_fd,
                                          fd,
                                          srv_flags,
                                          pmr_out,
                                          &size_out,
                                          &alignment_out);

   /* The fd is no longer needed once the import has completed. */
   close(fd);

   if (result != VK_SUCCESS)
      goto err_display_buffer_destroy;

   /* Only check the outputs on success; they are undefined on failure. */
   assert(size_out >= size);
   assert(alignment_out == srv_ws->base.page_size);

   *handle_out = handle;

   return VK_SUCCESS;

err_display_buffer_destroy:
   pvr_winsys_helper_display_buffer_destroy(srv_ws->master_fd, handle);

   return result;
}

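/* Take a reference on the buffer. */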
static void buffer_acquire(struct pvr_srv_winsys_bo *srv_bo)
{
   p_atomic_inc(&srv_bo->ref_count);
}

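/* Drop a reference on the buffer. When the last reference is dropped, the PMR
 * is freed, any display buffer handle is destroyed, and the bo itself is
 * freed.
 */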
static void buffer_release(struct pvr_srv_winsys_bo *srv_bo)
{
   struct pvr_srv_winsys *srv_ws;

   /* Once all references have been dropped, the PMR can be freed and
    * unlocked.
    */
   if (p_atomic_dec_return(&srv_bo->ref_count) == 0) {
      srv_ws = to_pvr_srv_winsys(srv_bo->base.ws);
      pvr_srv_free_pmr(srv_ws->render_fd, srv_bo->pmr);

      if (srv_bo->is_display_buffer) {
         pvr_winsys_helper_display_buffer_destroy(srv_ws->master_fd,
                                                  srv_bo->handle);
      }

      vk_free(srv_ws->alloc, srv_bo);
   }
}

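/* Translate winsys buffer flags (PVR_WINSYS_BO_FLAG_*) into services memory
 * allocation flags (PVR_SRV_MEMALLOCFLAG_*).
 */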
static uint64_t pvr_srv_get_alloc_flags(uint32_t ws_flags)
{
   /* TODO: For now we assume that buffers should always be accessible to the
    * kernel and that the PVR_WINSYS_BO_FLAG_CPU_ACCESS flag only applies to
    * userspace mappings. Check to see if there are any situations where we
    * wouldn't want this to be the case.
    */
   uint64_t srv_flags = PVR_SRV_MEMALLOCFLAG_GPU_READABLE |
                        PVR_SRV_MEMALLOCFLAG_GPU_WRITEABLE |
                        PVR_SRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
                        PVR_SRV_MEMALLOCFLAG_CPU_UNCACHED_WC;

   if (ws_flags & PVR_WINSYS_BO_FLAG_CPU_ACCESS) {
      srv_flags |= PVR_SRV_MEMALLOCFLAG_CPU_READABLE |
                   PVR_SRV_MEMALLOCFLAG_CPU_WRITEABLE;
   }

   if (ws_flags & PVR_WINSYS_BO_FLAG_GPU_UNCACHED)
      srv_flags |= PVR_SRV_MEMALLOCFLAG_GPU_UNCACHED;
   else
      srv_flags |= PVR_SRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;

   if (ws_flags & PVR_WINSYS_BO_FLAG_PM_FW_PROTECT)
      srv_flags |= PVR_SRV_MEMALLOCFLAG_DEVICE_FLAG(PM_FW_PROTECT);

   if (ws_flags & PVR_WINSYS_BO_FLAG_ZERO_ON_ALLOC)
      srv_flags |= PVR_SRV_MEMALLOCFLAG_ZERO_ON_ALLOC;

   return srv_flags;
}

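/* Allocate a buffer object. Display buffers are allocated through the display
 * driver and imported as a PMR; all other buffers are allocated directly as a
 * PMR through the render node.
 */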
VkResult pvr_srv_winsys_buffer_create(struct pvr_winsys *ws,
                                      uint64_t size,
                                      uint64_t alignment,
                                      enum pvr_winsys_bo_type type,
                                      uint32_t ws_flags,
                                      struct pvr_winsys_bo **const bo_out)
{
   const uint64_t srv_flags = pvr_srv_get_alloc_flags(ws_flags);
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(ws);
   struct pvr_srv_winsys_bo *srv_bo;
   VkResult result;

   assert(util_is_power_of_two_nonzero(alignment));

   /* The kernel will page-align the size; we do the same here so that we have
    * access to all of the allocated memory.
    */
   alignment = MAX2(alignment, ws->page_size);
   size = ALIGN_POT(size, alignment);

   srv_bo = vk_zalloc(srv_ws->alloc,
                      sizeof(*srv_bo),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!srv_bo)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   srv_bo->is_display_buffer = (type == PVR_WINSYS_BO_TYPE_DISPLAY);
   if (srv_bo->is_display_buffer) {
      result = pvr_srv_alloc_display_pmr(srv_ws,
                                         size,
                                         srv_flags &
                                            PVR_SRV_MEMALLOCFLAGS_PMRFLAGSMASK,
                                         &srv_bo->pmr,
                                         &srv_bo->handle);

      srv_bo->base.is_imported = true;
   } else {
      result =
         pvr_srv_alloc_pmr(srv_ws->render_fd,
                           size,
                           size,
                           1,
                           1,
                           srv_ws->base.log2_page_size,
                           (srv_flags & PVR_SRV_MEMALLOCFLAGS_PMRFLAGSMASK),
                           getpid(),
                           &srv_bo->pmr);
   }

   if (result != VK_SUCCESS)
      goto err_vk_free_srv_bo;

   srv_bo->base.size = size;
   srv_bo->base.ws = ws;
   srv_bo->flags = srv_flags;

   p_atomic_set(&srv_bo->ref_count, 1);

   *bo_out = &srv_bo->base;

   return VK_SUCCESS;

err_vk_free_srv_bo:
   vk_free(srv_ws->alloc, srv_bo);

   return result;
}

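/* Import a dma-buf fd as a buffer object. The bo size is taken from the
 * imported PMR.
 */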
VkResult
pvr_srv_winsys_buffer_create_from_fd(struct pvr_winsys *ws,
                                     int fd,
                                     struct pvr_winsys_bo **const bo_out)
{
   /* FIXME: PVR_SRV_MEMALLOCFLAG_CPU_UNCACHED_WC should be changed to
    * PVR_SRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT, as a dma-buf is always mapped
    * as cacheable by the exporter. The flags are not passed to the exporter
    * and they don't really change the behavior, but they can be used for
    * internal checking, so they should reflect the correct cacheability of
    * the buffer.
    * Ref: pvr_GetMemoryFdPropertiesKHR
    *      https://www.kernel.org/doc/html/latest/driver-api/dma-buf.html#c.dma_buf_ops
    */
   static const uint64_t srv_flags =
      PVR_SRV_MEMALLOCFLAG_CPU_READABLE | PVR_SRV_MEMALLOCFLAG_CPU_WRITEABLE |
      PVR_SRV_MEMALLOCFLAG_CPU_UNCACHED_WC | PVR_SRV_MEMALLOCFLAG_GPU_READABLE |
      PVR_SRV_MEMALLOCFLAG_GPU_WRITEABLE |
      PVR_SRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(ws);
   struct pvr_srv_winsys_bo *srv_bo;
   uint64_t alignment_out;
   uint64_t size_out;
   VkResult result;

   srv_bo = vk_zalloc(srv_ws->alloc,
                      sizeof(*srv_bo),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!srv_bo)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = pvr_srv_physmem_import_dmabuf(srv_ws->render_fd,
                                          fd,
                                          srv_flags,
                                          &srv_bo->pmr,
                                          &size_out,
                                          &alignment_out);
   if (result != VK_SUCCESS)
      goto err_vk_free_srv_bo;

   assert(alignment_out == srv_ws->base.page_size);

   srv_bo->base.ws = ws;
   srv_bo->base.size = size_out;
   srv_bo->base.is_imported = true;
   srv_bo->flags = srv_flags;

   p_atomic_set(&srv_bo->ref_count, 1);

   *bo_out = &srv_bo->base;

   return VK_SUCCESS;

err_vk_free_srv_bo:
   vk_free(srv_ws->alloc, srv_bo);

   return result;
}

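/* Drop the caller's reference on the buffer; the memory is freed once all
 * outstanding mappings have also been released.
 */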
void pvr_srv_winsys_buffer_destroy(struct pvr_winsys_bo *bo)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);

   buffer_release(srv_bo);
}

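/* Export a buffer object as a dma-buf fd. Display buffers are exported via
 * the saved DRM handle; all other buffers are exported via the PMR.
 */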
VkResult pvr_srv_winsys_buffer_get_fd(struct pvr_winsys_bo *bo,
                                      int *const fd_out)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(bo->ws);
   int ret;

   if (!srv_bo->is_display_buffer)
      return pvr_srv_physmem_export_dmabuf(srv_ws->render_fd,
                                           srv_bo->pmr,
                                           fd_out);

   /* For display buffers, export using the saved buffer handle. */
   ret =
      drmPrimeHandleToFD(srv_ws->master_fd, srv_bo->handle, O_CLOEXEC, fd_out);
   if (ret)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   return VK_SUCCESS;
}

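/* CPU-map the whole buffer. The mapping holds a reference on the buffer until
 * pvr_srv_winsys_buffer_unmap() is called.
 */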
void *pvr_srv_winsys_buffer_map(struct pvr_winsys_bo *bo)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(bo->ws);
   const int prot =
      (srv_bo->flags & PVR_SRV_MEMALLOCFLAG_CPU_WRITEABLE ? PROT_WRITE : 0) |
      (srv_bo->flags & PVR_SRV_MEMALLOCFLAG_CPU_READABLE ? PROT_READ : 0);

   /* The memory must not already be mapped. */
   assert(!bo->map);

   /* Map the full PMR to CPU space. */
   bo->map = mmap(NULL,
                  bo->size,
                  prot,
                  MAP_SHARED,
                  srv_ws->render_fd,
                  (off_t)srv_bo->pmr << srv_ws->base.log2_page_size);
   if (bo->map == MAP_FAILED) {
      bo->map = NULL;
      vk_error(NULL, VK_ERROR_MEMORY_MAP_FAILED);
      return NULL;
   }

   VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map,
                                bo->size,
                                0,
                                srv_bo->flags &
                                   PVR_SRV_MEMALLOCFLAG_ZERO_ON_ALLOC));

   buffer_acquire(srv_bo);

   return bo->map;
}

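/* Remove the CPU mapping created by pvr_srv_winsys_buffer_map() and drop its
 * reference on the buffer.
 */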
void pvr_srv_winsys_buffer_unmap(struct pvr_winsys_bo *bo)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);

   /* The memory must have been mapped previously. */
   assert(bo->map);

   /* Unmap the whole PMR from CPU space. */
   if (munmap(bo->map, bo->size))
      vk_error(NULL, VK_ERROR_UNKNOWN);

   VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));

   bo->map = NULL;

   buffer_release(srv_bo);
}

/* This function must only be used to allocate inside the reserved region and
 * is for internal use only. Callers are expected to know what they are doing.
 */
struct pvr_winsys_vma *
pvr_srv_heap_alloc_reserved(struct pvr_winsys_heap *heap,
                            const pvr_dev_addr_t reserved_dev_addr,
                            uint64_t size,
                            uint64_t alignment)
{
   struct pvr_srv_winsys_heap *srv_heap = to_pvr_srv_winsys_heap(heap);
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(heap->ws);
   struct pvr_srv_winsys_vma *srv_vma;
   VkResult result;

   assert(util_is_power_of_two_nonzero(alignment));

   /* pvr_srv_winsys_buffer_create() page-aligns the size. We must do the same
    * here to ensure enough heap space is allocated to be able to map the
    * buffer to the GPU.
    */
   alignment = MAX2(alignment, heap->ws->page_size);
   size = ALIGN_POT(size, alignment);

   srv_vma = vk_alloc(srv_ws->alloc,
                      sizeof(*srv_vma),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!srv_vma) {
      vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      return NULL;
   }

   /* Just check that the address is in range and aligned; locking is not
    * required, as the caller is responsible for providing a distinct address.
    */
   if (reserved_dev_addr.addr < heap->base_addr.addr ||
       reserved_dev_addr.addr + size >
          heap->base_addr.addr + heap->reserved_size ||
       reserved_dev_addr.addr & ((srv_ws->base.page_size) - 1))
      goto err_vk_free_srv_vma;

   /* Reserve the virtual range in the MMU and create a mapping structure. */
   result = pvr_srv_int_reserve_addr(srv_ws->render_fd,
                                     srv_heap->server_heap,
                                     reserved_dev_addr,
                                     size,
                                     &srv_vma->reservation);
   if (result != VK_SUCCESS)
      goto err_vk_free_srv_vma;

   srv_vma->base.dev_addr = reserved_dev_addr;
   srv_vma->base.bo = NULL;
   srv_vma->base.heap = heap;
   srv_vma->base.size = size;

   p_atomic_inc(&srv_heap->base.ref_count);

   return &srv_vma->base;

err_vk_free_srv_vma:
   vk_free(srv_ws->alloc, srv_vma);

   return NULL;
}

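/* Allocate device virtual address space from the heap and reserve it in the
 * MMU. No buffer is mapped into the returned vma yet.
 */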
struct pvr_winsys_vma *pvr_srv_winsys_heap_alloc(struct pvr_winsys_heap *heap,
                                                 uint64_t size,
                                                 uint64_t alignment)
429 {
430    struct pvr_srv_winsys_heap *const srv_heap = to_pvr_srv_winsys_heap(heap);
431    struct pvr_srv_winsys *const srv_ws = to_pvr_srv_winsys(heap->ws);
432    struct pvr_srv_winsys_vma *srv_vma;
433    VkResult result;
434    bool ret;
435 
436    srv_vma = vk_alloc(srv_ws->alloc,
437                       sizeof(*srv_vma),
438                       8,
439                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
440    if (!srv_vma) {
441       vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
442       return NULL;
443    }
444 
445    ret = pvr_winsys_helper_heap_alloc(heap, size, alignment, &srv_vma->base);
446    if (!ret)
447       goto err_pvr_srv_free_vma;
448 
449    /* Reserve the virtual range in the MMU and create a mapping structure. */
450    result = pvr_srv_int_reserve_addr(srv_ws->render_fd,
451                                      srv_heap->server_heap,
452                                      srv_vma->base.dev_addr,
453                                      srv_vma->base.size,
454                                      &srv_vma->reservation);
455    if (result != VK_SUCCESS)
456       goto err_pvr_srv_free_allocation;
457 
458    return &srv_vma->base;
459 
460 err_pvr_srv_free_allocation:
461    pvr_winsys_helper_heap_free(&srv_vma->base);
462 
463 err_pvr_srv_free_vma:
464    vk_free(srv_ws->alloc, srv_vma);
465 
466    return NULL;
467 }
468 
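/* Free a vma allocated with pvr_srv_winsys_heap_alloc() or
 * pvr_srv_heap_alloc_reserved(). The vma must not have a buffer mapped into
 * it.
 */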
void pvr_srv_winsys_heap_free(struct pvr_winsys_vma *vma)
470 {
471    struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(vma->heap->ws);
472    struct pvr_srv_winsys_vma *srv_vma = to_pvr_srv_winsys_vma(vma);
473 
474    /* A vma with an existing device mapping should not be freed. */
475    assert(!srv_vma->base.bo);
476 
477    /* Remove mapping handle and underlying reservation. */
478    pvr_srv_int_unreserve_addr(srv_ws->render_fd, srv_vma->reservation);
479 
480    /* Check if we are dealing with reserved address range. */
481    if (vma->dev_addr.addr <
482        (vma->heap->base_addr.addr + vma->heap->reserved_size)) {
483       /* For the reserved addresses just decrement the reference count. */
484       p_atomic_dec(&vma->heap->ref_count);
485    } else {
486       /* Free allocated virtual space. */
487       pvr_winsys_helper_heap_free(vma);
488    }
489 
490    vk_free(srv_ws->alloc, srv_vma);
491 }
492 
/* * We assume the vma has been allocated with extra space to accommodate the
 *   offset.
 * * The offset passed in is unchanged and can be used to calculate the extra
 *   size that needs to be mapped and the final device virtual address.
 */
pvr_dev_addr_t pvr_srv_winsys_vma_map(struct pvr_winsys_vma *vma,
                                      struct pvr_winsys_bo *bo,
                                      uint64_t offset,
                                      uint64_t size)
502 {
503    struct pvr_srv_winsys_vma *srv_vma = to_pvr_srv_winsys_vma(vma);
504    struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);
505    struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(bo->ws);
506    const uint64_t srv_flags = srv_bo->flags &
507                               PVR_SRV_MEMALLOCFLAGS_VIRTUAL_MASK;
508    const uint32_t virt_offset = offset & (vma->heap->page_size - 1);
509    const uint64_t aligned_virt_size =
510       ALIGN_POT(virt_offset + size, vma->heap->page_size);
511    VkResult result;
512 
513    /* Address should not be mapped already */
514    assert(!srv_vma->base.bo);
515 
516    if (srv_bo->is_display_buffer) {
517       struct pvr_srv_winsys_heap *srv_heap = to_pvr_srv_winsys_heap(vma->heap);
518 
519       /* In case of display buffers, we only support to map whole PMR */
520       if (offset != 0 || bo->size != ALIGN_POT(size, srv_ws->base.page_size) ||
521           vma->size != bo->size) {
522          vk_error(NULL, VK_ERROR_MEMORY_MAP_FAILED);
523          return PVR_DEV_ADDR_INVALID;
524       }
525 
526       /* Map the requested pmr */
527       result = pvr_srv_int_map_pmr(srv_ws->render_fd,
528                                    srv_heap->server_heap,
529                                    srv_vma->reservation,
530                                    srv_bo->pmr,
531                                    srv_flags,
532                                    &srv_vma->mapping);
533 
534    } else {
535       const uint32_t phys_page_offset = (offset - virt_offset) >>
536                                         srv_ws->base.log2_page_size;
537       const uint32_t phys_page_count = aligned_virt_size >>
538                                        srv_ws->base.log2_page_size;
539 
540       /* Check if bo and vma can accommodate the given size and offset */
541       if (ALIGN_POT(offset + size, vma->heap->page_size) > bo->size ||
542           aligned_virt_size > vma->size) {
543          vk_error(NULL, VK_ERROR_MEMORY_MAP_FAILED);
544          return PVR_DEV_ADDR_INVALID;
545       }
546 
547       /* Map the requested pages */
548       result = pvr_srv_int_map_pages(srv_ws->render_fd,
549                                      srv_vma->reservation,
550                                      srv_bo->pmr,
551                                      phys_page_count,
552                                      phys_page_offset,
553                                      srv_flags,
554                                      vma->dev_addr);
555    }
556 
557    if (result != VK_SUCCESS)
558       return PVR_DEV_ADDR_INVALID;
559 
560    buffer_acquire(srv_bo);
561 
562    vma->bo = &srv_bo->base;
563    vma->bo_offset = offset;
564    vma->mapped_size = aligned_virt_size;
565 
566    return PVR_DEV_ADDR_OFFSET(vma->dev_addr, virt_offset);
567 }
568 
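/* Unmap the buffer previously mapped into the vma by pvr_srv_winsys_vma_map()
 * and drop the mapping's reference on the buffer.
 */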
void pvr_srv_winsys_vma_unmap(struct pvr_winsys_vma *vma)
{
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(vma->heap->ws);
   struct pvr_srv_winsys_vma *srv_vma = to_pvr_srv_winsys_vma(vma);
   struct pvr_srv_winsys_bo *srv_bo;

   /* The address should be mapped. */
   assert(srv_vma->base.bo);

   srv_bo = to_pvr_srv_winsys_bo(srv_vma->base.bo);

   if (srv_bo->is_display_buffer) {
      /* Unmap the PMR. */
      pvr_srv_int_unmap_pmr(srv_ws->render_fd, srv_vma->mapping);
   } else {
      /* Unmap the requested pages. */
      pvr_srv_int_unmap_pages(srv_ws->render_fd,
                              srv_vma->reservation,
                              vma->dev_addr,
                              vma->mapped_size >> srv_ws->base.log2_page_size);
   }

   buffer_release(srv_bo);

   srv_vma->base.bo = NULL;
}