• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2021 Collabora Ltd.
3  * SPDX-License-Identifier: MIT
4  */
5 
6 #include <assert.h>
7 
8 #include "vk_alloc.h"
9 #include "vk_log.h"
10 
11 #include "panvk_device.h"
12 #include "panvk_priv_bo.h"
13 
14 #include "kmod/pan_kmod.h"
15 
16 #include "genxml/decode.h"
17 
/*
 * Allocate a driver-private BO: kmod BO allocation, optional CPU mapping,
 * and a GPU VA binding in the device VM.
 *
 * dev:   owning device; its VK allocator, kmod handles and VA heap are used.
 * size:  requested BO size in bytes (the kernel may round it up; everything
 *        after the allocation uses pan_kmod_bo_size() rather than `size`).
 * flags: PAN_KMOD_BO_FLAG_* bits; PAN_KMOD_BO_FLAG_NO_MMAP skips the CPU
 *        mapping, leaving addr.host NULL.
 * scope: VkSystemAllocationScope for the host-side wrapper allocation.
 * out:   on success, receives the new BO with a refcount of 1.
 *
 * Returns VK_SUCCESS, or VK_ERROR_OUT_OF_{HOST,DEVICE}_MEMORY on failure;
 * on failure everything acquired so far is released via the goto ladder.
 */
VkResult
panvk_priv_bo_create(struct panvk_device *dev, size_t size, uint32_t flags,
                     VkSystemAllocationScope scope, struct panvk_priv_bo **out)
{
   VkResult result;
   int ret;
   /* vk_zalloc zero-initializes the wrapper, so addr.host starts out NULL;
    * the cleanup paths below rely on that to decide whether to munmap. */
   struct panvk_priv_bo *priv_bo =
      vk_zalloc(&dev->vk.alloc, sizeof(*priv_bo), 8, scope);

   if (!priv_bo)
      return panvk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   struct pan_kmod_bo *bo =
      pan_kmod_bo_alloc(dev->kmod.dev, dev->kmod.vm, size, flags);
   if (!bo) {
      result = panvk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
      goto err_free_priv_bo;
   }

   priv_bo->bo = bo;
   priv_bo->dev = dev;

   /* CPU-map the BO unless the caller asked for a GPU-only allocation. */
   if (!(flags & PAN_KMOD_BO_FLAG_NO_MMAP)) {
      priv_bo->addr.host = pan_kmod_bo_mmap(
         bo, 0, pan_kmod_bo_size(bo), PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
      if (priv_bo->addr.host == MAP_FAILED) {
         result = panvk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
         goto err_put_bo;
      }
   }

   struct pan_kmod_vm_op op = {
      .type = PAN_KMOD_VM_OP_TYPE_MAP,
      .va = {
         .start = PAN_KMOD_VM_MAP_AUTO_VA,
         .size = pan_kmod_bo_size(bo),
      },
      .map = {
         .bo = priv_bo->bo,
         .bo_offset = 0,
      },
   };

   /* If the kernel VM doesn't auto-assign VAs, carve one out of the
    * device-managed heap: 2 MiB alignment for allocations larger than
    * 2 MiB, 4 KiB otherwise. */
   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
      simple_mtx_lock(&dev->as.lock);
      op.va.start = util_vma_heap_alloc(
         &dev->as.heap, op.va.size, op.va.size > 0x200000 ? 0x200000 : 0x1000);
      simple_mtx_unlock(&dev->as.lock);
      if (!op.va.start) {
         result = panvk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
         goto err_munmap_bo;
      }
   }

   ret = pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
   if (ret) {
      result = panvk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
      goto err_return_va;
   }

   priv_bo->addr.dev = op.va.start;

   /* Tell the decoder about the new mapping when tracing is enabled. */
   if (dev->debug.decode_ctx) {
      pandecode_inject_mmap(dev->debug.decode_ctx, priv_bo->addr.dev,
                            priv_bo->addr.host, pan_kmod_bo_size(priv_bo->bo),
                            NULL);
   }

   p_atomic_set(&priv_bo->refcnt, 1);

   *out = priv_bo;
   return VK_SUCCESS;

   /* Error unwinding, in reverse acquisition order. */
err_return_va:
   /* The VA only came from the device heap when the VM doesn't
    * auto-assign addresses. */
   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
      simple_mtx_lock(&dev->as.lock);
      util_vma_heap_free(&dev->as.heap, op.va.start, op.va.size);
      simple_mtx_unlock(&dev->as.lock);
   }

err_munmap_bo:
   /* addr.host is still NULL if we never mmapped (NO_MMAP case). */
   if (priv_bo->addr.host) {
      ret = os_munmap(priv_bo->addr.host, pan_kmod_bo_size(bo));
      assert(!ret);
   }

err_put_bo:
   pan_kmod_bo_put(bo);

err_free_priv_bo:
   vk_free(&dev->vk.alloc, priv_bo);
   return result;
}
111 
/*
 * Tear down a private BO once its refcount has reached zero: drop the
 * decoder mapping, unbind the GPU VA, return the VA to the device heap
 * (when device-managed), unmap the CPU mapping, then release the kmod BO
 * and the host-side wrapper.
 */
static void
panvk_priv_bo_destroy(struct panvk_priv_bo *priv_bo)
{
   struct panvk_device *dev = priv_bo->dev;

   if (dev->debug.decode_ctx) {
      pandecode_inject_free(dev->debug.decode_ctx, priv_bo->addr.dev,
                            pan_kmod_bo_size(priv_bo->bo));
   }

   struct pan_kmod_vm_op op = {
      .type = PAN_KMOD_VM_OP_TYPE_UNMAP,
      .va = {
         .start = priv_bo->addr.dev,
         .size = pan_kmod_bo_size(priv_bo->bo),
      },
   };
   /* Unmapping an existing binding is not expected to fail. */
   ASSERTED int ret =
      pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
   assert(!ret);

   /* The VA came from the device heap only when the VM doesn't auto-assign
    * addresses — mirrors the allocation path in panvk_priv_bo_create(). */
   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
      simple_mtx_lock(&dev->as.lock);
      util_vma_heap_free(&dev->as.heap, op.va.start, op.va.size);
      simple_mtx_unlock(&dev->as.lock);
   }

   /* addr.host is NULL for BOs created with PAN_KMOD_BO_FLAG_NO_MMAP. */
   if (priv_bo->addr.host) {
      ret = os_munmap(priv_bo->addr.host, pan_kmod_bo_size(priv_bo->bo));
      assert(!ret);
   }

   pan_kmod_bo_put(priv_bo->bo);
   vk_free(&dev->vk.alloc, priv_bo);
}
147 
148 void
panvk_priv_bo_unref(struct panvk_priv_bo * priv_bo)149 panvk_priv_bo_unref(struct panvk_priv_bo *priv_bo)
150 {
151    if (!priv_bo || p_atomic_dec_return(&priv_bo->refcnt))
152       return;
153 
154    panvk_priv_bo_destroy(priv_bo);
155 }
156