/**********************************************************
 * Copyright 2009-2023 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "vmw_screen.h"

#include "vmw_buffer.h"
#include "vmw_fence.h"

#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

/**
 * vmw_pools_cleanup - Destroy the buffer pools.
 *
 * @vws: Pointer to a struct vmw_winsys_screen.
 */
void
vmw_pools_cleanup(struct vmw_winsys_screen *vws)
{
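   /*
    * Destroy each fenced manager before the manager it wraps; the base
    * DMA manager goes last.
    */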
   if (vws->pools.dma_slab_fenced)
      vws->pools.dma_slab_fenced->destroy(vws->pools.dma_slab_fenced);
   if (vws->pools.dma_slab)
      vws->pools.dma_slab->destroy(vws->pools.dma_slab);
   if (vws->pools.dma_fenced)
      vws->pools.dma_fenced->destroy(vws->pools.dma_fenced);
   if (vws->pools.dma_cache)
      vws->pools.dma_cache->destroy(vws->pools.dma_cache);

   if (vws->pools.query_fenced)
      vws->pools.query_fenced->destroy(vws->pools.query_fenced);
   if (vws->pools.query_mm)
      vws->pools.query_mm->destroy(vws->pools.query_mm);

   if (vws->pools.dma_mm)
      vws->pools.dma_mm->destroy(vws->pools.dma_mm);
   if (vws->pools.dma_base)
      vws->pools.dma_base->destroy(vws->pools.dma_base);
}


/**
 * vmw_query_pools_init - Create a pool of query buffers.
 *
 * @vws: Pointer to a struct vmw_winsys_screen.
 *
 * Typically this pool should be created on demand, once we detect
 * that the app will be using queries. There's nothing special about
 * this pool other than the size of the backing kernel buffers, which
 * is limited to 8192 bytes.
 * If allocation and freeing of the query slabs ever becomes a
 * performance issue, it should be easy to fix by allocating them out
 * of a buffer cache.
 */
bool
vmw_query_pools_init(struct vmw_winsys_screen *vws)
{
   struct pb_desc desc;

   desc.alignment = 16;
   desc.usage = ~(VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC);

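   /*
    * Sub-allocate the query buffers (16 to 128 bytes each) out of
    * VMW_QUERY_POOL_SIZE-sized slabs taken from the base DMA manager.
    */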
   vws->pools.query_mm = pb_slab_range_manager_create(vws->pools.dma_base, 16, 128,
                                                      VMW_QUERY_POOL_SIZE,
                                                      &desc);
   if (!vws->pools.query_mm)
      return false;

   vws->pools.query_fenced = simple_fenced_bufmgr_create(
      vws->pools.query_mm, vws->fence_ops);

   if (!vws->pools.query_fenced)
      goto out_no_query_fenced;

   return true;

out_no_query_fenced:
   vws->pools.query_mm->destroy(vws->pools.query_mm);
   return false;
}

/**
 * vmw_pools_init - Create the buffer pools.
 *
 * @vws: Pointer to a struct vmw_winsys_screen.
 */
bool
vmw_pools_init(struct vmw_winsys_screen *vws)
{
   struct pb_desc desc;

   vws->pools.dma_base = vmw_dma_bufmgr_create(vws);
   if (!vws->pools.dma_base)
      goto error;

   /*
    * A managed pool for DMA buffers.
    */
   vws->pools.dma_mm = mm_bufmgr_create(vws->pools.dma_base,
                                        VMW_GMR_POOL_SIZE,
                                        12 /* 4096 alignment */);
   if (!vws->pools.dma_mm)
      goto error;

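   /*
    * Recycle recently freed DMA buffers through a cache rather than
    * returning them to the kernel right away; shared buffers bypass
    * the cache, and the cache is capped at 64 MB.
    */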
   vws->pools.dma_cache =
      pb_cache_manager_create(vws->pools.dma_base, 100000, 2.0f,
                              VMW_BUFFER_USAGE_SHARED,
                              64 * 1024 * 1024);

   if (!vws->pools.dma_cache)
      goto error;

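   /*
    * Wrap the cache in a fenced manager so that a buffer is not handed
    * back for reuse until the fence from its last use has signaled.
    */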
   vws->pools.dma_fenced =
      simple_fenced_bufmgr_create(vws->pools.dma_cache,
                                  vws->fence_ops);

   if (!vws->pools.dma_fenced)
      goto error;

   /*
    * The slab pool allocates buffers directly from the kernel, except
    * for very small buffers, which are sub-allocated from slabs so as
    * not to waste memory, since a kernel buffer is at minimum 4096 bytes.
    *
    * Here we use it only as a fallback for the case where our
    * pre-allocated managed buffer pool runs out of memory.
    */
   desc.alignment = 64;
   desc.usage = ~(SVGA_BUFFER_USAGE_PINNED | VMW_BUFFER_USAGE_SHARED |
                  VMW_BUFFER_USAGE_SYNC);
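   /*
    * Buffers of 64 bytes up to 8 KB are sub-allocated out of 16 KB
    * slabs carved from the DMA cache above.
    */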
   vws->pools.dma_slab =
      pb_slab_range_manager_create(vws->pools.dma_cache,
                                   64,
                                   8192,
                                   16384,
                                   &desc);
   if (!vws->pools.dma_slab)
      goto error;

   vws->pools.dma_slab_fenced =
      simple_fenced_bufmgr_create(vws->pools.dma_slab,
                                  vws->fence_ops);
   if (!vws->pools.dma_slab_fenced)
      goto error;

   vws->pools.query_fenced = NULL;
   vws->pools.query_mm = NULL;

   return true;

error:
   vmw_pools_cleanup(vws);
   return false;
}
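
/*
 * Minimal usage sketch (illustrative only; the surrounding screen setup
 * code is a hypothetical stand-in, not part of this file): vmw_pools_init()
 * runs when the winsys screen is set up, vmw_query_pools_init() is expected
 * to be called on demand the first time queries are used, and
 * vmw_pools_cleanup() tears everything down again.
 *
 *    struct vmw_winsys_screen *vws = ...;  // screen being initialized
 *
 *    if (!vmw_pools_init(vws))
 *       return false;                      // init cleans up after itself
 *
 *    // ... later, the first time the app uses queries ...
 *    if (!vws->pools.query_fenced && !vmw_query_pools_init(vws))
 *       return false;
 *
 *    // ... at screen destruction ...
 *    vmw_pools_cleanup(vws);
 */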