/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
28
29 #ifndef AMDGPU_BO_H
30 #define AMDGPU_BO_H
31
32 #include "amdgpu_winsys.h"
33
34 #include "pipebuffer/pb_slab.h"
35
/* Opaque; defined in the .c file that manages sparse backing memory. */
struct amdgpu_sparse_backing_chunk;

/*
 * Sub-allocation information for a real buffer used as backing memory of a
 * sparse buffer.
 */
struct amdgpu_sparse_backing {
   /* Node in the owning sparse buffer's list of backing buffers
    * (amdgpu_winsys_bo::u.sparse.backing). */
   struct list_head list;

   /* The real buffer object that provides the backing pages. */
   struct amdgpu_winsys_bo *bo;

   /* Sorted list of free chunks. */
   struct amdgpu_sparse_backing_chunk *chunks;
   uint32_t max_chunks;   /* allocated capacity of the chunks array — TODO confirm */
   uint32_t num_chunks;   /* number of entries currently in use — TODO confirm */
};
52
/* Per-page commitment record for a sparse buffer's virtual address range. */
struct amdgpu_sparse_commitment {
   /* Backing buffer this page maps to; presumably NULL when the page is
    * uncommitted — verify against the sparse commit implementation. */
   struct amdgpu_sparse_backing *backing;
   /* Page index within the backing buffer — TODO confirm. */
   uint32_t page;
};
57
/*
 * A winsys buffer object.  The union distinguishes three variants:
 * "real" (a standalone amdgpu allocation), "slab" (sub-allocated from a
 * real buffer), and "sparse" (a VA range whose pages are committed to
 * backing buffers on demand).
 */
struct amdgpu_winsys_bo {
   struct pb_buffer base;   /* must be first: BOs are cast to/from pb_buffer */
   union {
      /* Valid for standalone allocations. */
      struct {
         struct pb_cache_entry cache_entry;

         amdgpu_va_handle va_handle;
         /* Number of outstanding CPU mappings — TODO confirm. */
         int map_count;
         bool use_reusable_pool;

         struct list_head global_list_item;

         /* KMS/GEM handle of the buffer — presumably for export; verify. */
         uint32_t kms_handle;
      } real;
      /* Valid for slab sub-allocations. */
      struct {
         struct pb_slab_entry entry;
         /* The real buffer this entry was carved out of. */
         struct amdgpu_winsys_bo *real;
      } slab;
      /* Valid when the "sparse" flag below is set. */
      struct {
         amdgpu_va_handle va_handle;
         enum radeon_bo_flag flags;

         uint32_t num_va_pages;        /* size of the VA range, in pages */
         uint32_t num_backing_pages;   /* pages currently backed by memory */

         /* List of amdgpu_sparse_backing buffers (via their "list" member). */
         struct list_head backing;

         /* Commitment information for each page of the virtual memory area. */
         struct amdgpu_sparse_commitment *commitments;
      } sparse;
   } u;

   struct amdgpu_winsys *ws;
   void *cpu_ptr; /* for user_ptr and permanent maps */

   amdgpu_bo_handle bo; /* NULL for slab entries and sparse buffers */
   bool sparse;         /* true when u.sparse is the active union member */
   bool is_user_ptr;    /* buffer wraps user memory rather than a GPU allocation */
   uint32_t unique_id;
   uint64_t va;         /* GPU virtual address */
   enum radeon_bo_domain initial_domain;
   enum radeon_bo_flag flags;

   /* how many command streams is this bo referenced in? */
   int num_cs_references;

   /* how many command streams, which are being emitted in a separate
    * thread, is this bo referenced in? */
   volatile int num_active_ioctls;

   /* whether buffer_get_handle or buffer_from_handle was called,
    * it can only transition from false to true
    */
   volatile int is_shared; /* bool (int for atomicity) */

   /* Fences for buffer synchronization. */
   unsigned num_fences;
   unsigned max_fences;
   struct pipe_fence_handle **fences;

   simple_mtx_t lock;   /* protects mutable state above — TODO confirm exact scope */
};
120
/* A slab of BOs sub-allocated from one real buffer. */
struct amdgpu_slab {
   struct pb_slab base;               /* must be first: cast to/from pb_slab */
   struct amdgpu_winsys_bo *buffer;   /* the real BO the slab lives in */
   struct amdgpu_winsys_bo *entries;  /* array of sub-allocated BOs */
};
126
/* Return true if the buffer is idle and can be returned to the cache. */
bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf);
/* Allocate a buffer; picks real/slab/sparse internally based on size/flags. */
struct pb_buffer *amdgpu_bo_create(struct amdgpu_winsys *ws,
                                   uint64_t size,
                                   unsigned alignment,
                                   enum radeon_bo_domain domain,
                                   enum radeon_bo_flag flags);
void amdgpu_bo_destroy(struct pb_buffer *_buf);
/* Map the buffer for CPU access; may synchronize against "rcs" per "usage". */
void *amdgpu_bo_map(struct pb_buffer *buf,
                    struct radeon_cmdbuf *rcs,
                    enum pipe_map_flags usage);
void amdgpu_bo_unmap(struct pb_buffer *buf);
/* Plug the buffer vtable into the winsys. */
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws);

/* pb_slab callbacks for the slab sub-allocator. */
bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *amdgpu_bo_slab_alloc_encrypted(void *priv, unsigned heap,
                                               unsigned entry_size,
                                               unsigned group_index);
struct pb_slab *amdgpu_bo_slab_alloc_normal(void *priv, unsigned heap,
                                            unsigned entry_size,
                                            unsigned group_index);
void amdgpu_bo_slab_free(void *priv, struct pb_slab *slab);
148
/* Downcast a pb_buffer to the winsys BO that embeds it (base is first). */
static inline
struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
   struct amdgpu_winsys_bo *winsys_bo = (struct amdgpu_winsys_bo *)bo;

   return winsys_bo;
}
154
/* Downcast a pb_slab to the winsys slab that embeds it (base is first). */
static inline
struct amdgpu_slab *amdgpu_slab(struct pb_slab *slab)
{
   struct amdgpu_slab *amdgpu = (struct amdgpu_slab *)slab;

   return amdgpu;
}
160
/* Update *dst to reference src, adjusting refcounts via pb_reference.
 * src may be NULL to drop the reference held by *dst. */
static inline
void amdgpu_winsys_bo_reference(struct amdgpu_winsys_bo **dst,
                                struct amdgpu_winsys_bo *src)
{
   struct pb_buffer **dst_buf = (struct pb_buffer **)dst;
   struct pb_buffer *src_buf = (struct pb_buffer *)src;

   pb_reference(dst_buf, src_buf);
}
167
168 #endif
169