/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#ifndef AMDGPU_BO_H
#define AMDGPU_BO_H

#include "amdgpu_winsys.h"

#include "pipebuffer/pb_slab.h"

struct amdgpu_sparse_backing_chunk;

/*
 * Sub-allocation information for a real buffer used as backing memory of a
 * sparse buffer.
 */
struct amdgpu_sparse_backing {
   struct list_head list;

   struct amdgpu_winsys_bo *bo;

   /* Sorted list of free chunks. */
   struct amdgpu_sparse_backing_chunk *chunks;
   uint32_t max_chunks;
   uint32_t num_chunks;
};

struct amdgpu_sparse_commitment {
   struct amdgpu_sparse_backing *backing;
   uint32_t page;
};
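
/* Illustrative sketch (not part of this header): each commitments[] entry
 * maps one page of a sparse buffer's virtual range to a page inside a
 * backing buffer. Assuming RADEON_SPARSE_PAGE_SIZE (radeon_winsys.h) is the
 * commitment granularity, a committed page would resolve roughly as:
 *
 *    struct amdgpu_sparse_commitment *c = &bo->u.sparse.commitments[va_page];
 *    if (c->backing) {
 *       struct amdgpu_winsys_bo *real = c->backing->bo;
 *       uint64_t offset = (uint64_t)c->page * RADEON_SPARSE_PAGE_SIZE;
 *       // the page's memory lives at "offset" bytes into "real"
 *    }
 */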

struct amdgpu_winsys_bo {
   struct pb_buffer base;
   union {
      struct {
         amdgpu_va_handle va_handle;
#if DEBUG
         struct list_head global_list_item;
#endif
         void *cpu_ptr; /* for user_ptr and permanent maps */
         uint32_t kms_handle;
         int map_count;

         bool is_user_ptr;
         bool use_reusable_pool;

         /* Whether buffer_get_handle or buffer_from_handle has been called.
          * It can only transition from false to true. Protected by lock.
          */
         bool is_shared;
      } real;
      struct {
         struct pb_slab_entry entry;
         struct amdgpu_winsys_bo *real;
      } slab;
      struct {
         amdgpu_va_handle va_handle;

         uint32_t num_va_pages;
         uint32_t num_backing_pages;

         struct list_head backing;

         /* Commitment information for each page of the virtual memory area. */
         struct amdgpu_sparse_commitment *commitments;
      } sparse;
   } u;

   amdgpu_bo_handle bo; /* NULL for slab entries and sparse buffers */
   uint64_t va;

   uint32_t unique_id;
   simple_mtx_t lock;

   /* How many command streams that are being emitted in a separate thread
    * reference this BO? */
   volatile int num_active_ioctls;

   /* Fences for buffer synchronization. */
   uint16_t num_fences;
   uint16_t max_fences;
   struct pipe_fence_handle **fences;

   struct pb_cache_entry cache_entry[];
};
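
/* Illustrative note (not part of this header): exactly one union member is
 * valid per buffer. Per the comment on "bo" above, real buffers are the
 * only ones with a non-NULL amdgpu handle, so a hypothetical helper could
 * discriminate them:
 *
 *    static inline bool amdgpu_bo_is_real(struct amdgpu_winsys_bo *bo)
 *    {
 *       return bo->bo != NULL;
 *    }
 *
 * Telling slab entries apart from sparse buffers needs out-of-band state,
 * e.g. the creation flags; the struct stores no explicit tag.
 */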

struct amdgpu_slab {
   struct pb_slab base;
   unsigned entry_size;
   struct amdgpu_winsys_bo *buffer;
   struct amdgpu_winsys_bo *entries;
};
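
/* Illustrative note (not part of this header): "buffer" is the real BO that
 * backs the whole slab, and "entries" is an array of amdgpu_winsys_bo in the
 * u.slab configuration, each pointing back to "buffer" via u.slab.real. As a
 * sketch of the implied layout (an assumption, not a guarantee), entry i
 * would start at GPU address buffer->va + i * entry_size.
 */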

bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer *_buf);
struct pb_buffer *amdgpu_bo_create(struct amdgpu_winsys *ws,
                                   uint64_t size,
                                   unsigned alignment,
                                   enum radeon_bo_domain domain,
                                   enum radeon_bo_flag flags);
void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf);
void *amdgpu_bo_map(struct radeon_winsys *rws,
                    struct pb_buffer *buf,
                    struct radeon_cmdbuf *rcs,
                    enum pipe_map_flags usage);
void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf);
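
/* Illustrative call pattern (a sketch; the "ws" variable and the chosen
 * domain/flag/usage values are assumptions, not taken from this header):
 *
 *    struct pb_buffer *buf =
 *       amdgpu_bo_create(ws, 4096, 4096,
 *                        RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
 *    if (buf) {
 *       void *ptr = amdgpu_bo_map(&ws->dummy_ws.base, buf, NULL,
 *                                 PIPE_MAP_WRITE);
 *       if (ptr) {
 *          // ... write through ptr ...
 *          amdgpu_bo_unmap(&ws->dummy_ws.base, buf);
 *       }
 *       pb_reference(&buf, NULL); // drop the creation reference
 *    }
 */
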
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws);

bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
                                     unsigned group_index);
void amdgpu_bo_slab_free(struct amdgpu_winsys *ws, struct pb_slab *slab);
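
/* Illustrative note: these three functions are the suballocator callbacks
 * consumed by pb_slab. The exact pb_slabs_init() signature varies across
 * Mesa releases, so the wiring is only sketched here (an assumption):
 *
 *    pb_slabs_init(&slabs, min_order, max_order, num_heaps, priv,
 *                  amdgpu_bo_can_reclaim_slab, amdgpu_bo_slab_alloc,
 *                  free_wrapper);
 *
 * amdgpu_bo_can_reclaim_slab() checks whether an entry's fences allow reuse,
 * and amdgpu_bo_slab_alloc() carves a new real buffer into entries.
 */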

static inline
struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
   return (struct amdgpu_winsys_bo *)bo;
}

static inline
struct amdgpu_slab *amdgpu_slab(struct pb_slab *slab)
{
   return (struct amdgpu_slab *)slab;
}

static inline
void amdgpu_winsys_bo_reference(struct amdgpu_winsys *ws,
                                struct amdgpu_winsys_bo **dst,
                                struct amdgpu_winsys_bo *src)
{
   radeon_bo_reference(&ws->dummy_ws.base,
                       (struct pb_buffer**)dst, (struct pb_buffer*)src);
}
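
/* Illustrative use (a sketch; "old" is a hypothetical variable):
 *
 *    struct amdgpu_winsys_bo *old = NULL;
 *    amdgpu_winsys_bo_reference(ws, &old, bo);   // take a reference to bo
 *    ...
 *    amdgpu_winsys_bo_reference(ws, &old, NULL); // release it again
 *
 * Passing NULL as src drops the reference held in *dst, which may destroy
 * the buffer, mirroring the usual pb_reference() semantics.
 */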

#endif