/*
 * Copyright © 2024 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "amdgpu_bo.h"
#include "ac_linux_drm.h"

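/* Create and map the buffers that back a user queue: the ring (with the
 * user fence in its trailing page) and the write/read pointer buffers.
 */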
static bool
amdgpu_userq_ring_init(struct amdgpu_winsys *aws, struct amdgpu_userq *userq)
{
   /* Allocate the ring and the user fence in one buffer. */
   uint32_t gtt_bo_size = AMDGPU_USERQ_RING_SIZE + aws->info.gart_page_size;
   userq->gtt_bo = amdgpu_bo_create(aws, gtt_bo_size, 256, RADEON_DOMAIN_GTT,
                                    RADEON_FLAG_GL2_BYPASS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
   if (!userq->gtt_bo)
      return false;

   userq->gtt_bo_map = amdgpu_bo_map(&aws->dummy_sws.base, userq->gtt_bo, NULL,
                                     PIPE_MAP_READ | PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
   if (!userq->gtt_bo_map)
      return false;

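   /* The write pointer lives in its own GTT page: the CPU bumps it and the
    * GPU reads it, so the buffer bypasses GL2 and is never suballocated.
    */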
   userq->wptr_bo = amdgpu_bo_create(aws, aws->info.gart_page_size, 256, RADEON_DOMAIN_GTT,
                                     RADEON_FLAG_GL2_BYPASS | RADEON_FLAG_NO_SUBALLOC |
                                     RADEON_FLAG_NO_INTERPROCESS_SHARING);
   if (!userq->wptr_bo)
      return false;

   userq->wptr_bo_map = amdgpu_bo_map(&aws->dummy_sws.base, userq->wptr_bo, NULL,
                                      PIPE_MAP_READ | PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
   if (!userq->wptr_bo_map)
      return false;

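   /* Carve the combined GTT buffer into the ring (first AMDGPU_USERQ_RING_SIZE
    * bytes) and the user fence (the trailing page), then zero both counters.
    */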
   userq->ring_ptr = (uint32_t*)userq->gtt_bo_map;
   userq->user_fence_ptr = (uint64_t*)(userq->gtt_bo_map + AMDGPU_USERQ_RING_SIZE);
   userq->user_fence_va = amdgpu_bo_get_va(userq->gtt_bo) + AMDGPU_USERQ_RING_SIZE;
   *userq->user_fence_ptr = 0;
   *userq->wptr_bo_map = 0;

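   /* The read pointer is written by the GPU, so keep it in VRAM and rely on
    * RADEON_FLAG_CLEAR_VRAM for the initial zero instead of a CPU map.
    */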
   userq->rptr_bo = amdgpu_bo_create(aws, aws->info.gart_page_size, 256, RADEON_DOMAIN_VRAM,
                                     RADEON_FLAG_CLEAR_VRAM | RADEON_FLAG_GL2_BYPASS |
                                     RADEON_FLAG_NO_SUBALLOC |
                                     RADEON_FLAG_NO_INTERPROCESS_SHARING);
   if (!userq->rptr_bo)
      return false;

   return true;
}

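/* Destroy the kernel user queue (if it was created) and release all buffers
 * owned by it, including the per-IP ones.
 */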
void
amdgpu_userq_deinit(struct amdgpu_winsys *aws, struct amdgpu_userq *userq)
{
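   /* Free the kernel-side queue first so the hardware can no longer touch the
    * buffers released below.
    */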
   if (userq->userq_handle)
      ac_drm_free_userqueue(aws->dev, userq->userq_handle);

   radeon_bo_reference(&aws->dummy_sws.base, &userq->gtt_bo, NULL);
   radeon_bo_reference(&aws->dummy_sws.base, &userq->wptr_bo, NULL);
   radeon_bo_reference(&aws->dummy_sws.base, &userq->rptr_bo, NULL);
   radeon_bo_reference(&aws->dummy_sws.base, &userq->doorbell_bo, NULL);

   switch (userq->ip_type) {
   case AMD_IP_GFX:
      radeon_bo_reference(&aws->dummy_sws.base, &userq->gfx_data.csa_bo, NULL);
      radeon_bo_reference(&aws->dummy_sws.base, &userq->gfx_data.shadow_bo, NULL);
      break;
   case AMD_IP_COMPUTE:
      radeon_bo_reference(&aws->dummy_sws.base, &userq->compute_data.eop_bo, NULL);
      break;
   case AMD_IP_SDMA:
      radeon_bo_reference(&aws->dummy_sws.base, &userq->sdma_data.csa_bo, NULL);
      break;
   default:
      fprintf(stderr, "amdgpu: userq unsupported for ip = %d\n", userq->ip_type);
   }
}

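/* Create the user queue for the given IP if it doesn't exist yet. Safe to
 * call concurrently; the queue is created at most once per amdgpu_userq.
 */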
bool
amdgpu_userq_init(struct amdgpu_winsys *aws, struct amdgpu_userq *userq, enum amd_ip_type ip_type)
{
   int r = -1;
   uint32_t hw_ip_type;
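   /* MQD: the memory queue descriptor holding per-IP queue state (VAs of the
    * context-save, shadow, and EOP buffers) that the kernel needs at creation.
    */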
   struct drm_amdgpu_userq_mqd_gfx11 gfx_mqd;
   struct drm_amdgpu_userq_mqd_compute_gfx11 compute_mqd;
   struct drm_amdgpu_userq_mqd_sdma_gfx11 sdma_mqd;
   void *mqd;

   simple_mtx_lock(&userq->lock);

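   /* Another thread may have initialized the queue already; gtt_bo doubles as
    * the "initialized" flag.
    */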
   if (userq->gtt_bo) {
      simple_mtx_unlock(&userq->lock);
      return true;
   }

   userq->ip_type = ip_type;
   if (!amdgpu_userq_ring_init(aws, userq))
      goto fail;

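   /* Per-IP setup: GFX needs a context-save area and a shadow buffer for
    * preemption, compute needs an EOP buffer, and SDMA needs a context-save
    * area. Each case fills the matching MQD with the buffer VAs.
    */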
   switch (userq->ip_type) {
   case AMD_IP_GFX:
      hw_ip_type = AMDGPU_HW_IP_GFX;
      userq->gfx_data.csa_bo = amdgpu_bo_create(aws, aws->info.fw_based_mcbp.csa_size,
                                                aws->info.fw_based_mcbp.csa_alignment,
                                                RADEON_DOMAIN_VRAM,
                                                RADEON_FLAG_NO_INTERPROCESS_SHARING);
      if (!userq->gfx_data.csa_bo)
         goto fail;

      userq->gfx_data.shadow_bo = amdgpu_bo_create(aws, aws->info.fw_based_mcbp.shadow_size,
                                                   aws->info.fw_based_mcbp.shadow_alignment,
                                                   RADEON_DOMAIN_VRAM,
                                                   RADEON_FLAG_NO_INTERPROCESS_SHARING);
      if (!userq->gfx_data.shadow_bo)
         goto fail;

      gfx_mqd.shadow_va = amdgpu_bo_get_va(userq->gfx_data.shadow_bo);
      gfx_mqd.csa_va = amdgpu_bo_get_va(userq->gfx_data.csa_bo);
      mqd = &gfx_mqd;
      break;
   case AMD_IP_COMPUTE:
      hw_ip_type = AMDGPU_HW_IP_COMPUTE;
      userq->compute_data.eop_bo = amdgpu_bo_create(aws, aws->info.gart_page_size, 256,
                                                    RADEON_DOMAIN_VRAM,
                                                    RADEON_FLAG_NO_INTERPROCESS_SHARING);
      if (!userq->compute_data.eop_bo)
         goto fail;

      compute_mqd.eop_va = amdgpu_bo_get_va(userq->compute_data.eop_bo);
      mqd = &compute_mqd;
      break;
   case AMD_IP_SDMA:
      hw_ip_type = AMDGPU_HW_IP_DMA;
      userq->sdma_data.csa_bo = amdgpu_bo_create(aws, aws->info.fw_based_mcbp.csa_size,
                                                 aws->info.fw_based_mcbp.csa_alignment,
                                                 RADEON_DOMAIN_VRAM,
                                                 RADEON_FLAG_NO_INTERPROCESS_SHARING);
      if (!userq->sdma_data.csa_bo)
         goto fail;

      sdma_mqd.csa_va = amdgpu_bo_get_va(userq->sdma_data.csa_bo);
      mqd = &sdma_mqd;
      break;
   default:
      fprintf(stderr, "amdgpu: userq unsupported for ip = %d\n", userq->ip_type);
      goto fail;
   }

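   /* The doorbell page is what userspace writes to notify the hardware that
    * new packets are available in the ring.
    */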
   userq->doorbell_bo = amdgpu_bo_create(aws, aws->info.gart_page_size, 256,
                                         RADEON_DOMAIN_DOORBELL,
                                         RADEON_FLAG_NO_INTERPROCESS_SHARING);
   if (!userq->doorbell_bo)
      goto fail;

   /* The doorbell map must be the last map call because its VM timeline point
    * is used below to wait for all mappings before creating the user queue.
    */
   userq->doorbell_bo_map = amdgpu_bo_map(&aws->dummy_sws.base, userq->doorbell_bo, NULL,
                                          PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
   if (!userq->doorbell_bo_map)
      goto fail;

   /* The VA page table mappings for the ring buffer must be ready before job
    * submission so that the GPU can read the submitted packets. The same
    * applies to the rptr and wptr buffers.
    */
   r = ac_drm_cs_syncobj_timeline_wait(aws->fd, &aws->vm_timeline_syncobj,
                                       &get_real_bo(amdgpu_winsys_bo(userq->doorbell_bo))
                                           ->vm_timeline_point,
                                       1, INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, NULL);
   if (r) {
      fprintf(stderr, "amdgpu: waiting for vm fences failed\n");
      goto fail;
   }

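   /* All mappings are in place; ask the kernel to create the queue and hand
    * back a handle.
    */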
   uint64_t ring_va = amdgpu_bo_get_va(userq->gtt_bo);
   r = ac_drm_create_userqueue(aws->dev, hw_ip_type,
                               get_real_bo(amdgpu_winsys_bo(userq->doorbell_bo))->kms_handle,
                               AMDGPU_USERQ_DOORBELL_INDEX, ring_va, AMDGPU_USERQ_RING_SIZE,
                               amdgpu_bo_get_va(userq->wptr_bo), amdgpu_bo_get_va(userq->rptr_bo),
                               mqd, &userq->userq_handle);
   if (r) {
      fprintf(stderr, "amdgpu: failed to create userq\n");
      goto fail;
   }

   simple_mtx_unlock(&userq->lock);
   return true;
fail:
   amdgpu_userq_deinit(aws, userq);
   simple_mtx_unlock(&userq->lock);
   return false;
}