/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

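/*
 * inc_wptr() - Advance a runlist IB write pointer.
 *
 * @wptr counts dwords while @increment_bytes is in bytes, so the
 * increment is converted before being applied. Warns, but still
 * advances, if the new offset would run past @buffer_size_bytes.
 */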
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
			unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

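/*
 * pm_calc_rlib_size() - Compute the runlist IB allocation size.
 *
 * Sizes one map-process packet per process plus one map-queues packet
 * per active queue. The runlist is considered oversubscribed when there
 * are more processes than fit in one scheduling quantum, more active
 * compute queues than the device's CP provides, or more than one queue
 * using GWS; in that case space for one extra chained runlist packet is
 * added.
 */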
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;
	gws_queue_count = pm->dqm->gws_queue_count;

	/* check if there is over subscription
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
	    gws_queue_count > 1) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

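/*
 * pm_allocate_runlist_ib() - Allocate and zero-fill the runlist IB.
 *
 * Sub-allocates the IB from the device's GTT and returns both its CPU
 * and GPU addresses. Only one IB may be live at a time, so a second
 * allocation without an intervening pm_release_ib() fails with -EINVAL.
 */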
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

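/*
 * pm_create_runlist_ib() - Build the runlist IB for a list of processes.
 *
 * Writes a map-process packet for each process in @queues, followed by
 * a map-queues packet for each of its active kernel and user queues.
 * If the runlist is oversubscribed, it is terminated with a chained
 * runlist packet pointing back at this IB so the scheduler keeps
 * cycling through it.
 */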
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

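/*
 * pm_init() - Initialize the packet manager for a device queue manager.
 *
 * Selects the ASIC-specific packet-writing functions and creates the
 * HIQ kernel queue through which all packets are submitted to the HWS.
 */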
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_ARCTURUS:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
	case CHIP_CYAN_SKILLFISH:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	case CHIP_ALDEBARAN:
		pm->pmf = &kfd_aldebaran_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

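/* pm_uninit() - Destroy the packet manager's lock and its HIQ. */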
void pm_uninit(struct packet_manager *pm, bool hanging)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue, hanging);
	pm->priv_queue = NULL;
}

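/*
 * pm_send_set_resources() - Hand the scheduling resources in @res to
 * the HWS via a set-resources packet on the HIQ. The packet is rolled
 * back if it cannot be built.
 */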
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

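/*
 * pm_send_runlist() - Build a runlist IB for @dqm_queues and submit a
 * runlist packet on the HIQ that points the HWS at it. On any failure
 * the partially built packet is rolled back and the IB is released.
 */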
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	kq_submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

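/*
 * pm_send_query_status() - Ask the HWS to write @fence_value to
 * @fence_address once the preceding packets have been processed, so
 * that a caller can poll the fence for completion.
 */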
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint64_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

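/*
 * pm_send_unmap_queue() - Ask the HWS to preempt (or, if @reset is set,
 * reset) the queues selected by @type, @filter and @filter_param.
 */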
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
					reset, sdma_engine);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

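/* pm_release_ib() - Free the runlist IB, if one is currently allocated. */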
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

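/*
 * pm_debugfs_runlist() - Hex-dump the active runlist IB to debugfs,
 * or report that no runlist is active.
 */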
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

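/*
 * pm_debugfs_hang_hws() - Deliberately hang the HWS for testing by
 * submitting a garbage (0x55-filled) packet on the HIQ.
 */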
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	if (!pm->priv_queue)
		return -EAGAIN;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	kq_submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif