1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4 * Copyright 2016-2021 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13
14 #define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15 HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
16 HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND)
17
18
19 #define MAX_TS_ITER_NUM 10
20
21 /**
22 * enum hl_cs_wait_status - cs wait status
23 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
24 * @CS_WAIT_STATUS_COMPLETED: cs completed
25 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
26 */
27 enum hl_cs_wait_status {
28 CS_WAIT_STATUS_BUSY,
29 CS_WAIT_STATUS_COMPLETED,
30 CS_WAIT_STATUS_GONE
31 };
32
33 static void job_wq_completion(struct work_struct *work);
34 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
35 enum hl_cs_wait_status *status, s64 *timestamp);
36 static void cs_do_release(struct kref *ref);
37
38 static void hl_push_cs_outcome(struct hl_device *hdev,
39 struct hl_cs_outcome_store *outcome_store,
40 u64 seq, ktime_t ts, int error)
41 {
42 struct hl_cs_outcome *node;
43 unsigned long flags;
44
45 /*
46 * CS outcome store supports the following operations:
47 * push outcome - store a recent CS outcome in the store
48 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
49 * It uses 2 lists: used list and free list.
50 * It has a pre-allocated amount of nodes, each node stores
51 * a single CS outcome.
52 * Initially, all the nodes are in the free list.
53 * On push outcome, a node (any) is taken from the free list, its
54 * information is filled in, and the node is moved to the used list.
55 * It is possible that there are no nodes left in the free list.
56 * In this case, we will lose some information about old outcomes. We
57 * will pop the OLDEST node from the used list, and make it free.
58 * On pop, the node is searched for in the used list (using a search
59 * index).
60 * If found, the node is then removed from the used list, and moved
61 * back to the free list. The outcome data that the node contained is
62 * returned back to the user.
63 */
64
65 spin_lock_irqsave(&outcome_store->db_lock, flags);
66
67 if (list_empty(&outcome_store->free_list)) {
68 node = list_last_entry(&outcome_store->used_list,
69 struct hl_cs_outcome, list_link);
70 hash_del(&node->map_link);
71 dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
72 } else {
73 node = list_last_entry(&outcome_store->free_list,
74 struct hl_cs_outcome, list_link);
75 }
76
77 list_del_init(&node->list_link);
78
79 node->seq = seq;
80 node->ts = ts;
81 node->error = error;
82
83 list_add(&node->list_link, &outcome_store->used_list);
84 hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
85
86 spin_unlock_irqrestore(&outcome_store->db_lock, flags);
87 }
88
89 static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
90 u64 seq, ktime_t *ts, int *error)
91 {
92 struct hl_cs_outcome *node;
93 unsigned long flags;
94
95 spin_lock_irqsave(&outcome_store->db_lock, flags);
96
97 hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
98 if (node->seq == seq) {
99 *ts = node->ts;
100 *error = node->error;
101
102 hash_del(&node->map_link);
103 list_del_init(&node->list_link);
104 list_add(&node->list_link, &outcome_store->free_list);
105
106 spin_unlock_irqrestore(&outcome_store->db_lock, flags);
107
108 return true;
109 }
110
111 spin_unlock_irqrestore(&outcome_store->db_lock, flags);
112
113 return false;
114 }
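
/*
 * Illustrative pairing of the two helpers above (a sketch, not a new driver
 * path): hl_push_cs_outcome() stores the result of a timestamped CS when it
 * completes, and hl_pop_cs_outcome() is expected to retrieve that result
 * later by sequence number, e.g.:
 *
 *	ktime_t ts;
 *	int err;
 *
 *	hl_push_cs_outcome(hdev, &ctx->outcome_store, seq, ktime_get(), 0);
 *	...
 *	if (hl_pop_cs_outcome(&ctx->outcome_store, seq, &ts, &err))
 *		...use ts/err as the completion timestamp and status of seq...
 *
 * Because the store is bounded, pushing more outcomes than there are
 * pre-allocated nodes recycles the oldest used node and its data is lost.
 */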
115
116 static void hl_sob_reset(struct kref *ref)
117 {
118 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
119 kref);
120 struct hl_device *hdev = hw_sob->hdev;
121
122 dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
123
124 hdev->asic_funcs->reset_sob(hdev, hw_sob);
125
126 hw_sob->need_reset = false;
127 }
128
129 void hl_sob_reset_error(struct kref *ref)
130 {
131 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
132 kref);
133 struct hl_device *hdev = hw_sob->hdev;
134
135 dev_crit(hdev->dev,
136 "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
137 hw_sob->q_idx, hw_sob->sob_id);
138 }
139
140 void hw_sob_put(struct hl_hw_sob *hw_sob)
141 {
142 if (hw_sob)
143 kref_put(&hw_sob->kref, hl_sob_reset);
144 }
145
146 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
147 {
148 if (hw_sob)
149 kref_put(&hw_sob->kref, hl_sob_reset_error);
150 }
151
152 void hw_sob_get(struct hl_hw_sob *hw_sob)
153 {
154 if (hw_sob)
155 kref_get(&hw_sob->kref);
156 }
157
158 /**
159 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
160 * @sob_base: sob base id
161 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
162 * @mask: generated mask
163 *
164 * Return: 0 if given parameters are valid
165 */
166 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
167 {
168 int i;
169
170 if (sob_mask == 0)
171 return -EINVAL;
172
173 if (sob_mask == 0x1) {
174 *mask = ~(1 << (sob_base & 0x7));
175 } else {
176 /* find msb in order to verify sob range is valid */
177 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
178 if (BIT(i) & sob_mask)
179 break;
180
181 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
182 return -EINVAL;
183
184 *mask = ~sob_mask;
185 }
186
187 return 0;
188 }
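
/*
 * Worked example for hl_gen_sob_mask() (illustrative only, assuming
 * HL_MAX_SOBS_PER_MONITOR is 8):
 *
 *	sob_base = 10 (offset 10 & 0x7 = 2), sob_mask = 0x1
 *		-> *mask = ~(1 << 2) = 0xfb, i.e. only SOB 10 is selected
 *
 *	sob_base offset = 2, sob_mask = 0x7 (SOBs at offsets 0..2 from base)
 *		-> msb = 2, 2 <= 8 - 2 - 1, so *mask = ~0x7 = 0xf8
 *
 *	sob_base offset = 6, sob_mask = 0x0f
 *		-> msb = 3 > 8 - 6 - 1 = 1, the range crosses the monitor
 *		   window, so -EINVAL is returned
 */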
189
190 static void hl_fence_release(struct kref *kref)
191 {
192 struct hl_fence *fence =
193 container_of(kref, struct hl_fence, refcount);
194 struct hl_cs_compl *hl_cs_cmpl =
195 container_of(fence, struct hl_cs_compl, base_fence);
196
197 kfree(hl_cs_cmpl);
198 }
199
200 void hl_fence_put(struct hl_fence *fence)
201 {
202 if (IS_ERR_OR_NULL(fence))
203 return;
204 kref_put(&fence->refcount, hl_fence_release);
205 }
206
207 void hl_fences_put(struct hl_fence **fence, int len)
208 {
209 int i;
210
211 for (i = 0; i < len; i++, fence++)
212 hl_fence_put(*fence);
213 }
214
215 void hl_fence_get(struct hl_fence *fence)
216 {
217 if (fence)
218 kref_get(&fence->refcount);
219 }
220
221 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
222 {
223 kref_init(&fence->refcount);
224 fence->cs_sequence = sequence;
225 fence->error = 0;
226 fence->timestamp = ktime_set(0, 0);
227 fence->mcs_handling_done = false;
228 init_completion(&fence->completion);
229 }
230
231 void cs_get(struct hl_cs *cs)
232 {
233 kref_get(&cs->refcount);
234 }
235
236 static int cs_get_unless_zero(struct hl_cs *cs)
237 {
238 return kref_get_unless_zero(&cs->refcount);
239 }
240
241 static void cs_put(struct hl_cs *cs)
242 {
243 kref_put(&cs->refcount, cs_do_release);
244 }
245
246 static void cs_job_do_release(struct kref *ref)
247 {
248 struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
249
250 kfree(job);
251 }
252
253 static void hl_cs_job_put(struct hl_cs_job *job)
254 {
255 kref_put(&job->refcount, cs_job_do_release);
256 }
257
258 bool cs_needs_completion(struct hl_cs *cs)
259 {
260 /* In case this is a staged CS, only the last CS in sequence should
261 * get a completion; any non-staged CS will always get a completion
262 */
263 if (cs->staged_cs && !cs->staged_last)
264 return false;
265
266 return true;
267 }
268
269 bool cs_needs_timeout(struct hl_cs *cs)
270 {
271 /* In case this is a staged CS, only the first CS in sequence should
272 * get a timeout; any non-staged CS will always get a timeout
273 */
274 if (cs->staged_cs && !cs->staged_first)
275 return false;
276
277 return true;
278 }
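
/*
 * Putting the two helpers above together for a staged submission of three
 * CSs (illustrative):
 *
 *	CS		needs completion?	needs timeout (TDR)?
 *	staged_first	no			yes
 *	middle		no			no
 *	staged_last	yes			no
 *
 * A single staged CS marked both first and last, like a non-staged CS,
 * answers "yes" to both questions.
 */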
279
280 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
281 {
282 /*
283 * Patched CB is created for external queue jobs, and for H/W queue
284 * jobs if the user CB was allocated by the driver and the MMU is disabled.
285 */
286 return (job->queue_type == QUEUE_TYPE_EXT ||
287 (job->queue_type == QUEUE_TYPE_HW &&
288 job->is_kernel_allocated_cb &&
289 !hdev->mmu_enable));
290 }
291
292 /*
293 * cs_parser - parse the user command submission
294 *
295 * @hpriv : pointer to the private data of the fd
296 * @job : pointer to the job that holds the command submission info
297 *
298 * The function parses the command submission of the user. It calls the
299 * ASIC specific parser, which returns a list of memory blocks to send
300 * to the device as different command buffers
301 *
302 */
303 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
304 {
305 struct hl_device *hdev = hpriv->hdev;
306 struct hl_cs_parser parser;
307 int rc;
308
309 parser.ctx_id = job->cs->ctx->asid;
310 parser.cs_sequence = job->cs->sequence;
311 parser.job_id = job->id;
312
313 parser.hw_queue_id = job->hw_queue_id;
314 parser.job_userptr_list = &job->userptr_list;
315 parser.patched_cb = NULL;
316 parser.user_cb = job->user_cb;
317 parser.user_cb_size = job->user_cb_size;
318 parser.queue_type = job->queue_type;
319 parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
320 job->patched_cb = NULL;
321 parser.completion = cs_needs_completion(job->cs);
322
323 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
324
325 if (is_cb_patched(hdev, job)) {
326 if (!rc) {
327 job->patched_cb = parser.patched_cb;
328 job->job_cb_size = parser.patched_cb_size;
329 job->contains_dma_pkt = parser.contains_dma_pkt;
330 atomic_inc(&job->patched_cb->cs_cnt);
331 }
332
333 /*
334 * Whether the parsing worked or not, we don't need the
335 * original CB anymore because it was already parsed and
336 * won't be accessed again for this CS
337 */
338 atomic_dec(&job->user_cb->cs_cnt);
339 hl_cb_put(job->user_cb);
340 job->user_cb = NULL;
341 } else if (!rc) {
342 job->job_cb_size = job->user_cb_size;
343 }
344
345 return rc;
346 }
347
348 static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
349 {
350 struct hl_cs *cs = job->cs;
351
352 if (is_cb_patched(hdev, job)) {
353 hl_userptr_delete_list(hdev, &job->userptr_list);
354
355 /*
356 * We might arrive here from rollback and patched CB wasn't
357 * created, so we need to check it's not NULL
358 */
359 if (job->patched_cb) {
360 atomic_dec(&job->patched_cb->cs_cnt);
361 hl_cb_put(job->patched_cb);
362 }
363 }
364
365 /* For H/W queue jobs, if a user CB was allocated by the driver and the
366 * MMU is enabled, the user CB isn't released in cs_parser() and thus
367 * should be released here. This is also true for INT queue jobs which
368 * were allocated by the driver.
369 */
370 if ((job->is_kernel_allocated_cb &&
371 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
372 job->queue_type == QUEUE_TYPE_INT))) {
373 atomic_dec(&job->user_cb->cs_cnt);
374 hl_cb_put(job->user_cb);
375 }
376
377 /*
378 * This is the only place where there can be multiple threads
379 * modifying the list at the same time
380 */
381 spin_lock(&cs->job_lock);
382 list_del(&job->cs_node);
383 spin_unlock(&cs->job_lock);
384
385 hl_debugfs_remove_job(hdev, job);
386
387 /* We decrement reference only for a CS that gets completion
388 * because the reference was incremented only for this kind of CS
389 * right before it was scheduled.
390 *
391 * In staged submission, only the last CS marked as 'staged_last'
392 * gets completion, hence its release function will be called from here.
393 * As for all the other CSs in the staged submission which do not get
394 * completion, their CS reference will be decremented by the
395 * 'staged_last' CS during the CS release flow.
396 * All relevant PQ CI counters will be incremented during the CS release
397 * flow by calling 'hl_hw_queue_update_ci'.
398 */
399 if (cs_needs_completion(cs) &&
400 (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW))
401 cs_put(cs);
402
403 hl_cs_job_put(job);
404 }
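
/*
 * Note on the CB reference pairing (a summary of the logic above, not new
 * behaviour): cs_parser() drops the user CB of patched jobs once parsing is
 * done and pins the patched CB instead, so hl_complete_job() releases the
 * patched CB for those jobs. For jobs with a driver-allocated CB on H/W
 * queues (when the MMU is enabled) or on INT queues, the user CB is kept
 * through submission and is therefore only dropped here.
 */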
405
406 /*
407 * hl_staged_cs_find_first - locate the first CS in this staged submission
408 *
409 * @hdev: pointer to device structure
410 * @cs_seq: staged submission sequence number
411 *
412 * @note: This function must be called under 'hdev->cs_mirror_lock'
413 *
414 * Find and return a CS pointer with the given sequence
415 */
416 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
417 {
418 struct hl_cs *cs;
419
420 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
421 if (cs->staged_cs && cs->staged_first &&
422 cs->sequence == cs_seq)
423 return cs;
424
425 return NULL;
426 }
427
428 /*
429 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
430 *
431 * @hdev: pointer to device structure
432 * @cs: staged submission member
433 *
434 */
435 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
436 {
437 struct hl_cs *last_entry;
438
439 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
440 staged_cs_node);
441
442 if (last_entry->staged_last)
443 return true;
444
445 return false;
446 }
447
448 /*
449 * staged_cs_get - get CS reference if this CS is a part of a staged CS
450 *
451 * @hdev: pointer to device structure
452 * @cs: current CS
453 * @cs_seq: staged submission sequence number
454 *
455 * Increment CS reference for every CS in this staged submission except for
456 * the CS which gets completion.
457 */
458 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
459 {
460 /* Only the last CS in this staged submission will get a completion.
461 * We must increment the reference for all other CS's in this
462 * staged submission.
463 * Once we get a completion we will release the whole staged submission.
464 */
465 if (!cs->staged_last)
466 cs_get(cs);
467 }
468
469 /*
470 * staged_cs_put - put a CS in case it is part of staged submission
471 *
472 * @hdev: pointer to device structure
473 * @cs: CS to put
474 *
475 * This function decrements a CS reference (for a non completion CS)
476 */
477 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
478 {
479 /* We release all CS's in a staged submission except the last
480 * CS which we have never incremented its reference.
481 */
482 if (!cs_needs_completion(cs))
483 cs_put(cs);
484 }
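
/*
 * Illustrative refcount flow for a staged submission (sketch only): every
 * CS except the one marked 'staged_last' takes an extra reference in
 * staged_cs_get(), so it stays alive until the completing CS runs
 * cs_do_release() and drops those references via staged_cs_put(). On a
 * rollback the same staged_cs_put() is used, so the pairing stays balanced.
 */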
485
486 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
487 {
488 struct hl_cs *next = NULL, *iter, *first_cs;
489
490 if (!cs_needs_timeout(cs))
491 return;
492
493 spin_lock(&hdev->cs_mirror_lock);
494
495 /* We need to handle tdr only once for the complete staged submission.
496 * Hence, we choose the CS that reaches this function first which is
497 * the CS marked as 'staged_last'.
498 * In case single staged cs was submitted which has both first and last
499 * indications, then "cs_find_first" below will return NULL, since we
500 * removed the cs node from the list before getting here,
501 * in such cases just continue with the cs to cancel its TDR work.
502 */
503 if (cs->staged_cs && cs->staged_last) {
504 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
505 if (first_cs)
506 cs = first_cs;
507 }
508
509 spin_unlock(&hdev->cs_mirror_lock);
510
511 /* Don't cancel TDR in case this CS timed out, because we might be
512 * running from the TDR context
513 */
514 if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
515 return;
516
517 if (cs->tdr_active)
518 cancel_delayed_work_sync(&cs->work_tdr);
519
520 spin_lock(&hdev->cs_mirror_lock);
521
522 /* queue TDR for next CS */
523 list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
524 if (cs_needs_timeout(iter)) {
525 next = iter;
526 break;
527 }
528
529 if (next && !next->tdr_active) {
530 next->tdr_active = true;
531 schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
532 }
533
534 spin_unlock(&hdev->cs_mirror_lock);
535 }
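
/*
 * TDR hand-off example (a sketch): if CS 7 owned the delayed TDR work and
 * has just completed, cs_handle_tdr() cancels its work_tdr (unless CS 7
 * itself timed out) and then walks cs_mirror_list for the oldest still
 * pending CS that needs a timeout, say CS 9, marks it tdr_active and
 * schedules its work_tdr with that CS's own timeout_jiffies. At most one
 * CS owns the TDR timer at any given time.
 */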
536
537 /*
538 * force_complete_multi_cs - complete all contexts that wait on multi-CS
539 *
540 * @hdev: pointer to habanalabs device structure
541 */
542 static void force_complete_multi_cs(struct hl_device *hdev)
543 {
544 int i;
545
546 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
547 struct multi_cs_completion *mcs_compl;
548
549 mcs_compl = &hdev->multi_cs_completion[i];
550
551 spin_lock(&mcs_compl->lock);
552
553 if (!mcs_compl->used) {
554 spin_unlock(&mcs_compl->lock);
555 continue;
556 }
557
558 /* when calling force complete no context should be waiting on
559 * multi-CS.
560 * We are calling the function as a protection for such a case,
561 * to free any pending context and print an error message
562 */
563 dev_err(hdev->dev,
564 "multi-CS completion context %d still waiting when calling force completion\n",
565 i);
566 complete_all(&mcs_compl->completion);
567 spin_unlock(&mcs_compl->lock);
568 }
569 }
570
571 /*
572 * complete_multi_cs - complete all waiting entities on multi-CS
573 *
574 * @hdev: pointer to habanalabs device structure
575 * @cs: CS structure
576 * The function signals a waiting entity that has overlapping stream masters
577 * with the completed CS.
578 * For example:
579 * - a completed CS worked on stream master QID 4, multi CS completion
580 * is actively waiting on stream master QIDs 3, 5. don't send signal as no
581 * common stream master QID
582 * - a completed CS worked on stream master QID 4, multi CS completion
583 * is actively waiting on stream master QIDs 3, 4. send signal as stream
584 * master QID 4 is common
585 */
586 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
587 {
588 struct hl_fence *fence = cs->fence;
589 int i;
590
591 /* in case of multi CS check for completion only for the first CS */
592 if (cs->staged_cs && !cs->staged_first)
593 return;
594
595 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
596 struct multi_cs_completion *mcs_compl;
597
598 mcs_compl = &hdev->multi_cs_completion[i];
599 if (!mcs_compl->used)
600 continue;
601
602 spin_lock(&mcs_compl->lock);
603
604 /*
605 * complete if:
606 * 1. still waiting for completion
607 * 2. the completed CS has at least one overlapping stream
608 * master with the stream masters in the completion
609 */
610 if (mcs_compl->used &&
611 (fence->stream_master_qid_map &
612 mcs_compl->stream_master_qid_map)) {
613 /* extract the timestamp only of first completed CS */
614 if (!mcs_compl->timestamp)
615 mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
616
617 complete_all(&mcs_compl->completion);
618
619 /*
620 * Setting mcs_handling_done inside the lock ensures
621 * at least one fence has mcs_handling_done set to
622 * true before wait for mcs finish. This ensures at
623 * least one CS will be set as completed when polling
624 * mcs fences.
625 */
626 fence->mcs_handling_done = true;
627 }
628
629 spin_unlock(&mcs_compl->lock);
630 }
631 /* In case CS completed without mcs completion initialized */
632 fence->mcs_handling_done = true;
633 }
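
/*
 * Worked example of the overlap test above (illustrative; the actual QID to
 * bit mapping comes from hdev->stream_master_qid_arr, see
 * get_stream_master_qid_mask() below):
 *
 *	completed CS:	fence->stream_master_qid_map     = 0b0010
 *	waiter A:	mcs_compl->stream_master_qid_map = 0b1100 -> no wake
 *	waiter B:	mcs_compl->stream_master_qid_map = 0b0110 -> wake,
 *			bit 1 is common; the completion timestamp is recorded
 *			only for the first completed CS
 */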
634
635 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
636 struct hl_cs *cs,
637 struct hl_cs_compl *hl_cs_cmpl)
638 {
639 /* Skip this handler if the cs wasn't submitted, to avoid putting
640 * the hw_sob twice, since this case was already handled at this point,
641 * also skip if the hw_sob pointer wasn't set.
642 */
643 if (!hl_cs_cmpl->hw_sob || !cs->submitted)
644 return;
645
646 spin_lock(&hl_cs_cmpl->lock);
647
648 /*
649 * we get refcount upon reservation of signals or signal/wait cs for the
650 * hw_sob object, and need to put it when the first staged cs
651 * (which contains the encaps signals) or cs signal/wait is completed.
652 */
653 if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
654 (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
655 (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
656 (!!hl_cs_cmpl->encaps_signals)) {
657 dev_dbg(hdev->dev,
658 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
659 hl_cs_cmpl->cs_seq,
660 hl_cs_cmpl->type,
661 hl_cs_cmpl->hw_sob->sob_id,
662 hl_cs_cmpl->sob_val);
663
664 hw_sob_put(hl_cs_cmpl->hw_sob);
665
666 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
667 hdev->asic_funcs->reset_sob_group(hdev,
668 hl_cs_cmpl->sob_group);
669 }
670
671 spin_unlock(&hl_cs_cmpl->lock);
672 }
673
674 static void cs_do_release(struct kref *ref)
675 {
676 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
677 struct hl_device *hdev = cs->ctx->hdev;
678 struct hl_cs_job *job, *tmp;
679 struct hl_cs_compl *hl_cs_cmpl =
680 container_of(cs->fence, struct hl_cs_compl, base_fence);
681
682 cs->completed = true;
683
684 /*
685 * Although if we reached here it means that all external jobs have
686 * finished, because each one of them took refcnt to CS, we still
687 * need to go over the internal jobs and complete them. Otherwise, we
688 * will have leaked memory and what's worse, the CS object (and
689 * potentially the CTX object) could be released, while the JOB
690 * still holds a pointer to them (but no reference).
691 */
692 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
693 hl_complete_job(hdev, job);
694
695 if (!cs->submitted) {
696 /*
697 * In case the wait for signal CS was submitted, the fence put
698 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
699 * right before hanging on the PQ.
700 */
701 if (cs->type == CS_TYPE_WAIT ||
702 cs->type == CS_TYPE_COLLECTIVE_WAIT)
703 hl_fence_put(cs->signal_fence);
704
705 goto out;
706 }
707
708 /* Need to update CI for all queue jobs that do not get a completion */
709 hl_hw_queue_update_ci(cs);
710
711 /* remove CS from CS mirror list */
712 spin_lock(&hdev->cs_mirror_lock);
713 list_del_init(&cs->mirror_node);
714 spin_unlock(&hdev->cs_mirror_lock);
715
716 cs_handle_tdr(hdev, cs);
717
718 if (cs->staged_cs) {
719 /* the completion CS decrements reference for the entire
720 * staged submission
721 */
722 if (cs->staged_last) {
723 struct hl_cs *staged_cs, *tmp_cs;
724
725 list_for_each_entry_safe(staged_cs, tmp_cs,
726 &cs->staged_cs_node, staged_cs_node)
727 staged_cs_put(hdev, staged_cs);
728 }
729
730 /* A staged CS will be a member in the list only after it
731 * was submitted. We used 'cs_mirror_lock' when inserting
732 * it to list so we will use it again when removing it
733 */
734 if (cs->submitted) {
735 spin_lock(&hdev->cs_mirror_lock);
736 list_del(&cs->staged_cs_node);
737 spin_unlock(&hdev->cs_mirror_lock);
738 }
739
740 /* decrement refcount to handle when first staged cs
741 * with encaps signals is completed.
742 */
743 if (hl_cs_cmpl->encaps_signals)
744 kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
745 hl_encaps_handle_do_release);
746 }
747
748 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
749 && cs->encaps_signals)
750 kref_put(&cs->encaps_sig_hdl->refcount,
751 hl_encaps_handle_do_release);
752
753 out:
754 /* Must be called before hl_ctx_put because inside we use ctx to get
755 * the device
756 */
757 hl_debugfs_remove_cs(cs);
758
759 hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
760
761 /* We need to mark an error for a CS that was not submitted because in that case
762 * the hl fence release flow is different. Mainly, we don't need
763 * to handle hw_sob for signal/wait
764 */
765 if (cs->timedout)
766 cs->fence->error = -ETIMEDOUT;
767 else if (cs->aborted)
768 cs->fence->error = -EIO;
769 else if (!cs->submitted)
770 cs->fence->error = -EBUSY;
771
772 if (unlikely(cs->skip_reset_on_timeout)) {
773 dev_err(hdev->dev,
774 "Command submission %llu completed after %llu (s)\n",
775 cs->sequence,
776 div_u64(jiffies - cs->submission_time_jiffies, HZ));
777 }
778
779 if (cs->timestamp) {
780 cs->fence->timestamp = ktime_get();
781 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
782 cs->fence->timestamp, cs->fence->error);
783 }
784
785 hl_ctx_put(cs->ctx);
786
787 complete_all(&cs->fence->completion);
788 complete_multi_cs(hdev, cs);
789
790 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
791
792 hl_fence_put(cs->fence);
793
794 kfree(cs->jobs_in_queue_cnt);
795 kfree(cs);
796 }
797
798 static void cs_timedout(struct work_struct *work)
799 {
800 struct hl_device *hdev;
801 u64 event_mask;
802 int rc;
803 struct hl_cs *cs = container_of(work, struct hl_cs,
804 work_tdr.work);
805 bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false;
806
807 rc = cs_get_unless_zero(cs);
808 if (!rc)
809 return;
810
811 if ((!cs->submitted) || (cs->completed)) {
812 cs_put(cs);
813 return;
814 }
815
816 hdev = cs->ctx->hdev;
817
818 if (likely(!skip_reset_on_timeout)) {
819 if (hdev->reset_on_lockup)
820 device_reset = true;
821 else
822 hdev->reset_info.needs_reset = true;
823
824 /* Mark the CS as timed out so we won't try to cancel its TDR */
825 cs->timedout = true;
826 }
827
828 /* Save only the first CS timeout parameters */
829 rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
830 if (rc) {
831 hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
832 hdev->captured_err_info.cs_timeout.seq = cs->sequence;
833
834 event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT |
835 HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT;
836
837 hl_notifier_event_send_all(hdev, event_mask);
838 }
839
840 switch (cs->type) {
841 case CS_TYPE_SIGNAL:
842 dev_err(hdev->dev,
843 "Signal command submission %llu has not finished in time!\n",
844 cs->sequence);
845 break;
846
847 case CS_TYPE_WAIT:
848 dev_err(hdev->dev,
849 "Wait command submission %llu has not finished in time!\n",
850 cs->sequence);
851 break;
852
853 case CS_TYPE_COLLECTIVE_WAIT:
854 dev_err(hdev->dev,
855 "Collective Wait command submission %llu has not finished in time!\n",
856 cs->sequence);
857 break;
858
859 default:
860 dev_err(hdev->dev,
861 "Command submission %llu has not finished in time!\n",
862 cs->sequence);
863 break;
864 }
865
866 rc = hl_state_dump(hdev);
867 if (rc)
868 dev_err(hdev->dev, "Error during system state dump %d\n", rc);
869
870 cs_put(cs);
871
872 if (device_reset)
873 hl_device_reset(hdev, HL_DRV_RESET_TDR);
874 }
875
876 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
877 enum hl_cs_type cs_type, u64 user_sequence,
878 struct hl_cs **cs_new, u32 flags, u32 timeout)
879 {
880 struct hl_cs_counters_atomic *cntr;
881 struct hl_fence *other = NULL;
882 struct hl_cs_compl *cs_cmpl;
883 struct hl_cs *cs;
884 int rc;
885
886 cntr = &hdev->aggregated_cs_counters;
887
888 cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
889 if (!cs)
890 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
891
892 if (!cs) {
893 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
894 atomic64_inc(&cntr->out_of_mem_drop_cnt);
895 return -ENOMEM;
896 }
897
898 /* increment refcnt for context */
899 hl_ctx_get(ctx);
900
901 cs->ctx = ctx;
902 cs->submitted = false;
903 cs->completed = false;
904 cs->type = cs_type;
905 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
906 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
907 cs->timeout_jiffies = timeout;
908 cs->skip_reset_on_timeout =
909 hdev->reset_info.skip_reset_on_timeout ||
910 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
911 cs->submission_time_jiffies = jiffies;
912 INIT_LIST_HEAD(&cs->job_list);
913 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
914 kref_init(&cs->refcount);
915 spin_lock_init(&cs->job_lock);
916
917 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
918 if (!cs_cmpl)
919 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
920
921 if (!cs_cmpl) {
922 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
923 atomic64_inc(&cntr->out_of_mem_drop_cnt);
924 rc = -ENOMEM;
925 goto free_cs;
926 }
927
928 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
929 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
930 if (!cs->jobs_in_queue_cnt)
931 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
932 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
933
934 if (!cs->jobs_in_queue_cnt) {
935 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
936 atomic64_inc(&cntr->out_of_mem_drop_cnt);
937 rc = -ENOMEM;
938 goto free_cs_cmpl;
939 }
940
941 cs_cmpl->hdev = hdev;
942 cs_cmpl->type = cs->type;
943 spin_lock_init(&cs_cmpl->lock);
944 cs->fence = &cs_cmpl->base_fence;
945
946 spin_lock(&ctx->cs_lock);
947
948 cs_cmpl->cs_seq = ctx->cs_sequence;
949 other = ctx->cs_pending[cs_cmpl->cs_seq &
950 (hdev->asic_prop.max_pending_cs - 1)];
951
952 if (other && !completion_done(&other->completion)) {
953 /* If the following statement is true, it means we have reached
954 * a point in which only part of the staged submission was
955 * submitted and we don't have enough room in the 'cs_pending'
956 * array for the rest of the submission.
957 * This causes a deadlock because this CS will never be
958 * completed as it depends on future CS's for completion.
959 */
960 if (other->cs_sequence == user_sequence)
961 dev_crit_ratelimited(hdev->dev,
962 "Staged CS %llu deadlock due to lack of resources",
963 user_sequence);
964
965 dev_dbg_ratelimited(hdev->dev,
966 "Rejecting CS because of too many in-flights CS\n");
967 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
968 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
969 rc = -EAGAIN;
970 goto free_fence;
971 }
972
973 /* init hl_fence */
974 hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
975
976 cs->sequence = cs_cmpl->cs_seq;
977
978 ctx->cs_pending[cs_cmpl->cs_seq &
979 (hdev->asic_prop.max_pending_cs - 1)] =
980 &cs_cmpl->base_fence;
981 ctx->cs_sequence++;
982
983 hl_fence_get(&cs_cmpl->base_fence);
984
985 hl_fence_put(other);
986
987 spin_unlock(&ctx->cs_lock);
988
989 *cs_new = cs;
990
991 return 0;
992
993 free_fence:
994 spin_unlock(&ctx->cs_lock);
995 kfree(cs->jobs_in_queue_cnt);
996 free_cs_cmpl:
997 kfree(cs_cmpl);
998 free_cs:
999 kfree(cs);
1000 hl_ctx_put(ctx);
1001 return rc;
1002 }
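
/*
 * The cs_pending array above acts as a power-of-2 ring indexed by
 * "seq & (max_pending_cs - 1)". Illustration (assuming max_pending_cs = 64;
 * the real value is an ASIC property): a CS with sequence 70 lands in slot
 * 70 & 63 = 6, i.e. the slot previously used by sequence 6. If that older
 * fence has not completed yet, allocate_cs() rejects the new CS with
 * -EAGAIN instead of overwriting a live fence.
 */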
1003
1004 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
1005 {
1006 struct hl_cs_job *job, *tmp;
1007
1008 staged_cs_put(hdev, cs);
1009
1010 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1011 hl_complete_job(hdev, job);
1012 }
1013
1014 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
1015 {
1016 int i;
1017 struct hl_cs *cs, *tmp;
1018
1019 if (!skip_wq_flush) {
1020 flush_workqueue(hdev->ts_free_obj_wq);
1021
1022 /* flush all completions before iterating over the CS mirror list in
1023 * order to avoid a race with the release functions
1024 */
1025 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1026 flush_workqueue(hdev->cq_wq[i]);
1027
1028 flush_workqueue(hdev->cs_cmplt_wq);
1029 }
1030
1031 /* Make sure we don't have leftovers in the CS mirror list */
1032 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
1033 cs_get(cs);
1034 cs->aborted = true;
1035 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
1036 cs->ctx->asid, cs->sequence);
1037 cs_rollback(hdev, cs);
1038 cs_put(cs);
1039 }
1040
1041 force_complete_multi_cs(hdev);
1042 }
1043
1044 static void
1045 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
1046 {
1047 struct hl_user_pending_interrupt *pend, *temp;
1048 unsigned long flags;
1049
1050 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
1051 list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
1052 if (pend->ts_reg_info.buf) {
1053 list_del(&pend->wait_list_node);
1054 hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
1055 hl_cb_put(pend->ts_reg_info.cq_cb);
1056 } else {
1057 pend->fence.error = -EIO;
1058 complete_all(&pend->fence.completion);
1059 }
1060 }
1061 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
1062 }
1063
1064 void hl_release_pending_user_interrupts(struct hl_device *hdev)
1065 {
1066 struct asic_fixed_properties *prop = &hdev->asic_prop;
1067 struct hl_user_interrupt *interrupt;
1068 int i;
1069
1070 if (!prop->user_interrupt_count)
1071 return;
1072
1073 /* We iterate through the user interrupt requests and wake up all
1074 * user threads waiting for interrupt completion. We iterate the
1075 * list under a lock; this is why all user threads, once awake,
1076 * will wait on the same lock and will release the waiting object upon
1077 * unlock.
1078 */
1079
1080 for (i = 0 ; i < prop->user_interrupt_count ; i++) {
1081 interrupt = &hdev->user_interrupt[i];
1082 wake_pending_user_interrupt_threads(interrupt);
1083 }
1084
1085 interrupt = &hdev->common_user_cq_interrupt;
1086 wake_pending_user_interrupt_threads(interrupt);
1087
1088 interrupt = &hdev->common_decoder_interrupt;
1089 wake_pending_user_interrupt_threads(interrupt);
1090 }
1091
1092 static void job_wq_completion(struct work_struct *work)
1093 {
1094 struct hl_cs_job *job = container_of(work, struct hl_cs_job,
1095 finish_work);
1096 struct hl_cs *cs = job->cs;
1097 struct hl_device *hdev = cs->ctx->hdev;
1098
1099 /* job is no longer needed */
1100 hl_complete_job(hdev, job);
1101 }
1102
1103 static void cs_completion(struct work_struct *work)
1104 {
1105 struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
1106 struct hl_device *hdev = cs->ctx->hdev;
1107 struct hl_cs_job *job, *tmp;
1108
1109 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1110 hl_complete_job(hdev, job);
1111 }
1112
1113 static int validate_queue_index(struct hl_device *hdev,
1114 struct hl_cs_chunk *chunk,
1115 enum hl_queue_type *queue_type,
1116 bool *is_kernel_allocated_cb)
1117 {
1118 struct asic_fixed_properties *asic = &hdev->asic_prop;
1119 struct hw_queue_properties *hw_queue_prop;
1120
1121 /* This must be checked here to prevent out-of-bounds access to
1122 * hw_queues_props array
1123 */
1124 if (chunk->queue_index >= asic->max_queues) {
1125 dev_err(hdev->dev, "Queue index %d is invalid\n",
1126 chunk->queue_index);
1127 return -EINVAL;
1128 }
1129
1130 hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
1131
1132 if (hw_queue_prop->type == QUEUE_TYPE_NA) {
1133 dev_err(hdev->dev, "Queue index %d is not applicable\n",
1134 chunk->queue_index);
1135 return -EINVAL;
1136 }
1137
1138 if (hw_queue_prop->binned) {
1139 dev_err(hdev->dev, "Queue index %d is binned out\n",
1140 chunk->queue_index);
1141 return -EINVAL;
1142 }
1143
1144 if (hw_queue_prop->driver_only) {
1145 dev_err(hdev->dev,
1146 "Queue index %d is restricted for the kernel driver\n",
1147 chunk->queue_index);
1148 return -EINVAL;
1149 }
1150
1151 /* When hw queue type isn't QUEUE_TYPE_HW,
1152 * USER_ALLOC_CB flag shall be referred to as "don't care".
1153 */
1154 if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1155 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1156 if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1157 dev_err(hdev->dev,
1158 "Queue index %d doesn't support user CB\n",
1159 chunk->queue_index);
1160 return -EINVAL;
1161 }
1162
1163 *is_kernel_allocated_cb = false;
1164 } else {
1165 if (!(hw_queue_prop->cb_alloc_flags &
1166 CB_ALLOC_KERNEL)) {
1167 dev_err(hdev->dev,
1168 "Queue index %d doesn't support kernel CB\n",
1169 chunk->queue_index);
1170 return -EINVAL;
1171 }
1172
1173 *is_kernel_allocated_cb = true;
1174 }
1175 } else {
1176 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1177 & CB_ALLOC_KERNEL);
1178 }
1179
1180 *queue_type = hw_queue_prop->type;
1181 return 0;
1182 }
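
/*
 * Summary of the CB-allocation decision made above (same logic, shown as a
 * table for clarity):
 *
 *	queue type	USER_ALLOC_CB flag	resulting CB
 *	HW		set			user CB (requires CB_ALLOC_USER)
 *	HW		clear			kernel CB (requires CB_ALLOC_KERNEL)
 *	non-HW		don't care		follows the queue's CB_ALLOC_KERNEL bit
 */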
1183
1184 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1185 struct hl_mem_mgr *mmg,
1186 struct hl_cs_chunk *chunk)
1187 {
1188 struct hl_cb *cb;
1189
1190 cb = hl_cb_get(mmg, chunk->cb_handle);
1191 if (!cb) {
1192 dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
1193 return NULL;
1194 }
1195
1196 if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1197 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1198 goto release_cb;
1199 }
1200
1201 atomic_inc(&cb->cs_cnt);
1202
1203 return cb;
1204
1205 release_cb:
1206 hl_cb_put(cb);
1207 return NULL;
1208 }
1209
1210 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1211 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1212 {
1213 struct hl_cs_job *job;
1214
1215 job = kzalloc(sizeof(*job), GFP_ATOMIC);
1216 if (!job)
1217 job = kzalloc(sizeof(*job), GFP_KERNEL);
1218
1219 if (!job)
1220 return NULL;
1221
1222 kref_init(&job->refcount);
1223 job->queue_type = queue_type;
1224 job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1225
1226 if (is_cb_patched(hdev, job))
1227 INIT_LIST_HEAD(&job->userptr_list);
1228
1229 if (job->queue_type == QUEUE_TYPE_EXT)
1230 INIT_WORK(&job->finish_work, job_wq_completion);
1231
1232 return job;
1233 }
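
/*
 * Note on the allocation pattern above (also used in allocate_cs() and
 * hl_cs_copy_chunk_array()): the first attempt uses GFP_ATOMIC, which never
 * sleeps; only if that fails does the code retry with GFP_KERNEL, which may
 * block but can reclaim memory and is far more likely to succeed under
 * memory pressure.
 */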
1234
1235 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1236 {
1237 if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1238 return CS_TYPE_SIGNAL;
1239 else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1240 return CS_TYPE_WAIT;
1241 else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1242 return CS_TYPE_COLLECTIVE_WAIT;
1243 else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1244 return CS_RESERVE_SIGNALS;
1245 else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1246 return CS_UNRESERVE_SIGNALS;
1247 else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
1248 return CS_TYPE_ENGINE_CORE;
1249 else
1250 return CS_TYPE_DEFAULT;
1251 }
1252
1253 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1254 {
1255 struct hl_device *hdev = hpriv->hdev;
1256 struct hl_ctx *ctx = hpriv->ctx;
1257 u32 cs_type_flags, num_chunks;
1258 enum hl_device_status status;
1259 enum hl_cs_type cs_type;
1260 bool is_sync_stream;
1261
1262 if (!hl_device_operational(hdev, &status))
1263 return -EBUSY;
1265
1266 if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1267 !hdev->supports_staged_submission) {
1268 dev_err(hdev->dev, "staged submission not supported");
1269 return -EPERM;
1270 }
1271
1272 cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1273
1274 if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1275 dev_err(hdev->dev,
1276 "CS type flags are mutually exclusive, context %d\n",
1277 ctx->asid);
1278 return -EINVAL;
1279 }
1280
1281 cs_type = hl_cs_get_cs_type(cs_type_flags);
1282 num_chunks = args->in.num_chunks_execute;
1283
1284 is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
1285 cs_type == CS_TYPE_COLLECTIVE_WAIT);
1286
1287 if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
1288 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1289 return -EINVAL;
1290 }
1291
1292 if (cs_type == CS_TYPE_DEFAULT) {
1293 if (!num_chunks) {
1294 dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
1295 return -EINVAL;
1296 }
1297 } else if (is_sync_stream && num_chunks != 1) {
1298 dev_err(hdev->dev,
1299 "Sync stream CS mandates one chunk only, context %d\n",
1300 ctx->asid);
1301 return -EINVAL;
1302 }
1303
1304 return 0;
1305 }
1306
1307 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1308 struct hl_cs_chunk **cs_chunk_array,
1309 void __user *chunks, u32 num_chunks,
1310 struct hl_ctx *ctx)
1311 {
1312 u32 size_to_copy;
1313
1314 if (num_chunks > HL_MAX_JOBS_PER_CS) {
1315 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1316 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1317 dev_err(hdev->dev,
1318 "Number of chunks can NOT be larger than %d\n",
1319 HL_MAX_JOBS_PER_CS);
1320 return -EINVAL;
1321 }
1322
1323 *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1324 GFP_ATOMIC);
1325 if (!*cs_chunk_array)
1326 *cs_chunk_array = kmalloc_array(num_chunks,
1327 sizeof(**cs_chunk_array), GFP_KERNEL);
1328 if (!*cs_chunk_array) {
1329 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1330 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1331 return -ENOMEM;
1332 }
1333
1334 size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1335 if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1336 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1337 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1338 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1339 kfree(*cs_chunk_array);
1340 return -EFAULT;
1341 }
1342
1343 return 0;
1344 }
1345
1346 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1347 u64 sequence, u32 flags,
1348 u32 encaps_signal_handle)
1349 {
1350 if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1351 return 0;
1352
1353 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1354 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1355
1356 if (cs->staged_first) {
1357 /* Staged CS sequence is the first CS sequence */
1358 INIT_LIST_HEAD(&cs->staged_cs_node);
1359 cs->staged_sequence = cs->sequence;
1360
1361 if (cs->encaps_signals)
1362 cs->encaps_sig_hdl_id = encaps_signal_handle;
1363 } else {
1364 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1365 * under the cs_mirror_lock
1366 */
1367 cs->staged_sequence = sequence;
1368 }
1369
1370 /* Increment CS reference if needed */
1371 staged_cs_get(hdev, cs);
1372
1373 cs->staged_cs = true;
1374
1375 return 0;
1376 }
1377
1378 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1379 {
1380 int i;
1381
1382 for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1383 if (qid == hdev->stream_master_qid_arr[i])
1384 return BIT(i);
1385
1386 return 0;
1387 }
1388
1389 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1390 u32 num_chunks, u64 *cs_seq, u32 flags,
1391 u32 encaps_signals_handle, u32 timeout,
1392 u16 *signal_initial_sob_count)
1393 {
1394 bool staged_mid, int_queues_only = true, using_hw_queues = false;
1395 struct hl_device *hdev = hpriv->hdev;
1396 struct hl_cs_chunk *cs_chunk_array;
1397 struct hl_cs_counters_atomic *cntr;
1398 struct hl_ctx *ctx = hpriv->ctx;
1399 struct hl_cs_job *job;
1400 struct hl_cs *cs;
1401 struct hl_cb *cb;
1402 u64 user_sequence;
1403 u8 stream_master_qid_map = 0;
1404 int rc, i;
1405
1406 cntr = &hdev->aggregated_cs_counters;
1407 user_sequence = *cs_seq;
1408 *cs_seq = ULLONG_MAX;
1409
1410 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1411 hpriv->ctx);
1412 if (rc)
1413 goto out;
1414
1415 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1416 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1417 staged_mid = true;
1418 else
1419 staged_mid = false;
1420
1421 rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1422 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1423 timeout);
1424 if (rc)
1425 goto free_cs_chunk_array;
1426
1427 *cs_seq = cs->sequence;
1428
1429 hl_debugfs_add_cs(cs);
1430
1431 rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1432 encaps_signals_handle);
1433 if (rc)
1434 goto free_cs_object;
1435
1436 /* If this is a staged submission we must return the staged sequence
1437 * rather than the internal CS sequence
1438 */
1439 if (cs->staged_cs)
1440 *cs_seq = cs->staged_sequence;
1441
1442 /* Validate ALL the CS chunks before submitting the CS */
1443 for (i = 0 ; i < num_chunks ; i++) {
1444 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1445 enum hl_queue_type queue_type;
1446 bool is_kernel_allocated_cb;
1447
1448 rc = validate_queue_index(hdev, chunk, &queue_type,
1449 &is_kernel_allocated_cb);
1450 if (rc) {
1451 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1452 atomic64_inc(&cntr->validation_drop_cnt);
1453 goto free_cs_object;
1454 }
1455
1456 if (is_kernel_allocated_cb) {
1457 cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
1458 if (!cb) {
1459 atomic64_inc(
1460 &ctx->cs_counters.validation_drop_cnt);
1461 atomic64_inc(&cntr->validation_drop_cnt);
1462 rc = -EINVAL;
1463 goto free_cs_object;
1464 }
1465 } else {
1466 cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1467 }
1468
1469 if (queue_type == QUEUE_TYPE_EXT ||
1470 queue_type == QUEUE_TYPE_HW) {
1471 int_queues_only = false;
1472
1473 /*
1474 * store which stream are being used for external/HW
1475 * queues of this CS
1476 */
1477 if (hdev->supports_wait_for_multi_cs)
1478 stream_master_qid_map |=
1479 get_stream_master_qid_mask(hdev,
1480 chunk->queue_index);
1481 }
1482
1483 if (queue_type == QUEUE_TYPE_HW)
1484 using_hw_queues = true;
1485
1486 job = hl_cs_allocate_job(hdev, queue_type,
1487 is_kernel_allocated_cb);
1488 if (!job) {
1489 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1490 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1491 dev_err(hdev->dev, "Failed to allocate a new job\n");
1492 rc = -ENOMEM;
1493 if (is_kernel_allocated_cb)
1494 goto release_cb;
1495
1496 goto free_cs_object;
1497 }
1498
1499 job->id = i + 1;
1500 job->cs = cs;
1501 job->user_cb = cb;
1502 job->user_cb_size = chunk->cb_size;
1503 job->hw_queue_id = chunk->queue_index;
1504
1505 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1506 cs->jobs_cnt++;
1507
1508 list_add_tail(&job->cs_node, &cs->job_list);
1509
1510 /*
1511 * Increment CS reference. When CS reference is 0, CS is
1512 * done and can be signaled to user and free all its resources
1513 * Only increment for JOB on external or H/W queues, because
1514 * only for those JOBs we get completion
1515 */
1516 if (cs_needs_completion(cs) &&
1517 (job->queue_type == QUEUE_TYPE_EXT ||
1518 job->queue_type == QUEUE_TYPE_HW))
1519 cs_get(cs);
1520
1521 hl_debugfs_add_job(hdev, job);
1522
1523 rc = cs_parser(hpriv, job);
1524 if (rc) {
1525 atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1526 atomic64_inc(&cntr->parsing_drop_cnt);
1527 dev_err(hdev->dev,
1528 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1529 cs->ctx->asid, cs->sequence, job->id, rc);
1530 goto free_cs_object;
1531 }
1532 }
1533
1534 /* We allow a CS with any queue type combination as long as it does
1535 * not get a completion
1536 */
1537 if (int_queues_only && cs_needs_completion(cs)) {
1538 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1539 atomic64_inc(&cntr->validation_drop_cnt);
1540 dev_err(hdev->dev,
1541 "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1542 cs->ctx->asid, cs->sequence);
1543 rc = -EINVAL;
1544 goto free_cs_object;
1545 }
1546
1547 if (using_hw_queues)
1548 INIT_WORK(&cs->finish_work, cs_completion);
1549
1550 /*
1551 * store the (external/HW queues) streams used by the CS in the
1552 * fence object for multi-CS completion
1553 */
1554 if (hdev->supports_wait_for_multi_cs)
1555 cs->fence->stream_master_qid_map = stream_master_qid_map;
1556
1557 rc = hl_hw_queue_schedule_cs(cs);
1558 if (rc) {
1559 if (rc != -EAGAIN)
1560 dev_err(hdev->dev,
1561 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1562 cs->ctx->asid, cs->sequence, rc);
1563 goto free_cs_object;
1564 }
1565
1566 *signal_initial_sob_count = cs->initial_sob_count;
1567
1568 rc = HL_CS_STATUS_SUCCESS;
1569 goto put_cs;
1570
1571 release_cb:
1572 atomic_dec(&cb->cs_cnt);
1573 hl_cb_put(cb);
1574 free_cs_object:
1575 cs_rollback(hdev, cs);
1576 *cs_seq = ULLONG_MAX;
1577 /* The path below is both for good and erroneous exits */
1578 put_cs:
1579 /* We finished with the CS in this function, so put the ref */
1580 cs_put(cs);
1581 free_cs_chunk_array:
1582 kfree(cs_chunk_array);
1583 out:
1584 return rc;
1585 }
1586
1587 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1588 u64 *cs_seq)
1589 {
1590 struct hl_device *hdev = hpriv->hdev;
1591 struct hl_ctx *ctx = hpriv->ctx;
1592 bool need_soft_reset = false;
1593 int rc = 0, do_ctx_switch = 0;
1594 void __user *chunks;
1595 u32 num_chunks, tmp;
1596 u16 sob_count;
1597 int ret;
1598
1599 if (hdev->supports_ctx_switch)
1600 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1601
1602 if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1603 mutex_lock(&hpriv->restore_phase_mutex);
1604
1605 if (do_ctx_switch) {
1606 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1607 if (rc) {
1608 dev_err_ratelimited(hdev->dev,
1609 "Failed to switch to context %d, rejecting CS! %d\n",
1610 ctx->asid, rc);
1611 /*
1612 * If we timed out, or if the device is not IDLE
1613 * while we want to do context-switch (-EBUSY),
1614 * we need to soft-reset because QMAN is
1615 * probably stuck. However, we can't call to
1616 * reset here directly because of deadlock, so
1617 * need to do it at the very end of this
1618 * function
1619 */
1620 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1621 need_soft_reset = true;
1622 mutex_unlock(&hpriv->restore_phase_mutex);
1623 goto out;
1624 }
1625 }
1626
1627 hdev->asic_funcs->restore_phase_topology(hdev);
1628
1629 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1630 num_chunks = args->in.num_chunks_restore;
1631
1632 if (!num_chunks) {
1633 dev_dbg(hdev->dev,
1634 "Need to run restore phase but restore CS is empty\n");
1635 rc = 0;
1636 } else {
1637 rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1638 cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
1639 }
1640
1641 mutex_unlock(&hpriv->restore_phase_mutex);
1642
1643 if (rc) {
1644 dev_err(hdev->dev,
1645 "Failed to submit restore CS for context %d (%d)\n",
1646 ctx->asid, rc);
1647 goto out;
1648 }
1649
1650 /* Need to wait for restore completion before execution phase */
1651 if (num_chunks) {
1652 enum hl_cs_wait_status status;
1653 wait_again:
1654 ret = _hl_cs_wait_ioctl(hdev, ctx,
1655 jiffies_to_usecs(hdev->timeout_jiffies),
1656 *cs_seq, &status, NULL);
1657 if (ret) {
1658 if (ret == -ERESTARTSYS) {
1659 usleep_range(100, 200);
1660 goto wait_again;
1661 }
1662
1663 dev_err(hdev->dev,
1664 "Restore CS for context %d failed to complete %d\n",
1665 ctx->asid, ret);
1666 rc = -ENOEXEC;
1667 goto out;
1668 }
1669 }
1670
1671 if (hdev->supports_ctx_switch)
1672 ctx->thread_ctx_switch_wait_token = 1;
1673
1674 } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
1675 rc = hl_poll_timeout_memory(hdev,
1676 &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1677 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1678
1679 if (rc == -ETIMEDOUT) {
1680 dev_err(hdev->dev,
1681 "context switch phase timeout (%d)\n", tmp);
1682 goto out;
1683 }
1684 }
1685
1686 out:
1687 if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1688 hl_device_reset(hdev, 0);
1689
1690 return rc;
1691 }
1692
1693 /*
1694 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
1695 * if the SOB value reaches the max value, move to the other SOB reserved
1696 * for the queue.
1697 * @hdev: pointer to device structure
1698 * @q_idx: stream queue index
1699 * @hw_sob: the H/W SOB used in this signal CS.
1700 * @count: signals count
1701 * @encaps_sig: tells whether it's reservation for encaps signals or not.
1702 *
1703 * Note that this function must be called while hw_queues_lock is taken.
1704 */
1705 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1706 struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1707
1708 {
1709 struct hl_sync_stream_properties *prop;
1710 struct hl_hw_sob *sob = *hw_sob, *other_sob;
1711 u8 other_sob_offset;
1712
1713 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1714
1715 hw_sob_get(sob);
1716
1717 /* check for wraparound */
1718 if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1719 /*
1720 * Decrement as we reached the max value.
1721 * The release function won't be called here as we've
1722 * just incremented the refcount right before calling this
1723 * function.
1724 */
1725 hw_sob_put_err(sob);
1726
1727 /*
1728 * check the other sob value, if it is still in use then fail
1729 * otherwise make the switch
1730 */
1731 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1732 other_sob = &prop->hw_sob[other_sob_offset];
1733
1734 if (kref_read(&other_sob->kref) != 1) {
1735 dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1736 q_idx);
1737 return -EINVAL;
1738 }
1739
1740 /*
1741 * next_sob_val always points to the next available signal
1742 * in the sob, so in encaps signals it will be the next one
1743 * after reserving the required amount.
1744 */
1745 if (encaps_sig)
1746 prop->next_sob_val = count + 1;
1747 else
1748 prop->next_sob_val = count;
1749
1750 /* only two SOBs are currently in use */
1751 prop->curr_sob_offset = other_sob_offset;
1752 *hw_sob = other_sob;
1753
1754 /*
1755 * check if other_sob needs reset, then do it before using it
1756 * for the reservation or the next signal cs.
1757 * we do it here, and for both encaps and regular signal cs
1758 * cases in order to avoid possible races of two kref_put
1759 * of the sob which can occur at the same time if we move the
1760 * sob reset(kref_put) to cs_do_release function.
1761 * in addition, if we have combination of cs signal and
1762 * encaps, and at the point we need to reset the sob there was
1763 * no more reservations and only signal cs keep coming,
1764 * in such case we need signal_cs to put the refcount and
1765 * reset the sob.
1766 */
1767 if (other_sob->need_reset)
1768 hw_sob_put(other_sob);
1769
1770 if (encaps_sig) {
1771 /* set reset indication for the sob */
1772 sob->need_reset = true;
1773 hw_sob_get(other_sob);
1774 }
1775
1776 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1777 prop->curr_sob_offset, q_idx);
1778 } else {
1779 prop->next_sob_val += count;
1780 }
1781
1782 return 0;
1783 }
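
/*
 * Wraparound example for the handler above (illustrative, assuming a 15-bit
 * SOB counter, i.e. HL_MAX_SOB_VAL of 1 << 15, and HL_RSVD_SOBS == 2): with
 * next_sob_val = 32760 and a signal CS of count = 16, the sum crosses
 * HL_MAX_SOB_VAL, so the stream switches to the other reserved SOB (if its
 * kref shows it is idle), next_sob_val restarts at 16 (or 17 for an encaps
 * reservation), and the old SOB is reset once its remaining references
 * drop.
 */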
1784
1785 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1786 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1787 bool encaps_signals)
1788 {
1789 u64 *signal_seq_arr = NULL;
1790 u32 size_to_copy, signal_seq_arr_len;
1791 int rc = 0;
1792
1793 if (encaps_signals) {
1794 *signal_seq = chunk->encaps_signal_seq;
1795 return 0;
1796 }
1797
1798 signal_seq_arr_len = chunk->num_signal_seq_arr;
1799
1800 /* currently only one signal seq is supported */
1801 if (signal_seq_arr_len != 1) {
1802 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1803 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1804 dev_err(hdev->dev,
1805 "Wait for signal CS supports only one signal CS seq\n");
1806 return -EINVAL;
1807 }
1808
1809 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1810 sizeof(*signal_seq_arr),
1811 GFP_ATOMIC);
1812 if (!signal_seq_arr)
1813 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1814 sizeof(*signal_seq_arr),
1815 GFP_KERNEL);
1816 if (!signal_seq_arr) {
1817 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1818 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1819 return -ENOMEM;
1820 }
1821
1822 size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1823 if (copy_from_user(signal_seq_arr,
1824 u64_to_user_ptr(chunk->signal_seq_arr),
1825 size_to_copy)) {
1826 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1827 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1828 dev_err(hdev->dev,
1829 "Failed to copy signal seq array from user\n");
1830 rc = -EFAULT;
1831 goto out;
1832 }
1833
1834 /* currently it is guaranteed to have only one signal seq */
1835 *signal_seq = signal_seq_arr[0];
1836
1837 out:
1838 kfree(signal_seq_arr);
1839
1840 return rc;
1841 }
1842
1843 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1844 struct hl_ctx *ctx, struct hl_cs *cs,
1845 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1846 {
1847 struct hl_cs_counters_atomic *cntr;
1848 struct hl_cs_job *job;
1849 struct hl_cb *cb;
1850 u32 cb_size;
1851
1852 cntr = &hdev->aggregated_cs_counters;
1853
1854 job = hl_cs_allocate_job(hdev, q_type, true);
1855 if (!job) {
1856 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1857 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1858 dev_err(hdev->dev, "Failed to allocate a new job\n");
1859 return -ENOMEM;
1860 }
1861
1862 if (cs->type == CS_TYPE_WAIT)
1863 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1864 else
1865 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1866
1867 cb = hl_cb_kernel_create(hdev, cb_size,
1868 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1869 if (!cb) {
1870 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1871 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1872 kfree(job);
1873 return -EFAULT;
1874 }
1875
1876 job->id = 0;
1877 job->cs = cs;
1878 job->user_cb = cb;
1879 atomic_inc(&job->user_cb->cs_cnt);
1880 job->user_cb_size = cb_size;
1881 job->hw_queue_id = q_idx;
1882
1883 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1884 && cs->encaps_signals)
1885 job->encaps_sig_wait_offset = encaps_signal_offset;
1886 /*
1887 * No need for parsing, the user CB is already the patched CB.
1888 * We call hl_cb_destroy() for two reasons: we don't need the CB in
1889 * the CB idr anymore, and we must decrement its refcount as it was
1890 * incremented inside hl_cb_kernel_create().
1891 */
1892 job->patched_cb = job->user_cb;
1893 job->job_cb_size = job->user_cb_size;
1894 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
1895
1896 /* increment refcount as for external queues we get completion */
1897 cs_get(cs);
1898
1899 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1900 cs->jobs_cnt++;
1901
1902 list_add_tail(&job->cs_node, &cs->job_list);
1903
1904 hl_debugfs_add_job(hdev, job);
1905
1906 return 0;
1907 }
1908
1909 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1910 u32 q_idx, u32 count,
1911 u32 *handle_id, u32 *sob_addr,
1912 u32 *signals_count)
1913 {
1914 struct hw_queue_properties *hw_queue_prop;
1915 struct hl_sync_stream_properties *prop;
1916 struct hl_device *hdev = hpriv->hdev;
1917 struct hl_cs_encaps_sig_handle *handle;
1918 struct hl_encaps_signals_mgr *mgr;
1919 struct hl_hw_sob *hw_sob;
1920 int hdl_id;
1921 int rc = 0;
1922
1923 if (count >= HL_MAX_SOB_VAL) {
1924 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
1925 count);
1926 rc = -EINVAL;
1927 goto out;
1928 }
1929
1930 if (q_idx >= hdev->asic_prop.max_queues) {
1931 dev_err(hdev->dev, "Queue index %d is invalid\n",
1932 q_idx);
1933 rc = -EINVAL;
1934 goto out;
1935 }
1936
1937 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1938
1939 if (!hw_queue_prop->supports_sync_stream) {
1940 dev_err(hdev->dev,
1941 "Queue index %d does not support sync stream operations\n",
1942 q_idx);
1943 rc = -EINVAL;
1944 goto out;
1945 }
1946
1947 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1948
1949 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1950 if (!handle) {
1951 rc = -ENOMEM;
1952 goto out;
1953 }
1954
1955 handle->count = count;
1956
1957 hl_ctx_get(hpriv->ctx);
1958 handle->ctx = hpriv->ctx;
1959 mgr = &hpriv->ctx->sig_mgr;
1960
1961 spin_lock(&mgr->lock);
1962 hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
1963 spin_unlock(&mgr->lock);
1964
1965 if (hdl_id < 0) {
1966 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
1967 rc = -EINVAL;
1968 goto put_ctx;
1969 }
1970
1971 handle->id = hdl_id;
1972 handle->q_idx = q_idx;
1973 handle->hdev = hdev;
1974 kref_init(&handle->refcount);
1975
1976 hdev->asic_funcs->hw_queues_lock(hdev);
1977
1978 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1979
1980 /*
1981 * Increment the SOB value by count, as requested by the user,
1982 * to reserve those signals.
1983 * Check that the amount of signals to reserve does not exceed the
1984 * max SOB value; if it does, switch to the other SOB.
1985 */
1986 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
1987 true);
1988 if (rc) {
1989 dev_err(hdev->dev, "Failed to switch SOB\n");
1990 hdev->asic_funcs->hw_queues_unlock(hdev);
1991 rc = -EINVAL;
1992 goto remove_idr;
1993 }
1994 /* Set the hw_sob in the handle after calling the SOB wraparound handler,
1995 * since the SOB could have changed.
1996 */
1997 handle->hw_sob = hw_sob;
1998
1999 /* store the current sob value for unreserve validity check, and
2000 * signal offset support
2001 */
2002 handle->pre_sob_val = prop->next_sob_val - handle->count;
2003
2004 *signals_count = prop->next_sob_val;
2005 hdev->asic_funcs->hw_queues_unlock(hdev);
2006
2007 *sob_addr = handle->hw_sob->sob_addr;
2008 *handle_id = hdl_id;
2009
2010 dev_dbg(hdev->dev,
2011 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
2012 hw_sob->sob_id, handle->hw_sob->sob_addr,
2013 prop->next_sob_val - 1, q_idx, hdl_id);
2014 goto out;
2015
2016 remove_idr:
2017 spin_lock(&mgr->lock);
2018 idr_remove(&mgr->handles, hdl_id);
2019 spin_unlock(&mgr->lock);
2020
2021 put_ctx:
2022 hl_ctx_put(handle->ctx);
2023 kfree(handle);
2024
2025 out:
2026 return rc;
2027 }
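
/*
 * Illustrative user-space sketch (not compiled here) of the reserve /
 * unreserve flow handled above and below. The in/out field names are
 * the ones consumed by hl_cs_ioctl(); the ioctl request code
 * (HL_IOCTL_CS) and the device fd come from the uapi header and are
 * assumed here, and q_idx, nr_signals and handle_id are placeholders.
 *
 *	union hl_cs_args args = {};
 *
 *	args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;
 *	args.in.encaps_signals_q_idx = q_idx;
 *	args.in.encaps_signals_count = nr_signals;
 *	ioctl(fd, HL_IOCTL_CS, &args);
 *	handle_id = args.out.handle_id;
 *	sob_base = args.out.sob_base_addr_offset;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.cs_flags = HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY;
 *	args.in.encaps_sig_handle_id = handle_id;
 *	ioctl(fd, HL_IOCTL_CS, &args);
 */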
2028
2029 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
2030 {
2031 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
2032 struct hl_sync_stream_properties *prop;
2033 struct hl_device *hdev = hpriv->hdev;
2034 struct hl_encaps_signals_mgr *mgr;
2035 struct hl_hw_sob *hw_sob;
2036 u32 q_idx, sob_addr;
2037 int rc = 0;
2038
2039 mgr = &hpriv->ctx->sig_mgr;
2040
2041 spin_lock(&mgr->lock);
2042 encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
2043 if (encaps_sig_hdl) {
2044 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
2045 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
2046 encaps_sig_hdl->count);
2047
2048 hdev->asic_funcs->hw_queues_lock(hdev);
2049
2050 q_idx = encaps_sig_hdl->q_idx;
2051 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2052 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2053 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
2054
2055 /* Check if sob_val got out of sync due to other
2056 * signal submission requests which were handled
2057 * between the reserve-unreserve calls or SOB switch
2058 * upon reaching SOB max value.
2059 */
2060 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
2061 != prop->next_sob_val ||
2062 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
2063 dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
2064 encaps_sig_hdl->pre_sob_val,
2065 (prop->next_sob_val - encaps_sig_hdl->count));
2066
2067 hdev->asic_funcs->hw_queues_unlock(hdev);
2068 rc = -EINVAL;
2069 goto out;
2070 }
2071
2072 /*
2073 * Decrement the SOB value by count by user request
2074 * to unreserve those signals
2075 */
2076 prop->next_sob_val -= encaps_sig_hdl->count;
2077
2078 hdev->asic_funcs->hw_queues_unlock(hdev);
2079
2080 hw_sob_put(hw_sob);
2081
2082 /* Release the id and free allocated memory of the handle */
2083 idr_remove(&mgr->handles, handle_id);
2084 hl_ctx_put(encaps_sig_hdl->ctx);
2085 kfree(encaps_sig_hdl);
2086 } else {
2087 rc = -EINVAL;
2088 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
2089 }
2090 out:
2091 spin_unlock(&mgr->lock);
2092
2093 return rc;
2094 }
2095
2096 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
2097 void __user *chunks, u32 num_chunks,
2098 u64 *cs_seq, u32 flags, u32 timeout,
2099 u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
2100 {
2101 struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
2102 bool handle_found = false, is_wait_cs = false,
2103 wait_cs_submitted = false,
2104 cs_encaps_signals = false;
2105 struct hl_cs_chunk *cs_chunk_array, *chunk;
2106 bool staged_cs_with_encaps_signals = false;
2107 struct hw_queue_properties *hw_queue_prop;
2108 struct hl_device *hdev = hpriv->hdev;
2109 struct hl_cs_compl *sig_waitcs_cmpl;
2110 u32 q_idx, collective_engine_id = 0;
2111 struct hl_cs_counters_atomic *cntr;
2112 struct hl_fence *sig_fence = NULL;
2113 struct hl_ctx *ctx = hpriv->ctx;
2114 enum hl_queue_type q_type;
2115 struct hl_cs *cs;
2116 u64 signal_seq;
2117 int rc;
2118
2119 cntr = &hdev->aggregated_cs_counters;
2120 *cs_seq = ULLONG_MAX;
2121
2122 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
2123 ctx);
2124 if (rc)
2125 goto out;
2126
2127 /* currently it is guaranteed to have only one chunk */
2128 chunk = &cs_chunk_array[0];
2129
2130 if (chunk->queue_index >= hdev->asic_prop.max_queues) {
2131 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2132 atomic64_inc(&cntr->validation_drop_cnt);
2133 dev_err(hdev->dev, "Queue index %d is invalid\n",
2134 chunk->queue_index);
2135 rc = -EINVAL;
2136 goto free_cs_chunk_array;
2137 }
2138
2139 q_idx = chunk->queue_index;
2140 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2141 q_type = hw_queue_prop->type;
2142
2143 if (!hw_queue_prop->supports_sync_stream) {
2144 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2145 atomic64_inc(&cntr->validation_drop_cnt);
2146 dev_err(hdev->dev,
2147 "Queue index %d does not support sync stream operations\n",
2148 q_idx);
2149 rc = -EINVAL;
2150 goto free_cs_chunk_array;
2151 }
2152
2153 if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
2154 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
2155 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2156 atomic64_inc(&cntr->validation_drop_cnt);
2157 dev_err(hdev->dev,
2158 "Queue index %d is invalid\n", q_idx);
2159 rc = -EINVAL;
2160 goto free_cs_chunk_array;
2161 }
2162
2163 if (!hdev->nic_ports_mask) {
2164 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2165 atomic64_inc(&cntr->validation_drop_cnt);
2166 dev_err(hdev->dev,
2167 "Collective operations not supported when NIC ports are disabled");
2168 rc = -EINVAL;
2169 goto free_cs_chunk_array;
2170 }
2171
2172 collective_engine_id = chunk->collective_engine_id;
2173 }
2174
2175 is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2176 cs_type == CS_TYPE_COLLECTIVE_WAIT);
2177
2178 cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2179
2180 if (is_wait_cs) {
2181 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2182 ctx, cs_encaps_signals);
2183 if (rc)
2184 goto free_cs_chunk_array;
2185
2186 if (cs_encaps_signals) {
2187 /* check if cs sequence has encapsulated
2188 * signals handle
2189 */
2190 struct idr *idp;
2191 u32 id;
2192
2193 spin_lock(&ctx->sig_mgr.lock);
2194 idp = &ctx->sig_mgr.handles;
2195 idr_for_each_entry(idp, encaps_sig_hdl, id) {
2196 if (encaps_sig_hdl->cs_seq == signal_seq) {
2197 /* Get a refcount to protect this handle from being removed
2198 * from the idr, needed when multiple wait CSs with offsets
2199 * are used to wait on reserved encaps signals.
2200 * Since kref_put of this handle is executed outside the
2201 * current lock, it is possible that the handle refcount is
2202 * 0 but it has yet to be removed from the list. In that
2203 * case we need to consider the handle as not valid.
2204 */
2205 if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
2206 handle_found = true;
2207 break;
2208 }
2209 }
2210 spin_unlock(&ctx->sig_mgr.lock);
2211
2212 if (!handle_found) {
2213 /* treat as signal CS already finished */
2214 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2215 signal_seq);
2216 rc = 0;
2217 goto free_cs_chunk_array;
2218 }
2219
2220 /* validate also the signal offset value */
2221 if (chunk->encaps_signal_offset >
2222 encaps_sig_hdl->count) {
2223 dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
2224 chunk->encaps_signal_offset,
2225 encaps_sig_hdl->count);
2226 rc = -EINVAL;
2227 goto free_cs_chunk_array;
2228 }
2229 }
2230
2231 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2232 if (IS_ERR(sig_fence)) {
2233 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2234 atomic64_inc(&cntr->validation_drop_cnt);
2235 dev_err(hdev->dev,
2236 "Failed to get signal CS with seq 0x%llx\n",
2237 signal_seq);
2238 rc = PTR_ERR(sig_fence);
2239 goto free_cs_chunk_array;
2240 }
2241
2242 if (!sig_fence) {
2243 /* signal CS already finished */
2244 rc = 0;
2245 goto free_cs_chunk_array;
2246 }
2247
2248 sig_waitcs_cmpl =
2249 container_of(sig_fence, struct hl_cs_compl, base_fence);
2250
2251 staged_cs_with_encaps_signals = !!
2252 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2253 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2254
2255 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2256 !staged_cs_with_encaps_signals) {
2257 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2258 atomic64_inc(&cntr->validation_drop_cnt);
2259 dev_err(hdev->dev,
2260 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2261 signal_seq);
2262 hl_fence_put(sig_fence);
2263 rc = -EINVAL;
2264 goto free_cs_chunk_array;
2265 }
2266
2267 if (completion_done(&sig_fence->completion)) {
2268 /* signal CS already finished */
2269 hl_fence_put(sig_fence);
2270 rc = 0;
2271 goto free_cs_chunk_array;
2272 }
2273 }
2274
2275 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2276 if (rc) {
2277 if (is_wait_cs)
2278 hl_fence_put(sig_fence);
2279
2280 goto free_cs_chunk_array;
2281 }
2282
2283 /*
2284 * Save the signal CS fence for later initialization right before
2285 * hanging the wait CS on the queue.
2286 * For the encaps signals case, we save the CS sequence and handle pointer
2287 * for later initialization.
2288 */
2289 if (is_wait_cs) {
2290 cs->signal_fence = sig_fence;
2291 /* Store the handle pointer, so we don't have to
2292 * look for it again later in the flow,
2293 * when we need to set the SOB info in hw_queue.
2294 */
2295 if (cs->encaps_signals)
2296 cs->encaps_sig_hdl = encaps_sig_hdl;
2297 }
2298
2299 hl_debugfs_add_cs(cs);
2300
2301 *cs_seq = cs->sequence;
2302
2303 if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2304 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2305 q_idx, chunk->encaps_signal_offset);
2306 else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2307 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2308 cs, q_idx, collective_engine_id,
2309 chunk->encaps_signal_offset);
2310 else {
2311 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2312 atomic64_inc(&cntr->validation_drop_cnt);
2313 rc = -EINVAL;
2314 }
2315
2316 if (rc)
2317 goto free_cs_object;
2318
2319 if (q_type == QUEUE_TYPE_HW)
2320 INIT_WORK(&cs->finish_work, cs_completion);
2321
2322 rc = hl_hw_queue_schedule_cs(cs);
2323 if (rc) {
2324 /* In case a wait CS failed here, it means the signal CS has
2325 * already completed. We want to free all of its related objects,
2326 * but we don't want to fail the ioctl.
2327 */
2328 if (is_wait_cs)
2329 rc = 0;
2330 else if (rc != -EAGAIN)
2331 dev_err(hdev->dev,
2332 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2333 ctx->asid, cs->sequence, rc);
2334 goto free_cs_object;
2335 }
2336
2337 *signal_sob_addr_offset = cs->sob_addr_offset;
2338 *signal_initial_sob_count = cs->initial_sob_count;
2339
2340 rc = HL_CS_STATUS_SUCCESS;
2341 if (is_wait_cs)
2342 wait_cs_submitted = true;
2343 goto put_cs;
2344
2345 free_cs_object:
2346 cs_rollback(hdev, cs);
2347 *cs_seq = ULLONG_MAX;
2348 /* The path below is both for good and erroneous exits */
2349 put_cs:
2350 /* We finished with the CS in this function, so put the ref */
2351 cs_put(cs);
2352 free_cs_chunk_array:
2353 if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
2354 is_wait_cs)
2355 kref_put(&encaps_sig_hdl->refcount,
2356 hl_encaps_handle_do_release);
2357 kfree(cs_chunk_array);
2358 out:
2359 return rc;
2360 }
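
/*
 * Illustrative user-space sketch (not compiled here) of the signal /
 * wait pairing parsed above: a signal CS is submitted on a sync-stream
 * queue, then a wait CS names the signal CS sequence in its chunk.
 * The chunk and args field names are the ones used in this file;
 * HL_IOCTL_CS, fd and q_idx are assumptions taken from the uapi header
 * and the caller, respectively.
 *
 *	struct hl_cs_chunk chunk = { .queue_index = q_idx };
 *	union hl_cs_args args = {};
 *	__u64 signal_seq;
 *
 *	args.in.chunks_execute = (__u64)(uintptr_t)&chunk;
 *	args.in.num_chunks_execute = 1;
 *	args.in.cs_flags = HL_CS_FLAGS_SIGNAL;
 *	ioctl(fd, HL_IOCTL_CS, &args);
 *	signal_seq = args.out.seq;
 *
 *	chunk.signal_seq_arr = (__u64)(uintptr_t)&signal_seq;
 *	chunk.num_signal_seq_arr = 1;
 *	memset(&args, 0, sizeof(args));
 *	args.in.chunks_execute = (__u64)(uintptr_t)&chunk;
 *	args.in.num_chunks_execute = 1;
 *	args.in.cs_flags = HL_CS_FLAGS_WAIT;
 *	ioctl(fd, HL_IOCTL_CS, &args);
 */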
2361
2362 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
2363 u32 num_engine_cores, u32 core_command)
2364 {
2365 int rc;
2366 struct hl_device *hdev = hpriv->hdev;
2367 void __user *engine_cores_arr;
2368 u32 *cores;
2369
2370 if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
2371 dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
2372 return -EINVAL;
2373 }
2374
2375 if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
2376 dev_err(hdev->dev, "Engine core command is invalid\n");
2377 return -EINVAL;
2378 }
2379
2380 engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
2381 cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
2382 if (!cores)
2383 return -ENOMEM;
2384
2385 if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
2386 dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
2387 kfree(cores);
2388 return -EFAULT;
2389 }
2390
2391 rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
2392 kfree(cores);
2393
2394 return rc;
2395 }
2396
2397 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2398 {
2399 union hl_cs_args *args = data;
2400 enum hl_cs_type cs_type = 0;
2401 u64 cs_seq = ULONG_MAX;
2402 void __user *chunks;
2403 u32 num_chunks, flags, timeout,
2404 signals_count = 0, sob_addr = 0, handle_id = 0;
2405 u16 sob_initial_count = 0;
2406 int rc;
2407
2408 rc = hl_cs_sanity_checks(hpriv, args);
2409 if (rc)
2410 goto out;
2411
2412 rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2413 if (rc)
2414 goto out;
2415
2416 cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2417 ~HL_CS_FLAGS_FORCE_RESTORE);
2418 chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2419 num_chunks = args->in.num_chunks_execute;
2420 flags = args->in.cs_flags;
2421
2422 /* In case this is a staged CS, user should supply the CS sequence */
2423 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2424 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2425 cs_seq = args->in.seq;
2426
2427 timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2428 ? msecs_to_jiffies(args->in.timeout * 1000)
2429 : hpriv->hdev->timeout_jiffies;
2430
2431 switch (cs_type) {
2432 case CS_TYPE_SIGNAL:
2433 case CS_TYPE_WAIT:
2434 case CS_TYPE_COLLECTIVE_WAIT:
2435 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2436 &cs_seq, args->in.cs_flags, timeout,
2437 &sob_addr, &sob_initial_count);
2438 break;
2439 case CS_RESERVE_SIGNALS:
2440 rc = cs_ioctl_reserve_signals(hpriv,
2441 args->in.encaps_signals_q_idx,
2442 args->in.encaps_signals_count,
2443 &handle_id, &sob_addr, &signals_count);
2444 break;
2445 case CS_UNRESERVE_SIGNALS:
2446 rc = cs_ioctl_unreserve_signals(hpriv,
2447 args->in.encaps_sig_handle_id);
2448 break;
2449 case CS_TYPE_ENGINE_CORE:
2450 rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
2451 args->in.num_engine_cores, args->in.core_command);
2452 break;
2453 default:
2454 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2455 args->in.cs_flags,
2456 args->in.encaps_sig_handle_id,
2457 timeout, &sob_initial_count);
2458 break;
2459 }
2460 out:
2461 if (rc != -EAGAIN) {
2462 memset(args, 0, sizeof(*args));
2463
2464 switch (cs_type) {
2465 case CS_RESERVE_SIGNALS:
2466 args->out.handle_id = handle_id;
2467 args->out.sob_base_addr_offset = sob_addr;
2468 args->out.count = signals_count;
2469 break;
2470 case CS_TYPE_SIGNAL:
2471 args->out.sob_base_addr_offset = sob_addr;
2472 args->out.sob_count_before_submission = sob_initial_count;
2473 args->out.seq = cs_seq;
2474 break;
2475 case CS_TYPE_DEFAULT:
2476 args->out.sob_count_before_submission = sob_initial_count;
2477 args->out.seq = cs_seq;
2478 break;
2479 default:
2480 args->out.seq = cs_seq;
2481 break;
2482 }
2483
2484 args->out.status = rc;
2485 }
2486
2487 return rc;
2488 }
2489
2490 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2491 enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
2492 {
2493 struct hl_device *hdev = ctx->hdev;
2494 ktime_t timestamp_kt;
2495 long completion_rc;
2496 int rc = 0, error;
2497
2498 if (IS_ERR(fence)) {
2499 rc = PTR_ERR(fence);
2500 if (rc == -EINVAL)
2501 dev_notice_ratelimited(hdev->dev,
2502 "Can't wait on CS %llu because current CS is at seq %llu\n",
2503 seq, ctx->cs_sequence);
2504 return rc;
2505 }
2506
2507 if (!fence) {
2508 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
2509 dev_dbg(hdev->dev,
2510 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2511 seq, ctx->cs_sequence);
2512 *status = CS_WAIT_STATUS_GONE;
2513 return 0;
2514 }
2515
2516 completion_rc = 1;
2517 goto report_results;
2518 }
2519
2520 if (!timeout_us) {
2521 completion_rc = completion_done(&fence->completion);
2522 } else {
2523 unsigned long timeout;
2524
2525 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2526 timeout_us : usecs_to_jiffies(timeout_us);
2527 completion_rc =
2528 wait_for_completion_interruptible_timeout(
2529 &fence->completion, timeout);
2530 }
2531
2532 error = fence->error;
2533 timestamp_kt = fence->timestamp;
2534
2535 report_results:
2536 if (completion_rc > 0) {
2537 *status = CS_WAIT_STATUS_COMPLETED;
2538 if (timestamp)
2539 *timestamp = ktime_to_ns(timestamp_kt);
2540 } else {
2541 *status = CS_WAIT_STATUS_BUSY;
2542 }
2543
2544 if (error == -ETIMEDOUT || error == -EIO)
2545 rc = error;
2546
2547 return rc;
2548 }
2549
2550 /*
2551 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2552 *
2553 * @mcs_data: multi-CS internal data
2554 * @mcs_compl: multi-CS completion structure
2555 *
2556 * @return 0 on success, otherwise non 0 error code
2557 *
2558 * The function iterates over all CS sequences in the list and sets a bit in
2559 * completion_bitmap for each completed CS.
2560 * While iterating, the function ORs the stream master QID map of each fence in
2561 * the fence array into the completion's QID map, to be used by CSs to signal
2562 * completion to the multi-CS context.
2563 * This function shall be called after taking a context ref.
2564 */
2565 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
2566 {
2567 struct hl_fence **fence_ptr = mcs_data->fence_arr;
2568 struct hl_device *hdev = mcs_data->ctx->hdev;
2569 int i, rc, arr_len = mcs_data->arr_len;
2570 u64 *seq_arr = mcs_data->seq_arr;
2571 ktime_t max_ktime, first_cs_time;
2572 enum hl_cs_wait_status status;
2573
2574 memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
2575
2576 /* get all fences under the same lock */
2577 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2578 if (rc)
2579 return rc;
2580
2581 /*
2582 * Re-initialize the completion here to handle 2 possible cases:
2583 * 1. A CS completes the multi-CS prior to clearing the completion, in which
2584 * case the fence iteration below is guaranteed to catch the CS completion.
2585 * 2. The completion occurs after the re-init of the completion, in which
2586 * case we will wake up immediately in wait_for_completion.
2587 */
2588 reinit_completion(&mcs_compl->completion);
2589
2590 /*
2591 * Set to the maximum time so we can verify the timestamp is valid:
2592 * if this value is still there at the end, no timestamp was updated.
2593 */
2594 max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2595 first_cs_time = max_ktime;
2596
2597 for (i = 0; i < arr_len; i++, fence_ptr++) {
2598 struct hl_fence *fence = *fence_ptr;
2599
2600 /*
2601 * In order to prevent case where we wait until timeout even though a CS associated
2602 * with the multi-CS actually completed we do things in the below order:
2603 * 1. for each fence set it's QID map in the multi-CS completion QID map. This way
2604 * any CS can, potentially, complete the multi CS for the specific QID (note
2605 * that once completion is initialized, calling complete* and then wait on the
2606 * completion will cause it to return at once)
2607 * 2. only after allowing multi-CS completion for the specific QID do we check whether
2608 * the specific CS already completed (and thus the wait-for-completion part will
2609 * be skipped). If the CS has not completed, it is guaranteed that the completing
2610 * CS will wake up the completion.
2611 */
2612 if (fence)
2613 mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
2614
2615 /*
2616 * function won't sleep as it is called with timeout 0 (i.e.
2617 * poll the fence)
2618 */
2619 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
2620 if (rc) {
2621 dev_err(hdev->dev,
2622 "wait_for_fence error :%d for CS seq %llu\n",
2623 rc, seq_arr[i]);
2624 break;
2625 }
2626
2627 switch (status) {
2628 case CS_WAIT_STATUS_BUSY:
2629 /* CS has not finished, QID to wait on is already stored */
2630 break;
2631 case CS_WAIT_STATUS_COMPLETED:
2632 /*
2633 * Use mcs_handling_done to avoid returning to the user with
2634 * an indication that the CS completed before it finished all
2635 * of its mcs handling, which would create a race the next
2636 * time the user waits for mcs.
2637 * Note: when reaching this case the fence is definitely not
2638 * NULL, but the NULL check was added to satisfy static analysis.
2639 */
2640 if (fence && !fence->mcs_handling_done) {
2641 /*
2642 * In case the multi CS is completed but the MCS handling is
2643 * not done, we "complete" the multi CS to prevent it from
2644 * waiting until time-out; the "multi-CS handling done" check
2645 * will get another chance at the next iteration.
2646 */
2647 complete_all(&mcs_compl->completion);
2648 break;
2649 }
2650
2651 mcs_data->completion_bitmap |= BIT(i);
2652 /*
2653 * For all completed CSs we take the earliest timestamp.
2654 * For this we have to validate that the timestamp is
2655 * earliest of all timestamps so far.
2656 */
2657 if (fence && mcs_data->update_ts &&
2658 (ktime_compare(fence->timestamp, first_cs_time) < 0))
2659 first_cs_time = fence->timestamp;
2660 break;
2661 case CS_WAIT_STATUS_GONE:
2662 mcs_data->update_ts = false;
2663 mcs_data->gone_cs = true;
2664 /*
2665 * It is possible to get old sequence numbers from the user,
2666 * which relate to already completed CSs whose fences are
2667 * already gone. In this case the CS is set as completed, but
2668 * there is no need to consider its QID for mcs completion.
2669 */
2670 mcs_data->completion_bitmap |= BIT(i);
2671 break;
2672 default:
2673 dev_err(hdev->dev, "Invalid fence status\n");
2674 return -EINVAL;
2675 }
2676
2677 }
2678
2679 hl_fences_put(mcs_data->fence_arr, arr_len);
2680
2681 if (mcs_data->update_ts &&
2682 (ktime_compare(first_cs_time, max_ktime) != 0))
2683 mcs_data->timestamp = ktime_to_ns(first_cs_time);
2684
2685 return rc;
2686 }
2687
2688 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
2689 enum hl_cs_wait_status *status, s64 *timestamp)
2690 {
2691 struct hl_fence *fence;
2692 int rc = 0;
2693
2694 if (timestamp)
2695 *timestamp = 0;
2696
2697 hl_ctx_get(ctx);
2698
2699 fence = hl_ctx_get_fence(ctx, seq);
2700
2701 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2702 hl_fence_put(fence);
2703 hl_ctx_put(ctx);
2704
2705 return rc;
2706 }
2707
2708 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
2709 {
2710 if (usecs <= U32_MAX)
2711 return usecs_to_jiffies(usecs);
2712
2713 /*
2714 * If the value converted to nanoseconds would not fit in 64 bits,
2715 * use the largest 64-bit value.
2716 */
2717 if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
2718 return nsecs_to_jiffies(U64_MAX);
2719
2720 return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
2721 }
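
/*
 * Numeric sanity check for the clamp above (approximate figures): the
 * multiplication usecs * NSEC_PER_USEC overflows u64 once usecs exceeds
 * U64_MAX / 1000, i.e. roughly 1.8 * 10^16 microseconds (about 584
 * years), so clamping that case to nsecs_to_jiffies(U64_MAX) cannot
 * shorten any realistic timeout. Values that fit in 32 bits take the
 * cheaper usecs_to_jiffies() path directly.
 */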
2722
2723 /*
2724 * hl_wait_multi_cs_completion_init - init completion structure
2725 *
2726 * @hdev: pointer to habanalabs device structure
2727 *
2728 * @return valid completion struct pointer on success, otherwise error pointer
2729 *
2730 * Up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver.
2731 * The function gets the first available completion (by marking it "used")
2732 * and initializes its values.
2733 * The stream master QID map of the completion is set later, while iterating
2734 * over the fences, so a fresh completion starts with an empty map.
2735 */
2736 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
2737 {
2738 struct multi_cs_completion *mcs_compl;
2739 int i;
2740
2741 /* find free multi_cs completion structure */
2742 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2743 mcs_compl = &hdev->multi_cs_completion[i];
2744 spin_lock(&mcs_compl->lock);
2745 if (!mcs_compl->used) {
2746 mcs_compl->used = 1;
2747 mcs_compl->timestamp = 0;
2748 /*
2749 * Init the QID map to 0 to avoid completion by CSs. The actual QID
2750 * map of the multi-CS CSs will be set incrementally at a later stage.
2751 */
2752 mcs_compl->stream_master_qid_map = 0;
2753 spin_unlock(&mcs_compl->lock);
2754 break;
2755 }
2756 spin_unlock(&mcs_compl->lock);
2757 }
2758
2759 if (i == MULTI_CS_MAX_USER_CTX) {
2760 dev_err(hdev->dev, "no available multi-CS completion structure\n");
2761 return ERR_PTR(-ENOMEM);
2762 }
2763 return mcs_compl;
2764 }
2765
2766 /*
2767 * hl_wait_multi_cs_completion_fini - return completion structure and set as
2768 * unused
2769 *
2770 * @mcs_compl: pointer to the completion structure
2771 */
2772 static void hl_wait_multi_cs_completion_fini(
2773 struct multi_cs_completion *mcs_compl)
2774 {
2775 /*
2776 * Free the completion structure; do it under the lock to be in sync
2777 * with the thread that signals completion.
2778 */
2779 spin_lock(&mcs_compl->lock);
2780 mcs_compl->used = 0;
2781 spin_unlock(&mcs_compl->lock);
2782 }
2783
2784 /*
2785 * hl_wait_multi_cs_completion - wait for first CS to complete
2786 *
2787 * @mcs_data: multi-CS internal data
2788 *
2789 * @return 0 on success, otherwise non 0 error code
2790 */
2791 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
2792 struct multi_cs_completion *mcs_compl)
2793 {
2794 long completion_rc;
2795
2796 completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
2797 mcs_data->timeout_jiffies);
2798
2799 /* update timestamp */
2800 if (completion_rc > 0)
2801 mcs_data->timestamp = mcs_compl->timestamp;
2802
2803 mcs_data->wait_status = completion_rc;
2804
2805 return 0;
2806 }
2807
2808 /*
2809 * hl_multi_cs_completion_init - init array of multi-CS completion structures
2810 *
2811 * @hdev: pointer to habanalabs device structure
2812 */
2813 void hl_multi_cs_completion_init(struct hl_device *hdev)
2814 {
2815 struct multi_cs_completion *mcs_cmpl;
2816 int i;
2817
2818 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2819 mcs_cmpl = &hdev->multi_cs_completion[i];
2820 mcs_cmpl->used = 0;
2821 spin_lock_init(&mcs_cmpl->lock);
2822 init_completion(&mcs_cmpl->completion);
2823 }
2824 }
2825
2826 /*
2827 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2828 *
2829 * @hpriv: pointer to the private data of the fd
2830 * @data: pointer to multi-CS wait ioctl in/out args
2831 *
2832 */
2833 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2834 {
2835 struct multi_cs_completion *mcs_compl;
2836 struct hl_device *hdev = hpriv->hdev;
2837 struct multi_cs_data mcs_data = {};
2838 union hl_wait_cs_args *args = data;
2839 struct hl_ctx *ctx = hpriv->ctx;
2840 struct hl_fence **fence_arr;
2841 void __user *seq_arr;
2842 u32 size_to_copy;
2843 u64 *cs_seq_arr;
2844 u8 seq_arr_len;
2845 int rc;
2846
2847 if (!hdev->supports_wait_for_multi_cs) {
2848 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2849 return -EPERM;
2850 }
2851
2852 seq_arr_len = args->in.seq_arr_len;
2853
2854 if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2855 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2856 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2857 return -EINVAL;
2858 }
2859
2860 /* allocate memory for sequence array */
2861 cs_seq_arr =
2862 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2863 if (!cs_seq_arr)
2864 return -ENOMEM;
2865
2866 /* copy CS sequence array from user */
2867 seq_arr = (void __user *) (uintptr_t) args->in.seq;
2868 size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2869 if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2870 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2871 rc = -EFAULT;
2872 goto free_seq_arr;
2873 }
2874
2875 /* allocate array for the fences */
2876 fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
2877 if (!fence_arr) {
2878 rc = -ENOMEM;
2879 goto free_seq_arr;
2880 }
2881
2882 /* initialize the multi-CS internal data */
2883 mcs_data.ctx = ctx;
2884 mcs_data.seq_arr = cs_seq_arr;
2885 mcs_data.fence_arr = fence_arr;
2886 mcs_data.arr_len = seq_arr_len;
2887
2888 hl_ctx_get(ctx);
2889
2890 /* wait (with timeout) for the first CS to be completed */
2891 mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
2892 mcs_compl = hl_wait_multi_cs_completion_init(hdev);
2893 if (IS_ERR(mcs_compl)) {
2894 rc = PTR_ERR(mcs_compl);
2895 goto put_ctx;
2896 }
2897
2898 /* poll all CS fences, extract timestamp */
2899 mcs_data.update_ts = true;
2900 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
2901 /*
2902 * skip wait for CS completion when one of the below is true:
2903 * - an error on the poll function
2904 * - one or more CS in the list completed
2905 * - the user called ioctl with timeout 0
2906 */
2907 if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
2908 goto completion_fini;
2909
2910 while (true) {
2911 rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
2912 if (rc || (mcs_data.wait_status == 0))
2913 break;
2914
2915 /*
2916 * poll fences once again to update the CS map.
2917 * no timestamp should be updated this time.
2918 */
2919 mcs_data.update_ts = false;
2920 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
2921
2922 if (rc || mcs_data.completion_bitmap)
2923 break;
2924
2925 /*
2926 * If hl_wait_multi_cs_completion returned before the timeout (i.e.
2927 * it got a completion), it was either completed by a CS in the multi-CS
2928 * list (in which case the indication is a non-empty completion_bitmap),
2929 * or by a CS submitted to one of the shared stream masters but not in
2930 * the multi-CS list (in which case we should wait again, but with the
2931 * remaining timeout and with the timestamp reset to zero, to let a CS
2932 * related to the current multi-CS set a new, relevant, timestamp).
2933 */
2934 mcs_data.timeout_jiffies = mcs_data.wait_status;
2935 mcs_compl->timestamp = 0;
2936 }
2937
2938 completion_fini:
2939 hl_wait_multi_cs_completion_fini(mcs_compl);
2940
2941 put_ctx:
2942 hl_ctx_put(ctx);
2943 kfree(fence_arr);
2944
2945 free_seq_arr:
2946 kfree(cs_seq_arr);
2947
2948 if (rc)
2949 return rc;
2950
2951 if (mcs_data.wait_status == -ERESTARTSYS) {
2952 dev_err_ratelimited(hdev->dev,
2953 "user process got signal while waiting for Multi-CS\n");
2954 return -EINTR;
2955 }
2956
2957 /* update output args */
2958 memset(args, 0, sizeof(*args));
2959
2960 if (mcs_data.completion_bitmap) {
2961 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2962 args->out.cs_completion_map = mcs_data.completion_bitmap;
2963
2964 /* if the timestamp is not 0, it's valid */
2965 if (mcs_data.timestamp) {
2966 args->out.timestamp_nsec = mcs_data.timestamp;
2967 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2968 }
2969
2970 /* update if some CS was gone */
2971 if (!mcs_data.timestamp)
2972 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2973 } else {
2974 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2975 }
2976
2977 return 0;
2978 }
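
/*
 * Illustrative user-space sketch (not compiled here): waiting on
 * several CS sequences at once. The in/out fields are the ones read
 * and written above; HL_IOCTL_WAIT_CS, fd and the sequence values are
 * assumed.
 *
 *	__u64 seqs[2] = { seq_a, seq_b };
 *	union hl_wait_cs_args wait = {};
 *
 *	wait.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
 *	wait.in.seq = (__u64)(uintptr_t)seqs;
 *	wait.in.seq_arr_len = 2;
 *	wait.in.timeout_us = timeout_us;
 *	ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
 *
 * On return, wait.out.cs_completion_map has bit i set for each
 * completed seqs[i], and wait.out.timestamp_nsec is valid when
 * HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set in wait.out.flags.
 */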
2979
2980 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2981 {
2982 struct hl_device *hdev = hpriv->hdev;
2983 union hl_wait_cs_args *args = data;
2984 enum hl_cs_wait_status status;
2985 u64 seq = args->in.seq;
2986 s64 timestamp;
2987 int rc;
2988
2989 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
2990
2991 if (rc == -ERESTARTSYS) {
2992 dev_err_ratelimited(hdev->dev,
2993 "user process got signal while waiting for CS handle %llu\n",
2994 seq);
2995 return -EINTR;
2996 }
2997
2998 memset(args, 0, sizeof(*args));
2999
3000 if (rc) {
3001 if (rc == -ETIMEDOUT) {
3002 dev_err_ratelimited(hdev->dev,
3003 "CS %llu has timed-out while user process is waiting for it\n",
3004 seq);
3005 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
3006 } else if (rc == -EIO) {
3007 dev_err_ratelimited(hdev->dev,
3008 "CS %llu has been aborted while user process is waiting for it\n",
3009 seq);
3010 args->out.status = HL_WAIT_CS_STATUS_ABORTED;
3011 }
3012 return rc;
3013 }
3014
3015 if (timestamp) {
3016 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3017 args->out.timestamp_nsec = timestamp;
3018 }
3019
3020 switch (status) {
3021 case CS_WAIT_STATUS_GONE:
3022 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3023 fallthrough;
3024 case CS_WAIT_STATUS_COMPLETED:
3025 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3026 break;
3027 case CS_WAIT_STATUS_BUSY:
3028 default:
3029 args->out.status = HL_WAIT_CS_STATUS_BUSY;
3030 break;
3031 }
3032
3033 return 0;
3034 }
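
/*
 * Illustrative user-space sketch (not compiled here): waiting on a
 * single CS sequence returned by an earlier submission. Field names
 * match the code above; HL_IOCTL_WAIT_CS, fd, cs_seq and timeout_us
 * are assumed.
 *
 *	union hl_wait_cs_args wait = {};
 *
 *	wait.in.seq = cs_seq;
 *	wait.in.timeout_us = timeout_us;
 *	ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
 *
 * wait.out.status then reports HL_WAIT_CS_STATUS_COMPLETED, _BUSY,
 * _TIMEDOUT or _ABORTED.
 */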
3035
3036 static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
3037 struct hl_cb *cq_cb,
3038 u64 ts_offset, u64 cq_offset, u64 target_value,
3039 spinlock_t *wait_list_lock,
3040 struct hl_user_pending_interrupt **pend)
3041 {
3042 struct hl_ts_buff *ts_buff = buf->private;
3043 struct hl_user_pending_interrupt *requested_offset_record =
3044 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3045 ts_offset;
3046 struct hl_user_pending_interrupt *cb_last =
3047 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3048 (ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
3049 unsigned long flags, iter_counter = 0;
3050 u64 current_cq_counter;
3051
3052 /* Validate that ts_offset does not exceed the last record in the buffer */
3053 if (requested_offset_record >= cb_last) {
3054 dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
3055 (u64)(uintptr_t)cb_last);
3056 return -EINVAL;
3057 }
3058
3059 start_over:
3060 spin_lock_irqsave(wait_list_lock, flags);
3061
3062 /* Unregister only if we didn't reach the target value, since
3063 * in that case there will be no handling in irq context, and
3064 * then it's safe to delete the node from the interrupt list and
3065 * re-use it for another interrupt.
3066 */
3067 if (requested_offset_record->ts_reg_info.in_use) {
3068 current_cq_counter = *requested_offset_record->cq_kernel_addr;
3069 if (current_cq_counter < requested_offset_record->cq_target_value) {
3070 list_del(&requested_offset_record->wait_list_node);
3071 spin_unlock_irqrestore(wait_list_lock, flags);
3072
3073 hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
3074 hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
3075
3076 dev_dbg(buf->mmg->dev,
3077 "ts node removed from interrupt list now can re-use\n");
3078 } else {
3079 dev_dbg(buf->mmg->dev,
3080 "ts node in middle of irq handling\n");
3081
3082 /* irq handling is in progress, give it time to finish */
3083 spin_unlock_irqrestore(wait_list_lock, flags);
3084 usleep_range(1, 10);
3085 if (++iter_counter == MAX_TS_ITER_NUM) {
3086 dev_err(buf->mmg->dev,
3087 "handling registration interrupt took too long!!\n");
3088 return -EINVAL;
3089 }
3090
3091 goto start_over;
3092 }
3093 } else {
3094 /* Fill up the new registration node info */
3095 requested_offset_record->ts_reg_info.buf = buf;
3096 requested_offset_record->ts_reg_info.cq_cb = cq_cb;
3097 requested_offset_record->ts_reg_info.timestamp_kernel_addr =
3098 (u64 *) ts_buff->user_buff_address + ts_offset;
3099 requested_offset_record->cq_kernel_addr =
3100 (u64 *) cq_cb->kernel_address + cq_offset;
3101 requested_offset_record->cq_target_value = target_value;
3102
3103 spin_unlock_irqrestore(wait_list_lock, flags);
3104 }
3105
3106 *pend = requested_offset_record;
3107
3108 dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
3109 requested_offset_record);
3110 return 0;
3111 }
3112
3113 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3114 struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
3115 u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
3116 u64 target_value, struct hl_user_interrupt *interrupt,
3117 bool register_ts_record, u64 ts_handle, u64 ts_offset,
3118 u32 *status, u64 *timestamp)
3119 {
3120 struct hl_user_pending_interrupt *pend;
3121 struct hl_mmap_mem_buf *buf;
3122 struct hl_cb *cq_cb;
3123 unsigned long timeout, flags;
3124 long completion_rc;
3125 int rc = 0;
3126
3127 timeout = hl_usecs64_to_jiffies(timeout_us);
3128
3129 hl_ctx_get(ctx);
3130
3131 cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
3132 if (!cq_cb) {
3133 rc = -EINVAL;
3134 goto put_ctx;
3135 }
3136
3137 /* Validate the cq offset */
3138 if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
3139 ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
3140 rc = -EINVAL;
3141 goto put_cq_cb;
3142 }
3143
3144 if (register_ts_record) {
3145 dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
3146 interrupt->interrupt_id, ts_offset, cq_counters_offset);
3147 buf = hl_mmap_mem_buf_get(mmg, ts_handle);
3148 if (!buf) {
3149 rc = -EINVAL;
3150 goto put_cq_cb;
3151 }
3152
3153 /* get ts buffer record */
3154 rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
3155 cq_counters_offset, target_value,
3156 &interrupt->wait_list_lock, &pend);
3157 if (rc)
3158 goto put_ts_buff;
3159 } else {
3160 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3161 if (!pend) {
3162 rc = -ENOMEM;
3163 goto put_cq_cb;
3164 }
3165 hl_fence_init(&pend->fence, ULONG_MAX);
3166 pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset;
3167 pend->cq_target_value = target_value;
3168 }
3169
3170 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3171
3172 /* We check for completion value as interrupt could have been received
3173 * before we added the node to the wait list
3174 */
3175 if (*pend->cq_kernel_addr >= target_value) {
3176 if (register_ts_record)
3177 pend->ts_reg_info.in_use = 0;
3178 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3179
3180 *status = HL_WAIT_CS_STATUS_COMPLETED;
3181
3182 if (register_ts_record) {
3183 *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
3184 goto put_ts_buff;
3185 } else {
3186 pend->fence.timestamp = ktime_get();
3187 goto set_timestamp;
3188 }
3189 } else if (!timeout_us) {
3190 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3191 *status = HL_WAIT_CS_STATUS_BUSY;
3192 pend->fence.timestamp = ktime_get();
3193 goto set_timestamp;
3194 }
3195
3196 /* Add the pending user interrupt to the relevant list for the
3197 * interrupt handler to monitor.
3198 * Note that we cannot keep the list sorted by target value in order
3199 * to shorten the list-pass loop, since the same list can hold nodes
3200 * for different cq counter handles.
3201 * Note:
3202 * Mark the ts buff offset as in-use here, inside the spinlock
3203 * protected area, to avoid hitting the re-use path in
3204 * ts_buff_get_kernel_ts_record before the node is added to the list.
3205 * That scenario can happen when multiple threads race on the same
3206 * offset: one thread sets up the ts buff in ts_buff_get_kernel_ts_record,
3207 * then another thread takes over and reaches ts_buff_get_kernel_ts_record
3208 * as well, and we end up trying to re-use the same ts buff offset and
3209 * to delete a non-existing node from the list.
3210 */
3211 if (register_ts_record)
3212 pend->ts_reg_info.in_use = 1;
3213
3214 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3215 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3216
3217 if (register_ts_record) {
3218 rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
3219 goto ts_registration_exit;
3220 }
3221
3222 /* Wait for interrupt handler to signal completion */
3223 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3224 timeout);
3225 if (completion_rc > 0) {
3226 *status = HL_WAIT_CS_STATUS_COMPLETED;
3227 } else {
3228 if (completion_rc == -ERESTARTSYS) {
3229 dev_err_ratelimited(hdev->dev,
3230 "user process got signal while waiting for interrupt ID %d\n",
3231 interrupt->interrupt_id);
3232 rc = -EINTR;
3233 *status = HL_WAIT_CS_STATUS_ABORTED;
3234 } else {
3235 if (pend->fence.error == -EIO) {
3236 dev_err_ratelimited(hdev->dev,
3237 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3238 pend->fence.error);
3239 rc = -EIO;
3240 *status = HL_WAIT_CS_STATUS_ABORTED;
3241 } else {
3242 /* The wait has timed-out. We don't know anything beyond that
3243 * because the workload wasn't submitted through the driver.
3244 * Therefore, from driver's perspective, the workload is still
3245 * executing.
3246 */
3247 rc = 0;
3248 *status = HL_WAIT_CS_STATUS_BUSY;
3249 }
3250 }
3251 }
3252
3253 /*
3254 * We keep removing the node from the list here, and not in the irq
3255 * handler, for the completion-timeout case. If it's a registration
3256 * for a ts record, the node will be deleted in the irq handler after
3257 * we reach the target value.
3258 */
3259 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3260 list_del(&pend->wait_list_node);
3261 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3262
3263 set_timestamp:
3264 *timestamp = ktime_to_ns(pend->fence.timestamp);
3265 kfree(pend);
3266 hl_cb_put(cq_cb);
3267 ts_registration_exit:
3268 hl_ctx_put(ctx);
3269
3270 return rc;
3271
3272 put_ts_buff:
3273 hl_mmap_mem_buf_put(buf);
3274 put_cq_cb:
3275 hl_cb_put(cq_cb);
3276 put_ctx:
3277 hl_ctx_put(ctx);
3278
3279 return rc;
3280 }
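
/*
 * Illustrative user-space sketch (not compiled here): a kernel-CQ wait
 * with timestamp registration, which is routed to the function above by
 * hl_interrupt_wait_ioctl() below. The flag and field names are the
 * ones used in the dispatcher; HL_IOCTL_WAIT_CS, fd and the two memory
 * handles (obtained through the driver's memory/CB interfaces) are
 * assumed.
 *
 *	union hl_wait_cs_args wait = {};
 *
 *	wait.in.flags = HL_WAIT_CS_FLAGS_INTERRUPT |
 *			HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ |
 *			HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT;
 *	// plus the target interrupt id encoded in
 *	// HL_WAIT_CS_FLAGS_INTERRUPT_MASK
 *	wait.in.cq_counters_handle = cq_handle;
 *	wait.in.cq_counters_offset = cq_offset;
 *	wait.in.timestamp_handle = ts_handle;
 *	wait.in.timestamp_offset = ts_offset;
 *	wait.in.target = target_value;
 *	wait.in.interrupt_timeout_us = timeout_us;
 *	ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
 */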
3281
3282 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
3283 u64 timeout_us, u64 user_address,
3284 u64 target_value, struct hl_user_interrupt *interrupt,
3285 u32 *status,
3286 u64 *timestamp)
3287 {
3288 struct hl_user_pending_interrupt *pend;
3289 unsigned long timeout, flags;
3290 u64 completion_value;
3291 long completion_rc;
3292 int rc = 0;
3293
3294 timeout = hl_usecs64_to_jiffies(timeout_us);
3295
3296 hl_ctx_get(ctx);
3297
3298 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3299 if (!pend) {
3300 hl_ctx_put(ctx);
3301 return -ENOMEM;
3302 }
3303
3304 hl_fence_init(&pend->fence, ULONG_MAX);
3305
3306 /* Add pending user interrupt to relevant list for the interrupt
3307 * handler to monitor
3308 */
3309 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3310 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3311 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3312
3313 /* We check for completion value as interrupt could have been received
3314 * before we added the node to the wait list
3315 */
3316 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3317 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3318 rc = -EFAULT;
3319 goto remove_pending_user_interrupt;
3320 }
3321
3322 if (completion_value >= target_value) {
3323 *status = HL_WAIT_CS_STATUS_COMPLETED;
3324 /* There was no interrupt, we assume the completion is now. */
3325 pend->fence.timestamp = ktime_get();
3326 } else {
3327 *status = HL_WAIT_CS_STATUS_BUSY;
3328 }
3329
3330 if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
3331 goto remove_pending_user_interrupt;
3332
3333 wait_again:
3334 /* Wait for interrupt handler to signal completion */
3335 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3336 timeout);
3337
3338 /* If timeout did not expire we need to perform the comparison.
3339 * If comparison fails, keep waiting until timeout expires
3340 */
3341 if (completion_rc > 0) {
3342 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3343 /* reinit_completion must be called before we check for user
3344 * completion value, otherwise, if interrupt is received after
3345 * the comparison and before the next wait_for_completion,
3346 * we will reach timeout and fail
3347 */
3348 reinit_completion(&pend->fence.completion);
3349 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3350
3351 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3352 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3353 rc = -EFAULT;
3354
3355 goto remove_pending_user_interrupt;
3356 }
3357
3358 if (completion_value >= target_value) {
3359 *status = HL_WAIT_CS_STATUS_COMPLETED;
3360 } else if (pend->fence.error) {
3361 dev_err_ratelimited(hdev->dev,
3362 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3363 pend->fence.error);
3364 /* set the command completion status as ABORTED */
3365 *status = HL_WAIT_CS_STATUS_ABORTED;
3366 } else {
3367 timeout = completion_rc;
3368 goto wait_again;
3369 }
3370 } else if (completion_rc == -ERESTARTSYS) {
3371 dev_err_ratelimited(hdev->dev,
3372 "user process got signal while waiting for interrupt ID %d\n",
3373 interrupt->interrupt_id);
3374 rc = -EINTR;
3375 } else {
3376 /* The wait has timed-out. We don't know anything beyond that
3377 * because the workload wasn't submitted through the driver.
3378 * Therefore, from driver's perspective, the workload is still
3379 * executing.
3380 */
3381 rc = 0;
3382 *status = HL_WAIT_CS_STATUS_BUSY;
3383 }
3384
3385 remove_pending_user_interrupt:
3386 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3387 list_del(&pend->wait_list_node);
3388 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3389
3390 *timestamp = ktime_to_ns(pend->fence.timestamp);
3391
3392 kfree(pend);
3393 hl_ctx_put(ctx);
3394
3395 return rc;
3396 }
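
/*
 * Illustrative user-space sketch (not compiled here): the user-address
 * variant handled above, where the driver compares a 64-bit value at a
 * user virtual address (typically updated by the device) against a
 * target each time the selected interrupt fires. HL_IOCTL_WAIT_CS, fd
 * and the interrupt id encoding are assumed from the uapi header.
 *
 *	__u64 fence_val = 0;	// updated by the workload
 *	union hl_wait_cs_args wait = {};
 *
 *	wait.in.flags = HL_WAIT_CS_FLAGS_INTERRUPT;
 *	// plus the interrupt id encoded in HL_WAIT_CS_FLAGS_INTERRUPT_MASK
 *	wait.in.addr = (__u64)(uintptr_t)&fence_val;
 *	wait.in.target = target_value;
 *	wait.in.interrupt_timeout_us = timeout_us;
 *	ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
 */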
3397
3398 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3399 {
3400 u16 interrupt_id, first_interrupt, last_interrupt;
3401 struct hl_device *hdev = hpriv->hdev;
3402 struct asic_fixed_properties *prop;
3403 struct hl_user_interrupt *interrupt;
3404 union hl_wait_cs_args *args = data;
3405 u32 status = HL_WAIT_CS_STATUS_BUSY;
3406 u64 timestamp = 0;
3407 int rc, int_idx;
3408
3409 prop = &hdev->asic_prop;
3410
3411 if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
3412 dev_err(hdev->dev, "no user interrupts allowed");
3413 return -EPERM;
3414 }
3415
3416 interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
3417
3418 first_interrupt = prop->first_available_user_interrupt;
3419 last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
3420
3421 if (interrupt_id < prop->user_dec_intr_count) {
3422
3423 /* Check if the requested core is enabled */
3424 if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
3425 dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
3426 interrupt_id);
3427 return -EINVAL;
3428 }
3429
3430 interrupt = &hdev->user_interrupt[interrupt_id];
3431
3432 } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
3433
3434 int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
3435 interrupt = &hdev->user_interrupt[int_idx];
3436
3437 } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
3438 interrupt = &hdev->common_user_cq_interrupt;
3439 } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
3440 interrupt = &hdev->common_decoder_interrupt;
3441 } else {
3442 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
3443 return -EINVAL;
3444 }
3445
3446 if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
3447 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
3448 args->in.interrupt_timeout_us, args->in.cq_counters_handle,
3449 args->in.cq_counters_offset,
3450 args->in.target, interrupt,
3451 !!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT),
3452 args->in.timestamp_handle, args->in.timestamp_offset,
3453 &status, &timestamp);
3454 else
3455 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
3456 args->in.interrupt_timeout_us, args->in.addr,
3457 args->in.target, interrupt, &status,
3458 &timestamp);
3459 if (rc)
3460 return rc;
3461
3462 memset(args, 0, sizeof(*args));
3463 args->out.status = status;
3464
3465 if (timestamp) {
3466 args->out.timestamp_nsec = timestamp;
3467 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3468 }
3469
3470 return 0;
3471 }
3472
3473 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3474 {
3475 union hl_wait_cs_args *args = data;
3476 u32 flags = args->in.flags;
3477 int rc;
3478
3479 /* If the device is not operational, no point in waiting for any command submission or
3480 * user interrupt
3481 */
3482 if (!hl_device_operational(hpriv->hdev, NULL))
3483 return -EBUSY;
3484
3485 if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
3486 rc = hl_interrupt_wait_ioctl(hpriv, data);
3487 else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
3488 rc = hl_multi_cs_wait_ioctl(hpriv, data);
3489 else
3490 rc = hl_cs_wait_ioctl(hpriv, data);
3491
3492 return rc;
3493 }
3494