1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2019 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10 
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13 
14 #define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15 				HL_CS_FLAGS_COLLECTIVE_WAIT)
16 
17 /**
18  * enum hl_cs_wait_status - cs wait status
19  * @CS_WAIT_STATUS_BUSY: cs was not completed yet
20  * @CS_WAIT_STATUS_COMPLETED: cs completed
21  * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
22  */
23 enum hl_cs_wait_status {
24 	CS_WAIT_STATUS_BUSY,
25 	CS_WAIT_STATUS_COMPLETED,
26 	CS_WAIT_STATUS_GONE
27 };
28 
29 static void job_wq_completion(struct work_struct *work);
30 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
31 				u64 timeout_us, u64 seq,
32 				enum hl_cs_wait_status *status, s64 *timestamp);
33 static void cs_do_release(struct kref *ref);
34 
35 static void hl_sob_reset(struct kref *ref)
36 {
37 	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
38 							kref);
39 	struct hl_device *hdev = hw_sob->hdev;
40 
41 	dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
42 
43 	hdev->asic_funcs->reset_sob(hdev, hw_sob);
44 
45 	hw_sob->need_reset = false;
46 }
47 
48 void hl_sob_reset_error(struct kref *ref)
49 {
50 	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
51 							kref);
52 	struct hl_device *hdev = hw_sob->hdev;
53 
54 	dev_crit(hdev->dev,
55 		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
56 		hw_sob->q_idx, hw_sob->sob_id);
57 }
58 
59 void hw_sob_put(struct hl_hw_sob *hw_sob)
60 {
61 	if (hw_sob)
62 		kref_put(&hw_sob->kref, hl_sob_reset);
63 }
64 
65 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
66 {
67 	if (hw_sob)
68 		kref_put(&hw_sob->kref, hl_sob_reset_error);
69 }
70 
71 void hw_sob_get(struct hl_hw_sob *hw_sob)
72 {
73 	if (hw_sob)
74 		kref_get(&hw_sob->kref);
75 }
76 
77 /**
78  * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
79  * @sob_base: sob base id
80  * @sob_mask: sob user mask, each bit represents a sob offset from sob base
81  * @mask: generated mask
82  *
83  * Return: 0 if given parameters are valid
84  */
85 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
86 {
87 	int i;
88 
89 	if (sob_mask == 0)
90 		return -EINVAL;
91 
92 	if (sob_mask == 0x1) {
93 		*mask = ~(1 << (sob_base & 0x7));
94 	} else {
95 		/* find msb in order to verify sob range is valid */
96 		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
97 			if (BIT(i) & sob_mask)
98 				break;
99 
100 		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
101 			return -EINVAL;
102 
103 		*mask = ~sob_mask;
104 	}
105 
106 	return 0;
107 }
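/*
 * A worked example of the computation above (illustrative values, assuming
 * HL_MAX_SOBS_PER_MONITOR is 8): for sob_base = 5 and sob_mask = 0x3 (two
 * sobs at offsets 0 and 1 from the base), the msb of the mask is bit 1.
 * The base falls at offset (5 & 0x7) = 5 within its group, so the range
 * check is 1 > (8 - 5 - 1) = 2, which is false, and the function returns 0
 * with *mask = ~0x3 = 0xFC. In the single-sob case, sob_mask == 0x1 with
 * sob_base = 5 yields *mask = ~(1 << 5) = 0xDF.
 */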
108 
109 static void hl_fence_release(struct kref *kref)
110 {
111 	struct hl_fence *fence =
112 		container_of(kref, struct hl_fence, refcount);
113 	struct hl_cs_compl *hl_cs_cmpl =
114 		container_of(fence, struct hl_cs_compl, base_fence);
115 
116 	kfree(hl_cs_cmpl);
117 }
118 
119 void hl_fence_put(struct hl_fence *fence)
120 {
121 	if (IS_ERR_OR_NULL(fence))
122 		return;
123 	kref_put(&fence->refcount, hl_fence_release);
124 }
125 
126 void hl_fences_put(struct hl_fence **fence, int len)
127 {
128 	int i;
129 
130 	for (i = 0; i < len; i++, fence++)
131 		hl_fence_put(*fence);
132 }
133 
134 void hl_fence_get(struct hl_fence *fence)
135 {
136 	if (fence)
137 		kref_get(&fence->refcount);
138 }
139 
140 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
141 {
142 	kref_init(&fence->refcount);
143 	fence->cs_sequence = sequence;
144 	fence->error = 0;
145 	fence->timestamp = ktime_set(0, 0);
146 	init_completion(&fence->completion);
147 }
148 
149 void cs_get(struct hl_cs *cs)
150 {
151 	kref_get(&cs->refcount);
152 }
153 
154 static int cs_get_unless_zero(struct hl_cs *cs)
155 {
156 	return kref_get_unless_zero(&cs->refcount);
157 }
158 
159 static void cs_put(struct hl_cs *cs)
160 {
161 	kref_put(&cs->refcount, cs_do_release);
162 }
163 
164 static void cs_job_do_release(struct kref *ref)
165 {
166 	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
167 
168 	kfree(job);
169 }
170 
171 static void cs_job_put(struct hl_cs_job *job)
172 {
173 	kref_put(&job->refcount, cs_job_do_release);
174 }
175 
176 bool cs_needs_completion(struct hl_cs *cs)
177 {
178 	/* In case this is a staged CS, only the last CS in sequence should
179 	 * get a completion; any non-staged CS will always get a completion
180 	 */
181 	if (cs->staged_cs && !cs->staged_last)
182 		return false;
183 
184 	return true;
185 }
186 
187 bool cs_needs_timeout(struct hl_cs *cs)
188 {
189 	/* In case this is a staged CS, only the first CS in sequence should
190 	 * get a timeout; any non-staged CS will always get a timeout
191 	 */
192 	if (cs->staged_cs && !cs->staged_first)
193 		return false;
194 
195 	return true;
196 }
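/*
 * Taken together, the two helpers above split the bookkeeping of a staged
 * submission between its members. An illustrative sketch for a staged
 * submission of three CS's:
 *
 *	CS#0 (staged_first)	- owns the timeout, i.e. the TDR is armed on it
 *	CS#1 (middle)		- gets neither a timeout nor a completion
 *	CS#2 (staged_last)	- owns the completion of the entire group
 *
 * A non-staged CS answers true to both questions.
 */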
197 
198 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
199 {
200 	/*
201 	 * Patched CB is created for external queues jobs, and for H/W queues
202 	 * jobs if the user CB was allocated by driver and MMU is disabled.
203 	 */
204 	return (job->queue_type == QUEUE_TYPE_EXT ||
205 			(job->queue_type == QUEUE_TYPE_HW &&
206 					job->is_kernel_allocated_cb &&
207 					!hdev->mmu_enable));
208 }
209 
210 /*
211  * cs_parser - parse the user command submission
212  *
213  * @hpriv: pointer to the private data of the fd
214  * @job: pointer to the job that holds the command submission info
215  *
216  * The function parses the command submission of the user. It calls the
217  * ASIC specific parser, which returns a list of memory blocks to send
218  * to the device as different command buffers
219  *
220  */
221 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
222 {
223 	struct hl_device *hdev = hpriv->hdev;
224 	struct hl_cs_parser parser;
225 	int rc;
226 
227 	parser.ctx_id = job->cs->ctx->asid;
228 	parser.cs_sequence = job->cs->sequence;
229 	parser.job_id = job->id;
230 
231 	parser.hw_queue_id = job->hw_queue_id;
232 	parser.job_userptr_list = &job->userptr_list;
233 	parser.patched_cb = NULL;
234 	parser.user_cb = job->user_cb;
235 	parser.user_cb_size = job->user_cb_size;
236 	parser.queue_type = job->queue_type;
237 	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
238 	job->patched_cb = NULL;
239 	parser.completion = cs_needs_completion(job->cs);
240 
241 	rc = hdev->asic_funcs->cs_parser(hdev, &parser);
242 
243 	if (is_cb_patched(hdev, job)) {
244 		if (!rc) {
245 			job->patched_cb = parser.patched_cb;
246 			job->job_cb_size = parser.patched_cb_size;
247 			job->contains_dma_pkt = parser.contains_dma_pkt;
248 			atomic_inc(&job->patched_cb->cs_cnt);
249 		}
250 
251 		/*
252 		 * Whether the parsing worked or not, we don't need the
253 		 * original CB anymore because it was already parsed and
254 		 * won't be accessed again for this CS
255 		 */
256 		atomic_dec(&job->user_cb->cs_cnt);
257 		hl_cb_put(job->user_cb);
258 		job->user_cb = NULL;
259 	} else if (!rc) {
260 		job->job_cb_size = job->user_cb_size;
261 	}
262 
263 	return rc;
264 }
265 
266 static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
267 {
268 	struct hl_cs *cs = job->cs;
269 
270 	if (is_cb_patched(hdev, job)) {
271 		hl_userptr_delete_list(hdev, &job->userptr_list);
272 
273 		/*
274 		 * We might arrive here from rollback and patched CB wasn't
275 		 * created, so we need to check it's not NULL
276 		 */
277 		if (job->patched_cb) {
278 			atomic_dec(&job->patched_cb->cs_cnt);
279 			hl_cb_put(job->patched_cb);
280 		}
281 	}
282 
283 	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
284 	 * enabled, the user CB isn't released in cs_parser() and thus should be
285 	 * released here.
286 	 * This is also true for INT queue jobs which were allocated by the driver
287 	 */
288 	if (job->is_kernel_allocated_cb &&
289 		((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
290 				job->queue_type == QUEUE_TYPE_INT)) {
291 		atomic_dec(&job->user_cb->cs_cnt);
292 		hl_cb_put(job->user_cb);
293 	}
294 
295 	/*
296 	 * This is the only place where there can be multiple threads
297 	 * modifying the list at the same time
298 	 */
299 	spin_lock(&cs->job_lock);
300 	list_del(&job->cs_node);
301 	spin_unlock(&cs->job_lock);
302 
303 	hl_debugfs_remove_job(hdev, job);
304 
305 	/* We decrement reference only for a CS that gets completion
306 	 * because the reference was incremented only for this kind of CS
307 	 * right before it was scheduled.
308 	 *
309 	 * In staged submission, only the last CS marked as 'staged_last'
310 	 * gets completion, hence its release function will be called from here.
311 	 * As for the rest of the CS's in the staged submission which do not get
312 	 * completion, their CS reference will be decremented by the
313 	 * 'staged_last' CS during the CS release flow.
314 	 * All relevant PQ CI counters will be incremented during the CS release
315 	 * flow by calling 'hl_hw_queue_update_ci'.
316 	 */
317 	if (cs_needs_completion(cs) &&
318 		(job->queue_type == QUEUE_TYPE_EXT ||
319 			job->queue_type == QUEUE_TYPE_HW))
320 		cs_put(cs);
321 
322 	cs_job_put(job);
323 }
324 
325 /*
326  * hl_staged_cs_find_first - locate the first CS in this staged submission
327  *
328  * @hdev: pointer to device structure
329  * @cs_seq: staged submission sequence number
330  *
331  * @note: This function must be called under 'hdev->cs_mirror_lock'
332  *
333  * Find and return a CS pointer with the given sequence
334  */
335 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
336 {
337 	struct hl_cs *cs;
338 
339 	list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
340 		if (cs->staged_cs && cs->staged_first &&
341 				cs->sequence == cs_seq)
342 			return cs;
343 
344 	return NULL;
345 }
346 
347 /*
348  * is_staged_cs_last_exists - returns true if the last CS in sequence exists
349  *
350  * @hdev: pointer to device structure
351  * @cs: staged submission member
352  *
353  */
354 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
355 {
356 	struct hl_cs *last_entry;
357 
358 	last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
359 								staged_cs_node);
360 
361 	if (last_entry->staged_last)
362 		return true;
363 
364 	return false;
365 }
366 
367 /*
368  * staged_cs_get - get CS reference if this CS is a part of a staged CS
369  *
370  * @hdev: pointer to device structure
371  * @cs: current CS
372  * @cs_seq: staged submission sequence number
373  *
374  * Increment CS reference for every CS in this staged submission except for
375  * the CS which gets completion.
376  */
377 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
378 {
379 	/* Only the last CS in this staged submission will get a completion.
380 	 * We must increment the reference for all other CS's in this
381 	 * staged submission.
382 	 * Once we get a completion we will release the whole staged submission.
383 	 */
384 	if (!cs->staged_last)
385 		cs_get(cs);
386 }
387 
388 /*
389  * staged_cs_put - put a CS in case it is part of staged submission
390  *
391  * @hdev: pointer to device structure
392  * @cs: CS to put
393  *
394  * This function decrements a CS reference (for a non completion CS)
395  */
396 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
397 {
398 	/* We release all CS's in a staged submission except the last
399 	 * CS, whose reference we never incremented.
400 	 */
401 	if (!cs_needs_completion(cs))
402 		cs_put(cs);
403 }
404 
405 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
406 {
407 	bool next_entry_found = false;
408 	struct hl_cs *next, *first_cs;
409 
410 	if (!cs_needs_timeout(cs))
411 		return;
412 
413 	spin_lock(&hdev->cs_mirror_lock);
414 
415 	/* We need to handle tdr only once for the complete staged submission.
416 	 * Hence, we choose the CS that reaches this function first, which is
417 	 * the CS marked as 'staged_last'.
418 	 * In case a single staged cs was submitted which has both first and
419 	 * last indications, then "cs_find_first" below will return NULL, since
420 	 * we removed the cs node from the list before getting here.
421 	 * In such a case, just continue with the cs to cancel its TDR work.
422 	 */
423 	if (cs->staged_cs && cs->staged_last) {
424 		first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
425 		if (first_cs)
426 			cs = first_cs;
427 	}
428 
429 	spin_unlock(&hdev->cs_mirror_lock);
430 
431 	/* Don't cancel TDR in case this CS timed out, because we might be
432 	 * running from the TDR context
433 	 */
434 	if (cs && (cs->timedout ||
435 			hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
436 		return;
437 
438 	if (cs && cs->tdr_active)
439 		cancel_delayed_work_sync(&cs->work_tdr);
440 
441 	spin_lock(&hdev->cs_mirror_lock);
442 
443 	/* queue TDR for next CS */
444 	list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
445 		if (cs_needs_timeout(next)) {
446 			next_entry_found = true;
447 			break;
448 		}
449 
450 	if (next_entry_found && !next->tdr_active) {
451 		next->tdr_active = true;
452 		schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
453 	}
454 
455 	spin_unlock(&hdev->cs_mirror_lock);
456 }
457 
458 /*
459  * force_complete_multi_cs - complete all contexts that wait on multi-CS
460  *
461  * @hdev: pointer to habanalabs device structure
462  */
463 static void force_complete_multi_cs(struct hl_device *hdev)
464 {
465 	int i;
466 
467 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
468 		struct multi_cs_completion *mcs_compl;
469 
470 		mcs_compl = &hdev->multi_cs_completion[i];
471 
472 		spin_lock(&mcs_compl->lock);
473 
474 		if (!mcs_compl->used) {
475 			spin_unlock(&mcs_compl->lock);
476 			continue;
477 		}
478 
479 		/* When force complete is called, no context should be waiting on
480 		 * multi-CS.
481 		 * This function is called as a protection for such a case, to
482 		 * free any pending context and print an error message.
483 		 */
484 		dev_err(hdev->dev,
485 				"multi-CS completion context %d still waiting when calling force completion\n",
486 				i);
487 		complete_all(&mcs_compl->completion);
488 		spin_unlock(&mcs_compl->lock);
489 	}
490 }
491 
492 /*
493  * complete_multi_cs - complete all waiting entities on multi-CS
494  *
495  * @hdev: pointer to habanalabs device structure
496  * @cs: CS structure
497  * The function signals a waiting entity that has overlapping stream masters
498  * with the completed CS.
499  * For example:
500  * - a completed CS worked on stream master QID 4, multi CS completion
501  *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
502  *   common stream master QID
503  * - a completed CS worked on stream master QID 4, multi CS completion
504  *   is actively waiting on stream master QIDs 3, 4. send signal as stream
505  *   master QID 4 is common
506  */
507 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
508 {
509 	struct hl_fence *fence = cs->fence;
510 	int i;
511 
512 	/* in case of multi CS check for completion only for the first CS */
513 	if (cs->staged_cs && !cs->staged_first)
514 		return;
515 
516 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
517 		struct multi_cs_completion *mcs_compl;
518 
519 		mcs_compl = &hdev->multi_cs_completion[i];
520 		if (!mcs_compl->used)
521 			continue;
522 
523 		spin_lock(&mcs_compl->lock);
524 
525 		/*
526 		 * complete if:
527 		 * 1. still waiting for completion
528 		 * 2. the completed CS has at least one overlapping stream
529 		 *    master with the stream masters in the completion
530 		 */
531 		if (mcs_compl->used &&
532 				(fence->stream_master_qid_map &
533 					mcs_compl->stream_master_qid_map)) {
534 			/* extract the timestamp only of the first completed CS */
535 			if (!mcs_compl->timestamp)
536 				mcs_compl->timestamp =
537 						ktime_to_ns(fence->timestamp);
538 			complete_all(&mcs_compl->completion);
539 		}
540 
541 		spin_unlock(&mcs_compl->lock);
542 	}
543 }
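/*
 * An illustrative sketch of the overlap test above: each bit in a
 * stream_master_qid_map corresponds to an index in the device's stream
 * master QID array (see get_stream_master_qid_mask() below). If a completed
 * CS ran on the stream master at index 2 (map 0b0100) while a waiting
 * context registered indices 1 and 2 (map 0b0110), the AND is non-zero and
 * the completion is signaled; against a waiting map of 0b0011 it would not
 * be.
 */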
544 
545 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
546 					struct hl_cs *cs,
547 					struct hl_cs_compl *hl_cs_cmpl)
548 {
549 	/* Skip this handler if the cs wasn't submitted, to avoid putting
550 	 * the hw_sob twice, since this case was already handled at this point.
551 	 * Also skip if the hw_sob pointer wasn't set.
552 	 */
553 	if (!hl_cs_cmpl->hw_sob || !cs->submitted)
554 		return;
555 
556 	spin_lock(&hl_cs_cmpl->lock);
557 
558 	/*
559 	 * We get a refcount upon reservation of signals or a signal/wait cs for
560 	 * the hw_sob object, and need to put it when the first staged cs
561 	 * (which contains the encaps signals) or the signal/wait cs is completed.
562 	 */
563 	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
564 			(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
565 			(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
566 			(!!hl_cs_cmpl->encaps_signals)) {
567 		dev_dbg(hdev->dev,
568 				"CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
569 				hl_cs_cmpl->cs_seq,
570 				hl_cs_cmpl->type,
571 				hl_cs_cmpl->hw_sob->sob_id,
572 				hl_cs_cmpl->sob_val);
573 
574 		hw_sob_put(hl_cs_cmpl->hw_sob);
575 
576 		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
577 			hdev->asic_funcs->reset_sob_group(hdev,
578 					hl_cs_cmpl->sob_group);
579 	}
580 
581 	spin_unlock(&hl_cs_cmpl->lock);
582 }
583 
584 static void cs_do_release(struct kref *ref)
585 {
586 	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
587 	struct hl_device *hdev = cs->ctx->hdev;
588 	struct hl_cs_job *job, *tmp;
589 	struct hl_cs_compl *hl_cs_cmpl =
590 			container_of(cs->fence, struct hl_cs_compl, base_fence);
591 
592 	cs->completed = true;
593 
594 	/*
595 	 * If we reached here, it means that all external jobs have finished,
596 	 * because each one of them took a refcnt on the CS. We still need to
597 	 * go over the internal jobs and complete them. Otherwise, we will
598 	 * have leaked memory and, what's worse, the CS object (and
599 	 * potentially the CTX object) could be released while the JOB
600 	 * still holds a pointer to them (but no reference).
601 	 */
602 	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
603 		complete_job(hdev, job);
604 
605 	if (!cs->submitted) {
606 		/*
607 		 * In case the wait for signal CS was submitted, the fence put
608 		 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
609 		 * right before hanging on the PQ.
610 		 */
611 		if (cs->type == CS_TYPE_WAIT ||
612 				cs->type == CS_TYPE_COLLECTIVE_WAIT)
613 			hl_fence_put(cs->signal_fence);
614 
615 		goto out;
616 	}
617 
618 	/* Need to update CI for all queue jobs that do not get completion */
619 	hl_hw_queue_update_ci(cs);
620 
621 	/* remove CS from CS mirror list */
622 	spin_lock(&hdev->cs_mirror_lock);
623 	list_del_init(&cs->mirror_node);
624 	spin_unlock(&hdev->cs_mirror_lock);
625 
626 	cs_handle_tdr(hdev, cs);
627 
628 	if (cs->staged_cs) {
629 		/* the completion CS decrements reference for the entire
630 		 * staged submission
631 		 */
632 		if (cs->staged_last) {
633 			struct hl_cs *staged_cs, *tmp;
634 
635 			list_for_each_entry_safe(staged_cs, tmp,
636 					&cs->staged_cs_node, staged_cs_node)
637 				staged_cs_put(hdev, staged_cs);
638 		}
639 
640 		/* A staged CS will be a member in the list only after it
641 		 * was submitted. We used 'cs_mirror_lock' when inserting
642 		 * it into the list so we will use it again when removing it
643 		 */
644 		if (cs->submitted) {
645 			spin_lock(&hdev->cs_mirror_lock);
646 			list_del(&cs->staged_cs_node);
647 			spin_unlock(&hdev->cs_mirror_lock);
648 		}
649 
650 		/* decrement refcount to handle when first staged cs
651 		 * with encaps signals is completed.
652 		 */
653 		if (hl_cs_cmpl->encaps_signals)
654 			kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
655 						hl_encaps_handle_do_release);
656 	}
657 
658 	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
659 			&& cs->encaps_signals)
660 		kref_put(&cs->encaps_sig_hdl->refcount,
661 					hl_encaps_handle_do_release);
662 
663 out:
664 	/* Must be called before hl_ctx_put because inside we use ctx to get
665 	 * the device
666 	 */
667 	hl_debugfs_remove_cs(cs);
668 
669 	hl_ctx_put(cs->ctx);
670 
671 	/* We need to mark an error for not submitted because in that case
672 	 * the hl fence release flow is different. Mainly, we don't need
673 	 * to handle hw_sob for signal/wait
674 	 */
675 	if (cs->timedout)
676 		cs->fence->error = -ETIMEDOUT;
677 	else if (cs->aborted)
678 		cs->fence->error = -EIO;
679 	else if (!cs->submitted)
680 		cs->fence->error = -EBUSY;
681 
682 	if (unlikely(cs->skip_reset_on_timeout)) {
683 		dev_err(hdev->dev,
684 			"Command submission %llu completed after %llu (s)\n",
685 			cs->sequence,
686 			div_u64(jiffies - cs->submission_time_jiffies, HZ));
687 	}
688 
689 	if (cs->timestamp)
690 		cs->fence->timestamp = ktime_get();
691 	complete_all(&cs->fence->completion);
692 	complete_multi_cs(hdev, cs);
693 
694 	cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
695 
696 	hl_fence_put(cs->fence);
697 
698 	kfree(cs->jobs_in_queue_cnt);
699 	kfree(cs);
700 }
701 
702 static void cs_timedout(struct work_struct *work)
703 {
704 	struct hl_device *hdev;
705 	int rc;
706 	struct hl_cs *cs = container_of(work, struct hl_cs,
707 						 work_tdr.work);
708 	bool skip_reset_on_timeout = cs->skip_reset_on_timeout;
709 
710 	rc = cs_get_unless_zero(cs);
711 	if (!rc)
712 		return;
713 
714 	if ((!cs->submitted) || (cs->completed)) {
715 		cs_put(cs);
716 		return;
717 	}
718 
719 	/* Mark the CS as timed out so we won't try to cancel its TDR */
720 	if (likely(!skip_reset_on_timeout))
721 		cs->timedout = true;
722 
723 	hdev = cs->ctx->hdev;
724 
725 	switch (cs->type) {
726 	case CS_TYPE_SIGNAL:
727 		dev_err(hdev->dev,
728 			"Signal command submission %llu has not finished in time!\n",
729 			cs->sequence);
730 		break;
731 
732 	case CS_TYPE_WAIT:
733 		dev_err(hdev->dev,
734 			"Wait command submission %llu has not finished in time!\n",
735 			cs->sequence);
736 		break;
737 
738 	case CS_TYPE_COLLECTIVE_WAIT:
739 		dev_err(hdev->dev,
740 			"Collective Wait command submission %llu has not finished in time!\n",
741 			cs->sequence);
742 		break;
743 
744 	default:
745 		dev_err(hdev->dev,
746 			"Command submission %llu has not finished in time!\n",
747 			cs->sequence);
748 		break;
749 	}
750 
751 	rc = hl_state_dump(hdev);
752 	if (rc)
753 		dev_err(hdev->dev, "Error during system state dump %d\n", rc);
754 
755 	cs_put(cs);
756 
757 	if (likely(!skip_reset_on_timeout)) {
758 		if (hdev->reset_on_lockup)
759 			hl_device_reset(hdev, HL_RESET_TDR);
760 		else
761 			hdev->needs_reset = true;
762 	}
763 }
764 
765 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
766 			enum hl_cs_type cs_type, u64 user_sequence,
767 			struct hl_cs **cs_new, u32 flags, u32 timeout)
768 {
769 	struct hl_cs_counters_atomic *cntr;
770 	struct hl_fence *other = NULL;
771 	struct hl_cs_compl *cs_cmpl;
772 	struct hl_cs *cs;
773 	int rc;
774 
775 	cntr = &hdev->aggregated_cs_counters;
776 
777 	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
778 	if (!cs)
779 		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
780 
781 	if (!cs) {
782 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
783 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
784 		return -ENOMEM;
785 	}
786 
787 	/* increment refcnt for context */
788 	hl_ctx_get(hdev, ctx);
789 
790 	cs->ctx = ctx;
791 	cs->submitted = false;
792 	cs->completed = false;
793 	cs->type = cs_type;
794 	cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
795 	cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
796 	cs->timeout_jiffies = timeout;
797 	cs->skip_reset_on_timeout =
798 		hdev->skip_reset_on_timeout ||
799 		!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
800 	cs->submission_time_jiffies = jiffies;
801 	INIT_LIST_HEAD(&cs->job_list);
802 	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
803 	kref_init(&cs->refcount);
804 	spin_lock_init(&cs->job_lock);
805 
806 	cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
807 	if (!cs_cmpl)
808 		cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
809 
810 	if (!cs_cmpl) {
811 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
812 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
813 		rc = -ENOMEM;
814 		goto free_cs;
815 	}
816 
817 	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
818 			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
819 	if (!cs->jobs_in_queue_cnt)
820 		cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
821 				sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
822 
823 	if (!cs->jobs_in_queue_cnt) {
824 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
825 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
826 		rc = -ENOMEM;
827 		goto free_cs_cmpl;
828 	}
829 
830 	cs_cmpl->hdev = hdev;
831 	cs_cmpl->type = cs->type;
832 	spin_lock_init(&cs_cmpl->lock);
833 	cs->fence = &cs_cmpl->base_fence;
834 
835 	spin_lock(&ctx->cs_lock);
836 
837 	cs_cmpl->cs_seq = ctx->cs_sequence;
838 	other = ctx->cs_pending[cs_cmpl->cs_seq &
839 				(hdev->asic_prop.max_pending_cs - 1)];
840 
841 	if (other && !completion_done(&other->completion)) {
842 		/* If the following statement is true, it means we have reached
843 		 * a point in which only part of the staged submission was
844 		 * submitted and we don't have enough room in the 'cs_pending'
845 		 * array for the rest of the submission.
846 		 * This causes a deadlock because this CS will never be
847 		 * completed as it depends on future CS's for completion.
848 		 */
849 		if (other->cs_sequence == user_sequence)
850 			dev_crit_ratelimited(hdev->dev,
851 				"Staged CS %llu deadlock due to lack of resources",
852 				user_sequence);
853 
854 		dev_dbg_ratelimited(hdev->dev,
855 			"Rejecting CS because of too many in-flight CS\n");
856 		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
857 		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
858 		rc = -EAGAIN;
859 		goto free_fence;
860 	}
861 
862 	/* init hl_fence */
863 	hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
864 
865 	cs->sequence = cs_cmpl->cs_seq;
866 
867 	ctx->cs_pending[cs_cmpl->cs_seq &
868 			(hdev->asic_prop.max_pending_cs - 1)] =
869 							&cs_cmpl->base_fence;
870 	ctx->cs_sequence++;
871 
872 	hl_fence_get(&cs_cmpl->base_fence);
873 
874 	hl_fence_put(other);
875 
876 	spin_unlock(&ctx->cs_lock);
877 
878 	*cs_new = cs;
879 
880 	return 0;
881 
882 free_fence:
883 	spin_unlock(&ctx->cs_lock);
884 	kfree(cs->jobs_in_queue_cnt);
885 free_cs_cmpl:
886 	kfree(cs_cmpl);
887 free_cs:
888 	kfree(cs);
889 	hl_ctx_put(ctx);
890 	return rc;
891 }
892 
893 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
894 {
895 	struct hl_cs_job *job, *tmp;
896 
897 	staged_cs_put(hdev, cs);
898 
899 	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
900 		complete_job(hdev, job);
901 }
902 
903 void hl_cs_rollback_all(struct hl_device *hdev)
904 {
905 	int i;
906 	struct hl_cs *cs, *tmp;
907 
908 	flush_workqueue(hdev->sob_reset_wq);
909 
910 	/* flush all completions before iterating over the CS mirror list in
911 	 * order to avoid a race with the release functions
912 	 */
913 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
914 		flush_workqueue(hdev->cq_wq[i]);
915 
916 	/* Make sure we don't have leftovers in the CS mirror list */
917 	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
918 		cs_get(cs);
919 		cs->aborted = true;
920 		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
921 				cs->ctx->asid, cs->sequence);
922 		cs_rollback(hdev, cs);
923 		cs_put(cs);
924 	}
925 
926 	force_complete_multi_cs(hdev);
927 }
928 
929 static void
930 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
931 {
932 	struct hl_user_pending_interrupt *pend;
933 	unsigned long flags;
934 
935 	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
936 	list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
937 		pend->fence.error = -EIO;
938 		complete_all(&pend->fence.completion);
939 	}
940 	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
941 }
942 
943 void hl_release_pending_user_interrupts(struct hl_device *hdev)
944 {
945 	struct asic_fixed_properties *prop = &hdev->asic_prop;
946 	struct hl_user_interrupt *interrupt;
947 	int i;
948 
949 	if (!prop->user_interrupt_count)
950 		return;
951 
952 	/* We iterate through the user interrupt requests and wake up all
953 	 * user threads waiting for interrupt completion. We iterate the
954 	 * list under a lock; this is why all user threads, once awake,
955 	 * will wait on the same lock and will release the waiting object upon
956 	 * unlock.
957 	 */
958 
959 	for (i = 0 ; i < prop->user_interrupt_count ; i++) {
960 		interrupt = &hdev->user_interrupt[i];
961 		wake_pending_user_interrupt_threads(interrupt);
962 	}
963 
964 	interrupt = &hdev->common_user_interrupt;
965 	wake_pending_user_interrupt_threads(interrupt);
966 }
967 
968 static void job_wq_completion(struct work_struct *work)
969 {
970 	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
971 						finish_work);
972 	struct hl_cs *cs = job->cs;
973 	struct hl_device *hdev = cs->ctx->hdev;
974 
975 	/* job is no longer needed */
976 	complete_job(hdev, job);
977 }
978 
979 static int validate_queue_index(struct hl_device *hdev,
980 				struct hl_cs_chunk *chunk,
981 				enum hl_queue_type *queue_type,
982 				bool *is_kernel_allocated_cb)
983 {
984 	struct asic_fixed_properties *asic = &hdev->asic_prop;
985 	struct hw_queue_properties *hw_queue_prop;
986 
987 	/* This must be checked here to prevent out-of-bounds access to
988 	 * hw_queues_props array
989 	 */
990 	if (chunk->queue_index >= asic->max_queues) {
991 		dev_err(hdev->dev, "Queue index %d is invalid\n",
992 			chunk->queue_index);
993 		return -EINVAL;
994 	}
995 
996 	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
997 
998 	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
999 		dev_err(hdev->dev, "Queue index %d is invalid\n",
1000 			chunk->queue_index);
1001 		return -EINVAL;
1002 	}
1003 
1004 	if (hw_queue_prop->driver_only) {
1005 		dev_err(hdev->dev,
1006 			"Queue index %d is restricted for the kernel driver\n",
1007 			chunk->queue_index);
1008 		return -EINVAL;
1009 	}
1010 
1011 	/* When hw queue type isn't QUEUE_TYPE_HW,
1012 	 * USER_ALLOC_CB flag shall be treated as "don't care".
1013 	 */
1014 	if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1015 		if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1016 			if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1017 				dev_err(hdev->dev,
1018 					"Queue index %d doesn't support user CB\n",
1019 					chunk->queue_index);
1020 				return -EINVAL;
1021 			}
1022 
1023 			*is_kernel_allocated_cb = false;
1024 		} else {
1025 			if (!(hw_queue_prop->cb_alloc_flags &
1026 					CB_ALLOC_KERNEL)) {
1027 				dev_err(hdev->dev,
1028 					"Queue index %d doesn't support kernel CB\n",
1029 					chunk->queue_index);
1030 				return -EINVAL;
1031 			}
1032 
1033 			*is_kernel_allocated_cb = true;
1034 		}
1035 	} else {
1036 		*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1037 						& CB_ALLOC_KERNEL);
1038 	}
1039 
1040 	*queue_type = hw_queue_prop->type;
1041 	return 0;
1042 }
1043 
1044 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1045 					struct hl_cb_mgr *cb_mgr,
1046 					struct hl_cs_chunk *chunk)
1047 {
1048 	struct hl_cb *cb;
1049 	u32 cb_handle;
1050 
1051 	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
1052 
1053 	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
1054 	if (!cb) {
1055 		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
1056 		return NULL;
1057 	}
1058 
1059 	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1060 		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1061 		goto release_cb;
1062 	}
1063 
1064 	atomic_inc(&cb->cs_cnt);
1065 
1066 	return cb;
1067 
1068 release_cb:
1069 	hl_cb_put(cb);
1070 	return NULL;
1071 }
1072 
1073 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1074 		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1075 {
1076 	struct hl_cs_job *job;
1077 
1078 	job = kzalloc(sizeof(*job), GFP_ATOMIC);
1079 	if (!job)
1080 		job = kzalloc(sizeof(*job), GFP_KERNEL);
1081 
1082 	if (!job)
1083 		return NULL;
1084 
1085 	kref_init(&job->refcount);
1086 	job->queue_type = queue_type;
1087 	job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1088 
1089 	if (is_cb_patched(hdev, job))
1090 		INIT_LIST_HEAD(&job->userptr_list);
1091 
1092 	if (job->queue_type == QUEUE_TYPE_EXT)
1093 		INIT_WORK(&job->finish_work, job_wq_completion);
1094 
1095 	return job;
1096 }
1097 
1098 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1099 {
1100 	if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1101 		return CS_TYPE_SIGNAL;
1102 	else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1103 		return CS_TYPE_WAIT;
1104 	else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1105 		return CS_TYPE_COLLECTIVE_WAIT;
1106 	else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1107 		return CS_RESERVE_SIGNALS;
1108 	else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1109 		return CS_UNRESERVE_SIGNALS;
1110 	else
1111 		return CS_TYPE_DEFAULT;
1112 }
1113 
1114 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1115 {
1116 	struct hl_device *hdev = hpriv->hdev;
1117 	struct hl_ctx *ctx = hpriv->ctx;
1118 	u32 cs_type_flags, num_chunks;
1119 	enum hl_device_status status;
1120 	enum hl_cs_type cs_type;
1121 
1122 	if (!hl_device_operational(hdev, &status)) {
1123 		dev_warn_ratelimited(hdev->dev,
1124 			"Device is %s. Can't submit new CS\n",
1125 			hdev->status[status]);
1126 		return -EBUSY;
1127 	}
1128 
1129 	if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1130 			!hdev->supports_staged_submission) {
1131 		dev_err(hdev->dev, "staged submission not supported");
1132 		return -EPERM;
1133 	}
1134 
1135 	cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1136 
1137 	if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1138 		dev_err(hdev->dev,
1139 			"CS type flags are mutually exclusive, context %d\n",
1140 			ctx->asid);
1141 		return -EINVAL;
1142 	}
1143 
1144 	cs_type = hl_cs_get_cs_type(cs_type_flags);
1145 	num_chunks = args->in.num_chunks_execute;
1146 
1147 	if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
1148 					!hdev->supports_sync_stream)) {
1149 		dev_err(hdev->dev, "Sync stream CS is not supported\n");
1150 		return -EINVAL;
1151 	}
1152 
1153 	if (cs_type == CS_TYPE_DEFAULT) {
1154 		if (!num_chunks) {
1155 			dev_err(hdev->dev,
1156 				"Got execute CS with 0 chunks, context %d\n",
1157 				ctx->asid);
1158 			return -EINVAL;
1159 		}
1160 	} else if (num_chunks != 1) {
1161 		dev_err(hdev->dev,
1162 			"Sync stream CS mandates one chunk only, context %d\n",
1163 			ctx->asid);
1164 		return -EINVAL;
1165 	}
1166 
1167 	return 0;
1168 }
1169 
1170 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1171 					struct hl_cs_chunk **cs_chunk_array,
1172 					void __user *chunks, u32 num_chunks,
1173 					struct hl_ctx *ctx)
1174 {
1175 	u32 size_to_copy;
1176 
1177 	if (num_chunks > HL_MAX_JOBS_PER_CS) {
1178 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1179 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1180 		dev_err(hdev->dev,
1181 			"Number of chunks can NOT be larger than %d\n",
1182 			HL_MAX_JOBS_PER_CS);
1183 		return -EINVAL;
1184 	}
1185 
1186 	*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1187 					GFP_ATOMIC);
1188 	if (!*cs_chunk_array)
1189 		*cs_chunk_array = kmalloc_array(num_chunks,
1190 					sizeof(**cs_chunk_array), GFP_KERNEL);
1191 	if (!*cs_chunk_array) {
1192 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1193 		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1194 		return -ENOMEM;
1195 	}
1196 
1197 	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1198 	if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1199 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1200 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1201 		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1202 		kfree(*cs_chunk_array);
1203 		return -EFAULT;
1204 	}
1205 
1206 	return 0;
1207 }
1208 
1209 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1210 				u64 sequence, u32 flags,
1211 				u32 encaps_signal_handle)
1212 {
1213 	if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1214 		return 0;
1215 
1216 	cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1217 	cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1218 
1219 	if (cs->staged_first) {
1220 		/* Staged CS sequence is the first CS sequence */
1221 		INIT_LIST_HEAD(&cs->staged_cs_node);
1222 		cs->staged_sequence = cs->sequence;
1223 
1224 		if (cs->encaps_signals)
1225 			cs->encaps_sig_hdl_id = encaps_signal_handle;
1226 	} else {
1227 		/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1228 		 * under the cs_mirror_lock
1229 		 */
1230 		cs->staged_sequence = sequence;
1231 	}
1232 
1233 	/* Increment CS reference if needed */
1234 	staged_cs_get(hdev, cs);
1235 
1236 	cs->staged_cs = true;
1237 
1238 	return 0;
1239 }
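/*
 * An illustrative sketch of the flag combinations a user passes for a
 * three-part staged submission handled above:
 *
 *	CS#0: HL_CS_FLAGS_STAGED_SUBMISSION | HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
 *	CS#1: HL_CS_FLAGS_STAGED_SUBMISSION
 *	CS#2: HL_CS_FLAGS_STAGED_SUBMISSION | HL_CS_FLAGS_STAGED_SUBMISSION_LAST
 *
 * CS#1 and CS#2 pass the sequence returned for CS#0, so all three members
 * end up with the same staged_sequence.
 */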
1240 
1241 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1242 {
1243 	int i;
1244 
1245 	for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1246 		if (qid == hdev->stream_master_qid_arr[i])
1247 			return BIT(i);
1248 
1249 	return 0;
1250 }
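/*
 * For example (hypothetical array contents): if hdev->stream_master_qid_arr
 * holds { 4, 8, 11 }, then qid 8 maps to BIT(1) = 0x2 and qid 11 to
 * BIT(2) = 0x4, while a qid that is not a stream master maps to 0 and
 * contributes nothing to the CS's qid map.
 */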
1251 
1252 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1253 				u32 num_chunks, u64 *cs_seq, u32 flags,
1254 				u32 encaps_signals_handle, u32 timeout)
1255 {
1256 	bool staged_mid, int_queues_only = true;
1257 	struct hl_device *hdev = hpriv->hdev;
1258 	struct hl_cs_chunk *cs_chunk_array;
1259 	struct hl_cs_counters_atomic *cntr;
1260 	struct hl_ctx *ctx = hpriv->ctx;
1261 	struct hl_cs_job *job;
1262 	struct hl_cs *cs;
1263 	struct hl_cb *cb;
1264 	u64 user_sequence;
1265 	u8 stream_master_qid_map = 0;
1266 	int rc, i;
1267 
1268 	cntr = &hdev->aggregated_cs_counters;
1269 	user_sequence = *cs_seq;
1270 	*cs_seq = ULLONG_MAX;
1271 
1272 	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1273 			hpriv->ctx);
1274 	if (rc)
1275 		goto out;
1276 
1277 	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1278 			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1279 		staged_mid = true;
1280 	else
1281 		staged_mid = false;
1282 
1283 	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1284 			staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1285 			timeout);
1286 	if (rc)
1287 		goto free_cs_chunk_array;
1288 
1289 	*cs_seq = cs->sequence;
1290 
1291 	hl_debugfs_add_cs(cs);
1292 
1293 	rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1294 						encaps_signals_handle);
1295 	if (rc)
1296 		goto free_cs_object;
1297 
1298 	/* If this is a staged submission we must return the staged sequence
1299 	 * rather than the internal CS sequence
1300 	 */
1301 	if (cs->staged_cs)
1302 		*cs_seq = cs->staged_sequence;
1303 
1304 	/* Validate ALL the CS chunks before submitting the CS */
1305 	for (i = 0 ; i < num_chunks ; i++) {
1306 		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1307 		enum hl_queue_type queue_type;
1308 		bool is_kernel_allocated_cb;
1309 
1310 		rc = validate_queue_index(hdev, chunk, &queue_type,
1311 						&is_kernel_allocated_cb);
1312 		if (rc) {
1313 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1314 			atomic64_inc(&cntr->validation_drop_cnt);
1315 			goto free_cs_object;
1316 		}
1317 
1318 		if (is_kernel_allocated_cb) {
1319 			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
1320 			if (!cb) {
1321 				atomic64_inc(
1322 					&ctx->cs_counters.validation_drop_cnt);
1323 				atomic64_inc(&cntr->validation_drop_cnt);
1324 				rc = -EINVAL;
1325 				goto free_cs_object;
1326 			}
1327 		} else {
1328 			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1329 		}
1330 
1331 		if (queue_type == QUEUE_TYPE_EXT ||
1332 						queue_type == QUEUE_TYPE_HW) {
1333 			int_queues_only = false;
1334 
1335 			/*
1336 			 * store which streams are being used for external/HW
1337 			 * queues of this CS
1338 			 */
1339 			if (hdev->supports_wait_for_multi_cs)
1340 				stream_master_qid_map |=
1341 					get_stream_master_qid_mask(hdev,
1342 							chunk->queue_index);
1343 		}
1344 
1345 		job = hl_cs_allocate_job(hdev, queue_type,
1346 						is_kernel_allocated_cb);
1347 		if (!job) {
1348 			atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1349 			atomic64_inc(&cntr->out_of_mem_drop_cnt);
1350 			dev_err(hdev->dev, "Failed to allocate a new job\n");
1351 			rc = -ENOMEM;
1352 			if (is_kernel_allocated_cb)
1353 				goto release_cb;
1354 
1355 			goto free_cs_object;
1356 		}
1357 
1358 		job->id = i + 1;
1359 		job->cs = cs;
1360 		job->user_cb = cb;
1361 		job->user_cb_size = chunk->cb_size;
1362 		job->hw_queue_id = chunk->queue_index;
1363 
1364 		cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1365 
1366 		list_add_tail(&job->cs_node, &cs->job_list);
1367 
1368 		/*
1369 		 * Increment CS reference. When the CS reference is 0, the CS is
1370 		 * done and can be signaled to the user, and all its resources freed.
1371 		 * Only increment for a JOB on external or H/W queues, because
1372 		 * only for those JOBs do we get completion.
1373 		 */
1374 		if (cs_needs_completion(cs) &&
1375 			(job->queue_type == QUEUE_TYPE_EXT ||
1376 				job->queue_type == QUEUE_TYPE_HW))
1377 			cs_get(cs);
1378 
1379 		hl_debugfs_add_job(hdev, job);
1380 
1381 		rc = cs_parser(hpriv, job);
1382 		if (rc) {
1383 			atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1384 			atomic64_inc(&cntr->parsing_drop_cnt);
1385 			dev_err(hdev->dev,
1386 				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1387 				cs->ctx->asid, cs->sequence, job->id, rc);
1388 			goto free_cs_object;
1389 		}
1390 	}
1391 
1392 	/* We allow a CS with any queue type combination as long as it does
1393 	 * not get a completion
1394 	 */
1395 	if (int_queues_only && cs_needs_completion(cs)) {
1396 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1397 		atomic64_inc(&cntr->validation_drop_cnt);
1398 		dev_err(hdev->dev,
1399 			"Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1400 			cs->ctx->asid, cs->sequence);
1401 		rc = -EINVAL;
1402 		goto free_cs_object;
1403 	}
1404 
1405 	/*
1406 	 * store the (external/HW queues) streams used by the CS in the
1407 	 * fence object for multi-CS completion
1408 	 */
1409 	if (hdev->supports_wait_for_multi_cs)
1410 		cs->fence->stream_master_qid_map = stream_master_qid_map;
1411 
1412 	rc = hl_hw_queue_schedule_cs(cs);
1413 	if (rc) {
1414 		if (rc != -EAGAIN)
1415 			dev_err(hdev->dev,
1416 				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
1417 				cs->ctx->asid, cs->sequence, rc);
1418 		goto free_cs_object;
1419 	}
1420 
1421 	rc = HL_CS_STATUS_SUCCESS;
1422 	goto put_cs;
1423 
1424 release_cb:
1425 	atomic_dec(&cb->cs_cnt);
1426 	hl_cb_put(cb);
1427 free_cs_object:
1428 	cs_rollback(hdev, cs);
1429 	*cs_seq = ULLONG_MAX;
1430 	/* The path below is both for good and erroneous exits */
1431 put_cs:
1432 	/* We finished with the CS in this function, so put the ref */
1433 	cs_put(cs);
1434 free_cs_chunk_array:
1435 	kfree(cs_chunk_array);
1436 out:
1437 	return rc;
1438 }
1439 
1440 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1441 				u64 *cs_seq)
1442 {
1443 	struct hl_device *hdev = hpriv->hdev;
1444 	struct hl_ctx *ctx = hpriv->ctx;
1445 	bool need_soft_reset = false;
1446 	int rc = 0, do_ctx_switch;
1447 	void __user *chunks;
1448 	u32 num_chunks, tmp;
1449 	int ret;
1450 
1451 	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1452 
1453 	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1454 		mutex_lock(&hpriv->restore_phase_mutex);
1455 
1456 		if (do_ctx_switch) {
1457 			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1458 			if (rc) {
1459 				dev_err_ratelimited(hdev->dev,
1460 					"Failed to switch to context %d, rejecting CS! %d\n",
1461 					ctx->asid, rc);
1462 				/*
1463 				 * If we timed out, or if the device is not IDLE
1464 				 * while we want to do context-switch (-EBUSY),
1465 				 * we need to soft-reset because QMAN is
1466 				 * probably stuck. However, we can't call to
1467 				 * reset here directly because of deadlock, so
1468 				 * need to do it at the very end of this
1469 				 * function
1470 				 */
1471 				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1472 					need_soft_reset = true;
1473 				mutex_unlock(&hpriv->restore_phase_mutex);
1474 				goto out;
1475 			}
1476 		}
1477 
1478 		hdev->asic_funcs->restore_phase_topology(hdev);
1479 
1480 		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1481 		num_chunks = args->in.num_chunks_restore;
1482 
1483 		if (!num_chunks) {
1484 			dev_dbg(hdev->dev,
1485 				"Need to run restore phase but restore CS is empty\n");
1486 			rc = 0;
1487 		} else {
1488 			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1489 					cs_seq, 0, 0, hdev->timeout_jiffies);
1490 		}
1491 
1492 		mutex_unlock(&hpriv->restore_phase_mutex);
1493 
1494 		if (rc) {
1495 			dev_err(hdev->dev,
1496 				"Failed to submit restore CS for context %d (%d)\n",
1497 				ctx->asid, rc);
1498 			goto out;
1499 		}
1500 
1501 		/* Need to wait for restore completion before execution phase */
1502 		if (num_chunks) {
1503 			enum hl_cs_wait_status status;
1504 wait_again:
1505 			ret = _hl_cs_wait_ioctl(hdev, ctx,
1506 					jiffies_to_usecs(hdev->timeout_jiffies),
1507 					*cs_seq, &status, NULL);
1508 			if (ret) {
1509 				if (ret == -ERESTARTSYS) {
1510 					usleep_range(100, 200);
1511 					goto wait_again;
1512 				}
1513 
1514 				dev_err(hdev->dev,
1515 					"Restore CS for context %d failed to complete %d\n",
1516 					ctx->asid, ret);
1517 				rc = -ENOEXEC;
1518 				goto out;
1519 			}
1520 		}
1521 
1522 		ctx->thread_ctx_switch_wait_token = 1;
1523 
1524 	} else if (!ctx->thread_ctx_switch_wait_token) {
1525 		rc = hl_poll_timeout_memory(hdev,
1526 			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1527 			100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1528 
1529 		if (rc == -ETIMEDOUT) {
1530 			dev_err(hdev->dev,
1531 				"context switch phase timeout (%d)\n", tmp);
1532 			goto out;
1533 		}
1534 	}
1535 
1536 out:
1537 	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1538 		hl_device_reset(hdev, 0);
1539 
1540 	return rc;
1541 }
1542 
1543 /*
1544  * hl_cs_signal_sob_wraparound_handler: handle the SOB value wraparound case.
1545  * If the SOB value reaches the max value, move to the other SOB reserved
1546  * for the queue.
1547  * @hdev: pointer to device structure
1548  * @q_idx: stream queue index
1549  * @hw_sob: the H/W SOB used in this signal CS.
1550  * @count: signals count
1551  * @encaps_sig: tells whether it's reservation for encaps signals or not.
1552  *
1553  * Note that this function must be called while hw_queues_lock is taken.
1554  */
1555 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1556 			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1557 
1558 {
1559 	struct hl_sync_stream_properties *prop;
1560 	struct hl_hw_sob *sob = *hw_sob, *other_sob;
1561 	u8 other_sob_offset;
1562 
1563 	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1564 
1565 	hw_sob_get(sob);
1566 
1567 	/* check for wraparound */
1568 	if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1569 		/*
1570 		 * Decrement as we reached the max value.
1571 		 * The release function won't be called here as we've
1572 		 * just incremented the refcount right before calling this
1573 		 * function.
1574 		 */
1575 		hw_sob_put_err(sob);
1576 
1577 		/*
1578 		 * Check the other sob value; if it is still in use then fail,
1579 		 * otherwise make the switch.
1580 		 */
1581 		other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1582 		other_sob = &prop->hw_sob[other_sob_offset];
1583 
1584 		if (kref_read(&other_sob->kref) != 1) {
1585 			dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1586 								q_idx);
1587 			return -EINVAL;
1588 		}
1589 
1590 		/*
1591 		 * next_sob_val always points to the next available signal
1592 		 * in the sob, so in encaps signals it will be the next one
1593 		 * after reserving the required amount.
1594 		 */
1595 		if (encaps_sig)
1596 			prop->next_sob_val = count + 1;
1597 		else
1598 			prop->next_sob_val = count;
1599 
1600 		/* only two SOBs are currently in use */
1601 		prop->curr_sob_offset = other_sob_offset;
1602 		*hw_sob = other_sob;
1603 
1604 		/*
1605 		 * Check if other_sob needs a reset, then do it before using it
1606 		 * for the reservation or the next signal cs.
1607 		 * We do it here, for both encaps and regular signal cs cases,
1608 		 * in order to avoid a possible race of two kref_put calls on
1609 		 * the sob, which can occur at the same time if we move the
1610 		 * sob reset (kref_put) to the cs_do_release function.
1611 		 * In addition, if we have a combination of signal cs and
1612 		 * encaps, and at the point we need to reset the sob there are
1613 		 * no more reservations and only signal cs keep coming,
1614 		 * in such a case we need the signal cs to put the refcount and
1615 		 * reset the sob.
1616 		 */
1617 		if (other_sob->need_reset)
1618 			hw_sob_put(other_sob);
1619 
1620 		if (encaps_sig) {
1621 			/* set reset indication for the sob */
1622 			sob->need_reset = true;
1623 			hw_sob_get(other_sob);
1624 		}
1625 
1626 		dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1627 				prop->curr_sob_offset, q_idx);
1628 	} else {
1629 		prop->next_sob_val += count;
1630 	}
1631 
1632 	return 0;
1633 }
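/*
 * A numeric sketch of the wraparound handling above, assuming for
 * illustration that HL_MAX_SOB_VAL is 1 << 15 (32768): with
 * next_sob_val = 32760 and a signal CS of count = 16, 32760 + 16 >= 32768,
 * so the stream switches to the other reserved SOB and restarts counting
 * there with next_sob_val = 16 (or count + 1 = 17 when reserving
 * encapsulated signals, since next_sob_val points to the next available
 * signal after the reservation).
 */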
1634 
1635 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1636 		struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1637 		bool encaps_signals)
1638 {
1639 	u64 *signal_seq_arr = NULL;
1640 	u32 size_to_copy, signal_seq_arr_len;
1641 	int rc = 0;
1642 
1643 	if (encaps_signals) {
1644 		*signal_seq = chunk->encaps_signal_seq;
1645 		return 0;
1646 	}
1647 
1648 	signal_seq_arr_len = chunk->num_signal_seq_arr;
1649 
1650 	/* currently only one signal seq is supported */
1651 	if (signal_seq_arr_len != 1) {
1652 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1653 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1654 		dev_err(hdev->dev,
1655 			"Wait for signal CS supports only one signal CS seq\n");
1656 		return -EINVAL;
1657 	}
1658 
1659 	signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1660 					sizeof(*signal_seq_arr),
1661 					GFP_ATOMIC);
1662 	if (!signal_seq_arr)
1663 		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1664 					sizeof(*signal_seq_arr),
1665 					GFP_KERNEL);
1666 	if (!signal_seq_arr) {
1667 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1668 		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1669 		return -ENOMEM;
1670 	}
1671 
1672 	size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1673 	if (copy_from_user(signal_seq_arr,
1674 				u64_to_user_ptr(chunk->signal_seq_arr),
1675 				size_to_copy)) {
1676 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1677 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1678 		dev_err(hdev->dev,
1679 			"Failed to copy signal seq array from user\n");
1680 		rc = -EFAULT;
1681 		goto out;
1682 	}
1683 
1684 	/* currently it is guaranteed to have only one signal seq */
1685 	*signal_seq = signal_seq_arr[0];
1686 
1687 out:
1688 	kfree(signal_seq_arr);
1689 
1690 	return rc;
1691 }
1692 
1693 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1694 		struct hl_ctx *ctx, struct hl_cs *cs,
1695 		enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1696 {
1697 	struct hl_cs_counters_atomic *cntr;
1698 	struct hl_cs_job *job;
1699 	struct hl_cb *cb;
1700 	u32 cb_size;
1701 
1702 	cntr = &hdev->aggregated_cs_counters;
1703 
1704 	job = hl_cs_allocate_job(hdev, q_type, true);
1705 	if (!job) {
1706 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1707 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
1708 		dev_err(hdev->dev, "Failed to allocate a new job\n");
1709 		return -ENOMEM;
1710 	}
1711 
1712 	if (cs->type == CS_TYPE_WAIT)
1713 		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1714 	else
1715 		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1716 
1717 	cb = hl_cb_kernel_create(hdev, cb_size,
1718 				q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1719 	if (!cb) {
1720 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1721 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
1722 		kfree(job);
1723 		return -EFAULT;
1724 	}
1725 
1726 	job->id = 0;
1727 	job->cs = cs;
1728 	job->user_cb = cb;
1729 	atomic_inc(&job->user_cb->cs_cnt);
1730 	job->user_cb_size = cb_size;
1731 	job->hw_queue_id = q_idx;
1732 
1733 	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1734 			&& cs->encaps_signals)
1735 		job->encaps_sig_wait_offset = encaps_signal_offset;
1736 	/*
1737 	 * No need for parsing, as the user CB is the patched CB.
1738 	 * We call hl_cb_destroy() for two reasons - we don't need the CB in
1739 	 * the CB idr anymore, and we must decrement its refcount as it was
1740 	 * incremented inside hl_cb_kernel_create().
1741 	 */
1742 	job->patched_cb = job->user_cb;
1743 	job->job_cb_size = job->user_cb_size;
1744 	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
1745 
1746 	/* increment refcount as for external queues we get completion */
1747 	cs_get(cs);
1748 
1749 	cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1750 
1751 	list_add_tail(&job->cs_node, &cs->job_list);
1752 
1753 	hl_debugfs_add_job(hdev, job);
1754 
1755 	return 0;
1756 }
1757 
1758 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1759 				u32 q_idx, u32 count,
1760 				u32 *handle_id, u32 *sob_addr,
1761 				u32 *signals_count)
1762 {
1763 	struct hw_queue_properties *hw_queue_prop;
1764 	struct hl_sync_stream_properties *prop;
1765 	struct hl_device *hdev = hpriv->hdev;
1766 	struct hl_cs_encaps_sig_handle *handle;
1767 	struct hl_encaps_signals_mgr *mgr;
1768 	struct hl_hw_sob *hw_sob;
1769 	int hdl_id;
1770 	int rc = 0;
1771 
1772 	if (count >= HL_MAX_SOB_VAL) {
1773 		dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
1774 						count);
1775 		rc = -EINVAL;
1776 		goto out;
1777 	}
1778 
1779 	if (q_idx >= hdev->asic_prop.max_queues) {
1780 		dev_err(hdev->dev, "Queue index %d is invalid\n",
1781 			q_idx);
1782 		rc = -EINVAL;
1783 		goto out;
1784 	}
1785 
1786 	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1787 
1788 	if (!hw_queue_prop->supports_sync_stream) {
1789 		dev_err(hdev->dev,
1790 			"Queue index %d does not support sync stream operations\n",
1791 									q_idx);
1792 		rc = -EINVAL;
1793 		goto out;
1794 	}
1795 
1796 	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1797 
1798 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1799 	if (!handle) {
1800 		rc = -ENOMEM;
1801 		goto out;
1802 	}
1803 
1804 	handle->count = count;
1805 	mgr = &hpriv->ctx->sig_mgr;
1806 
1807 	spin_lock(&mgr->lock);
1808 	hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
1809 	spin_unlock(&mgr->lock);
1810 
1811 	if (hdl_id < 0) {
1812 		dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
1813 		rc = -EINVAL;
1814 		goto out;
1815 	}
1816 
1817 	handle->id = hdl_id;
1818 	handle->q_idx = q_idx;
1819 	handle->hdev = hdev;
1820 	kref_init(&handle->refcount);
1821 
1822 	hdev->asic_funcs->hw_queues_lock(hdev);
1823 
1824 	hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1825 
1826 	/*
1827 	 * Increment the SOB value by the user-requested count in order
1828 	 * to reserve those signals.
1829 	 * Check whether the amount of signals to reserve exceeds the max
1830 	 * SOB value; if so, switch to a new SOB.
1831 	 */
1832 	rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
1833 								true);
1834 	if (rc) {
1835 		dev_err(hdev->dev, "Failed to switch SOB\n");
1836 		hdev->asic_funcs->hw_queues_unlock(hdev);
1837 		rc = -EINVAL;
1838 		goto remove_idr;
1839 	}
1840 	/* Set the hw_sob in the handle only after calling the SOB wraparound
1841 	 * handler, since the SOB could have changed.
1842 	 */
1843 	handle->hw_sob = hw_sob;
1844 
1845 	/* Store the current SOB value for the unreserve validity check and
1846 	 * for signal offset support
1847 	 */
1848 	handle->pre_sob_val = prop->next_sob_val - handle->count;
1849 
1850 	*signals_count = prop->next_sob_val;
1851 	hdev->asic_funcs->hw_queues_unlock(hdev);
1852 
1853 	*sob_addr = handle->hw_sob->sob_addr;
1854 	*handle_id = hdl_id;
1855 
1856 	dev_dbg(hdev->dev,
1857 		"Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
1858 			hw_sob->sob_id, handle->hw_sob->sob_addr,
1859 			prop->next_sob_val - 1, q_idx, hdl_id);
1860 	goto out;
1861 
1862 remove_idr:
1863 	spin_lock(&mgr->lock);
1864 	idr_remove(&mgr->handles, hdl_id);
1865 	spin_unlock(&mgr->lock);
1866 
1867 	kfree(handle);
1868 out:
1869 	return rc;
1870 }
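/*
 * Illustrative only - a minimal userspace sketch (not part of the driver) of
 * reserving and later unreserving encapsulated signals through the CS ioctl.
 * HL_IOCTL_CS and the HL_CS_FLAGS_RESERVE_SIGNALS_ONLY /
 * HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY flags are assumed to come from the
 * habanalabs uapi header; the field names match the in/out unions handled by
 * hl_cs_ioctl() below.
 *
 *	union hl_cs_args args = {0};
 *	u32 handle_id;
 *
 *	args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;
 *	args.in.encaps_signals_q_idx = q_idx;
 *	args.in.encaps_signals_count = signals_to_reserve;
 *	if (!ioctl(fd, HL_IOCTL_CS, &args))
 *		handle_id = args.out.handle_id;	// keep for the unreserve call
 *
 *	// ... submit signal CSs against the reservation ...
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.cs_flags = HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY;
 *	args.in.encaps_sig_handle_id = handle_id;
 *	ioctl(fd, HL_IOCTL_CS, &args);
 */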
1871 
1872 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
1873 {
1874 	struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
1875 	struct hl_sync_stream_properties *prop;
1876 	struct hl_device *hdev = hpriv->hdev;
1877 	struct hl_encaps_signals_mgr *mgr;
1878 	struct hl_hw_sob *hw_sob;
1879 	u32 q_idx, sob_addr;
1880 	int rc = 0;
1881 
1882 	mgr = &hpriv->ctx->sig_mgr;
1883 
1884 	spin_lock(&mgr->lock);
1885 	encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
1886 	if (encaps_sig_hdl) {
1887 		dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
1888 				handle_id, encaps_sig_hdl->hw_sob->sob_addr,
1889 					encaps_sig_hdl->count);
1890 
1891 		hdev->asic_funcs->hw_queues_lock(hdev);
1892 
1893 		q_idx = encaps_sig_hdl->q_idx;
1894 		prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1895 		hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1896 		sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
1897 
1898 		/* Check if sob_val went out of sync due to other
1899 		 * signal submission requests that were handled
1900 		 * between the reserve and unreserve calls, or due to
1901 		 * a SOB switch upon reaching the max SOB value.
1902 		 */
1903 		if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
1904 				!= prop->next_sob_val ||
1905 				sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
1906 			dev_err(hdev->dev, "Cannot unreserve signals, SOB val went out of sync, expected: %u, actual val: %u\n",
1907 				encaps_sig_hdl->pre_sob_val,
1908 				(prop->next_sob_val - encaps_sig_hdl->count));
1909 
1910 			hdev->asic_funcs->hw_queues_unlock(hdev);
1911 			rc = -EINVAL;
1912 			goto out;
1913 		}
1914 
1915 		/*
1916 		 * Decrement the SOB value by count by user request
1917 		 * to unreserve those signals
1918 		 */
1919 		prop->next_sob_val -= encaps_sig_hdl->count;
1920 
1921 		hdev->asic_funcs->hw_queues_unlock(hdev);
1922 
1923 		hw_sob_put(hw_sob);
1924 
1925 		/* Release the id and free allocated memory of the handle */
1926 		idr_remove(&mgr->handles, handle_id);
1927 		kfree(encaps_sig_hdl);
1928 	} else {
1929 		rc = -EINVAL;
1930 		dev_err(hdev->dev, "Failed to unreserve signals, cannot find handle\n");
1931 	}
1932 out:
1933 	spin_unlock(&mgr->lock);
1934 
1935 	return rc;
1936 }
1937 
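/*
 * cs_ioctl_signal_wait() - handle signal, wait and collective-wait CS types.
 *
 * A summary of the flow implemented below: copy the single user chunk and
 * validate the target queue; for wait CSs, extract the signal sequence
 * (resolving the encapsulated-signals handle when HL_CS_FLAGS_ENCAP_SIGNALS
 * is set) and look up the signal fence; then allocate the CS, create the
 * signal/wait jobs and schedule the CS on the H/W queues.
 */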
1938 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
1939 				void __user *chunks, u32 num_chunks,
1940 				u64 *cs_seq, u32 flags, u32 timeout)
1941 {
1942 	struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
1943 	bool handle_found = false, is_wait_cs = false,
1944 			wait_cs_submitted = false,
1945 			cs_encaps_signals = false;
1946 	struct hl_cs_chunk *cs_chunk_array, *chunk;
1947 	bool staged_cs_with_encaps_signals = false;
1948 	struct hw_queue_properties *hw_queue_prop;
1949 	struct hl_device *hdev = hpriv->hdev;
1950 	struct hl_cs_compl *sig_waitcs_cmpl;
1951 	u32 q_idx, collective_engine_id = 0;
1952 	struct hl_cs_counters_atomic *cntr;
1953 	struct hl_fence *sig_fence = NULL;
1954 	struct hl_ctx *ctx = hpriv->ctx;
1955 	enum hl_queue_type q_type;
1956 	struct hl_cs *cs;
1957 	u64 signal_seq;
1958 	int rc;
1959 
1960 	cntr = &hdev->aggregated_cs_counters;
1961 	*cs_seq = ULLONG_MAX;
1962 
1963 	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1964 			ctx);
1965 	if (rc)
1966 		goto out;
1967 
1968 	/* currently it is guaranteed to have only one chunk */
1969 	chunk = &cs_chunk_array[0];
1970 
1971 	if (chunk->queue_index >= hdev->asic_prop.max_queues) {
1972 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1973 		atomic64_inc(&cntr->validation_drop_cnt);
1974 		dev_err(hdev->dev, "Queue index %d is invalid\n",
1975 			chunk->queue_index);
1976 		rc = -EINVAL;
1977 		goto free_cs_chunk_array;
1978 	}
1979 
1980 	q_idx = chunk->queue_index;
1981 	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1982 	q_type = hw_queue_prop->type;
1983 
1984 	if (!hw_queue_prop->supports_sync_stream) {
1985 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1986 		atomic64_inc(&cntr->validation_drop_cnt);
1987 		dev_err(hdev->dev,
1988 			"Queue index %d does not support sync stream operations\n",
1989 			q_idx);
1990 		rc = -EINVAL;
1991 		goto free_cs_chunk_array;
1992 	}
1993 
1994 	if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
1995 		if (hw_queue_prop->collective_mode != HL_COLLECTIVE_MASTER) {
1996 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1997 			atomic64_inc(&cntr->validation_drop_cnt);
1998 			dev_err(hdev->dev,
1999 				"Queue index %d is invalid\n", q_idx);
2000 			rc = -EINVAL;
2001 			goto free_cs_chunk_array;
2002 		}
2003 
2004 		if (!hdev->nic_ports_mask) {
2005 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2006 			atomic64_inc(&cntr->validation_drop_cnt);
2007 			dev_err(hdev->dev,
2008 				"Collective operations not supported when NIC ports are disabled\n");
2009 			rc = -EINVAL;
2010 			goto free_cs_chunk_array;
2011 		}
2012 
2013 		collective_engine_id = chunk->collective_engine_id;
2014 	}
2015 
2016 	is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2017 			cs_type == CS_TYPE_COLLECTIVE_WAIT);
2018 
2019 	cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2020 
2021 	if (is_wait_cs) {
2022 		rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2023 				ctx, cs_encaps_signals);
2024 		if (rc)
2025 			goto free_cs_chunk_array;
2026 
2027 		if (cs_encaps_signals) {
2028 			/* check if cs sequence has encapsulated
2029 			 * signals handle
2030 			 */
2031 			struct idr *idp;
2032 			u32 id;
2033 
2034 			spin_lock(&ctx->sig_mgr.lock);
2035 			idp = &ctx->sig_mgr.handles;
2036 			idr_for_each_entry(idp, encaps_sig_hdl, id) {
2037 				if (encaps_sig_hdl->cs_seq == signal_seq) {
2038 					handle_found = true;
2039 					/* Take a refcount to protect the
2040 					 * handle from idr removal; needed when
2041 					 * multiple wait CSs use offsets to wait
2042 					 * on reserved encaps signals.
2043 					 */
2044 					kref_get(&encaps_sig_hdl->refcount);
2045 					break;
2046 				}
2047 			}
2048 			spin_unlock(&ctx->sig_mgr.lock);
2049 
2050 			if (!handle_found) {
2051 				/* treat as signal CS already finished */
2052 				dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2053 						signal_seq);
2054 				rc = 0;
2055 				goto free_cs_chunk_array;
2056 			}
2057 
2058 			/* validate also the signal offset value */
2059 			if (chunk->encaps_signal_offset >
2060 					encaps_sig_hdl->count) {
2061 				dev_err(hdev->dev, "offset (%u) exceeds the max reserved signals count (%u)!\n",
2062 						chunk->encaps_signal_offset,
2063 						encaps_sig_hdl->count);
2064 				rc = -EINVAL;
2065 				goto free_cs_chunk_array;
2066 			}
2067 		}
2068 
2069 		sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2070 		if (IS_ERR(sig_fence)) {
2071 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2072 			atomic64_inc(&cntr->validation_drop_cnt);
2073 			dev_err(hdev->dev,
2074 				"Failed to get signal CS with seq 0x%llx\n",
2075 				signal_seq);
2076 			rc = PTR_ERR(sig_fence);
2077 			goto free_cs_chunk_array;
2078 		}
2079 
2080 		if (!sig_fence) {
2081 			/* signal CS already finished */
2082 			rc = 0;
2083 			goto free_cs_chunk_array;
2084 		}
2085 
2086 		sig_waitcs_cmpl =
2087 			container_of(sig_fence, struct hl_cs_compl, base_fence);
2088 
2089 		staged_cs_with_encaps_signals = !!
2090 				(sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2091 				(flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2092 
2093 		if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2094 				!staged_cs_with_encaps_signals) {
2095 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2096 			atomic64_inc(&cntr->validation_drop_cnt);
2097 			dev_err(hdev->dev,
2098 				"CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2099 				signal_seq);
2100 			hl_fence_put(sig_fence);
2101 			rc = -EINVAL;
2102 			goto free_cs_chunk_array;
2103 		}
2104 
2105 		if (completion_done(&sig_fence->completion)) {
2106 			/* signal CS already finished */
2107 			hl_fence_put(sig_fence);
2108 			rc = 0;
2109 			goto free_cs_chunk_array;
2110 		}
2111 	}
2112 
2113 	rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2114 	if (rc) {
2115 		if (is_wait_cs)
2116 			hl_fence_put(sig_fence);
2117 
2118 		goto free_cs_chunk_array;
2119 	}
2120 
2121 	/*
2122 	 * Save the signal CS fence for later initialization right before
2123 	 * hanging the wait CS on the queue.
2124 	 * For the encaps signals case, we save the CS sequence and handle
2125 	 * pointer for later initialization.
2126 	 */
2127 	if (is_wait_cs) {
2128 		cs->signal_fence = sig_fence;
2129 		/* Store the handle pointer so we don't have to
2130 		 * look it up again later in the flow,
2131 		 * when we need to set the SOB info in the hw_queue.
2132 		 */
2133 		if (cs->encaps_signals)
2134 			cs->encaps_sig_hdl = encaps_sig_hdl;
2135 	}
2136 
2137 	hl_debugfs_add_cs(cs);
2138 
2139 	*cs_seq = cs->sequence;
2140 
2141 	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2142 		rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2143 				q_idx, chunk->encaps_signal_offset);
2144 	else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2145 		rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2146 				cs, q_idx, collective_engine_id,
2147 				chunk->encaps_signal_offset);
2148 	else {
2149 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2150 		atomic64_inc(&cntr->validation_drop_cnt);
2151 		rc = -EINVAL;
2152 	}
2153 
2154 	if (rc)
2155 		goto free_cs_object;
2156 
2157 	rc = hl_hw_queue_schedule_cs(cs);
2158 	if (rc) {
2159 		/* In case a wait CS failed here, it means the signal CS
2160 		 * already completed. We want to free all of its related
2161 		 * objects, but we don't want to fail the ioctl.
2162 		 */
2163 		if (is_wait_cs)
2164 			rc = 0;
2165 		else if (rc != -EAGAIN)
2166 			dev_err(hdev->dev,
2167 				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
2168 				ctx->asid, cs->sequence, rc);
2169 		goto free_cs_object;
2170 	}
2171 
2172 	rc = HL_CS_STATUS_SUCCESS;
2173 	if (is_wait_cs)
2174 		wait_cs_submitted = true;
2175 	goto put_cs;
2176 
2177 free_cs_object:
2178 	cs_rollback(hdev, cs);
2179 	*cs_seq = ULLONG_MAX;
2180 	/* The path below is both for good and erroneous exits */
2181 put_cs:
2182 	/* We finished with the CS in this function, so put the ref */
2183 	cs_put(cs);
2184 free_cs_chunk_array:
2185 	if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
2186 							is_wait_cs)
2187 		kref_put(&encaps_sig_hdl->refcount,
2188 				hl_encaps_handle_do_release);
2189 	kfree(cs_chunk_array);
2190 out:
2191 	return rc;
2192 }
2193 
2194 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2195 {
2196 	union hl_cs_args *args = data;
2197 	enum hl_cs_type cs_type = 0;
2198 	u64 cs_seq = ULLONG_MAX;
2199 	void __user *chunks;
2200 	u32 num_chunks, flags, timeout,
2201 		signals_count = 0, sob_addr = 0, handle_id = 0;
2202 	int rc;
2203 
2204 	rc = hl_cs_sanity_checks(hpriv, args);
2205 	if (rc)
2206 		goto out;
2207 
2208 	rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2209 	if (rc)
2210 		goto out;
2211 
2212 	cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2213 					~HL_CS_FLAGS_FORCE_RESTORE);
2214 	chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2215 	num_chunks = args->in.num_chunks_execute;
2216 	flags = args->in.cs_flags;
2217 
2218 	/* In case this is a staged CS, user should supply the CS sequence */
2219 	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2220 			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2221 		cs_seq = args->in.seq;
2222 
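	/* The custom timeout supplied by the user is in seconds, hence the
	 * conversion to milliseconds before msecs_to_jiffies()
	 */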
2223 	timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2224 			? msecs_to_jiffies(args->in.timeout * 1000)
2225 			: hpriv->hdev->timeout_jiffies;
2226 
2227 	switch (cs_type) {
2228 	case CS_TYPE_SIGNAL:
2229 	case CS_TYPE_WAIT:
2230 	case CS_TYPE_COLLECTIVE_WAIT:
2231 		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2232 					&cs_seq, args->in.cs_flags, timeout);
2233 		break;
2234 	case CS_RESERVE_SIGNALS:
2235 		rc = cs_ioctl_reserve_signals(hpriv,
2236 					args->in.encaps_signals_q_idx,
2237 					args->in.encaps_signals_count,
2238 					&handle_id, &sob_addr, &signals_count);
2239 		break;
2240 	case CS_UNRESERVE_SIGNALS:
2241 		rc = cs_ioctl_unreserve_signals(hpriv,
2242 					args->in.encaps_sig_handle_id);
2243 		break;
2244 	default:
2245 		rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2246 						args->in.cs_flags,
2247 						args->in.encaps_sig_handle_id,
2248 						timeout);
2249 		break;
2250 	}
2251 out:
2252 	if (rc != -EAGAIN) {
2253 		memset(args, 0, sizeof(*args));
2254 
2255 		if (cs_type == CS_RESERVE_SIGNALS) {
2256 			args->out.handle_id = handle_id;
2257 			args->out.sob_base_addr_offset = sob_addr;
2258 			args->out.count = signals_count;
2259 		} else {
2260 			args->out.seq = cs_seq;
2261 		}
2262 		args->out.status = rc;
2263 	}
2264 
2265 	return rc;
2266 }
2267 
2268 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2269 				enum hl_cs_wait_status *status, u64 timeout_us,
2270 				s64 *timestamp)
2271 {
2272 	struct hl_device *hdev = ctx->hdev;
2273 	long completion_rc;
2274 	int rc = 0;
2275 
2276 	if (IS_ERR(fence)) {
2277 		rc = PTR_ERR(fence);
2278 		if (rc == -EINVAL)
2279 			dev_notice_ratelimited(hdev->dev,
2280 				"Can't wait on CS %llu because current CS is at seq %llu\n",
2281 				seq, ctx->cs_sequence);
2282 		return rc;
2283 	}
2284 
2285 	if (!fence) {
2286 		dev_dbg(hdev->dev,
2287 			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2288 				seq, ctx->cs_sequence);
2289 
2290 		*status = CS_WAIT_STATUS_GONE;
2291 		return 0;
2292 	}
2293 
2294 	if (!timeout_us) {
2295 		completion_rc = completion_done(&fence->completion);
2296 	} else {
2297 		unsigned long timeout;
2298 
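		/* MAX_SCHEDULE_TIMEOUT is passed through as-is so the wait
		 * below is effectively unbounded; any other value is a
		 * microseconds timeout that must be converted to jiffies
		 */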
2299 		timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2300 				timeout_us : usecs_to_jiffies(timeout_us);
2301 		completion_rc =
2302 			wait_for_completion_interruptible_timeout(
2303 				&fence->completion, timeout);
2304 	}
2305 
2306 	if (completion_rc > 0) {
2307 		*status = CS_WAIT_STATUS_COMPLETED;
2308 		if (timestamp)
2309 			*timestamp = ktime_to_ns(fence->timestamp);
2310 	} else {
2311 		*status = CS_WAIT_STATUS_BUSY;
2312 	}
2313 
2314 	if (fence->error == -ETIMEDOUT)
2315 		rc = -ETIMEDOUT;
2316 	else if (fence->error == -EIO)
2317 		rc = -EIO;
2318 
2319 	return rc;
2320 }
2321 
2322 /*
2323  * hl_cs_poll_fences - iterate CS fences to check for CS completion
2324  *
2325  * @mcs_data: multi-CS internal data
2326  *
2327  * @return 0 on success, otherwise non 0 error code
2328  *
2329  * The function iterates over all CS sequences in the list and sets a bit
2330  * in completion_bitmap for each completed CS.
2331  * While iterating, the function also extracts the stream master QID map to
2332  * be used later by the waiting function.
2333  * This function shall be called after taking the context ref.
2334  */
2335 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
2336 {
2337 	struct hl_fence **fence_ptr = mcs_data->fence_arr;
2338 	struct hl_device *hdev = mcs_data->ctx->hdev;
2339 	int i, rc, arr_len = mcs_data->arr_len;
2340 	u64 *seq_arr = mcs_data->seq_arr;
2341 	ktime_t max_ktime, first_cs_time;
2342 	enum hl_cs_wait_status status;
2343 
2344 	memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
2345 
2346 	/* get all fences under the same lock */
2347 	rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2348 	if (rc)
2349 		return rc;
2350 
2351 	/*
2352 	 * Set to the maximum time so we can verify the timestamp is valid:
2353 	 * if this value is unchanged at the end, no timestamp was updated.
2354 	 */
2355 	max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2356 	first_cs_time = max_ktime;
2357 
2358 	for (i = 0; i < arr_len; i++, fence_ptr++) {
2359 		struct hl_fence *fence = *fence_ptr;
2360 
2361 		/*
2362 		 * function won't sleep as it is called with timeout 0 (i.e.
2363 		 * poll the fence)
2364 		 */
2365 		rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
2366 						&status, 0, NULL);
2367 		if (rc) {
2368 			dev_err(hdev->dev,
2369 				"wait_for_fence error :%d for CS seq %llu\n",
2370 								rc, seq_arr[i]);
2371 			break;
2372 		}
2373 
2374 		mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
2375 
2376 		if (status == CS_WAIT_STATUS_BUSY)
2377 			continue;
2378 
2379 		mcs_data->completion_bitmap |= BIT(i);
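		/* e.g. if seq_arr holds {8, 9, 10} and only seq 9 has
		 * completed so far, completion_bitmap becomes 0b010
		 */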
2380 
2381 		/*
2382 		 * Best effort to extract the timestamp. A few notes:
2383 		 * - if even a single fence is gone, we cannot extract a
2384 		 *   timestamp (as the fence no longer exists)
2385 		 * - for all completed CSs we take the earliest timestamp;
2386 		 *   for this we have to validate that:
2387 		 *       1. the given timestamp was indeed set
2388 		 *       2. the timestamp is the earliest of all so far
2389 		 */
2390 
2391 		if (status == CS_WAIT_STATUS_GONE) {
2392 			mcs_data->update_ts = false;
2393 			mcs_data->gone_cs = true;
2394 		} else if (mcs_data->update_ts &&
2395 			(ktime_compare(fence->timestamp,
2396 						ktime_set(0, 0)) > 0) &&
2397 			(ktime_compare(fence->timestamp, first_cs_time) < 0)) {
2398 			first_cs_time = fence->timestamp;
2399 		}
2400 	}
2401 
2402 	hl_fences_put(mcs_data->fence_arr, arr_len);
2403 
2404 	if (mcs_data->update_ts &&
2405 			(ktime_compare(first_cs_time, max_ktime) != 0))
2406 		mcs_data->timestamp = ktime_to_ns(first_cs_time);
2407 
2408 	return rc;
2409 }
2410 
2411 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2412 				u64 timeout_us, u64 seq,
2413 				enum hl_cs_wait_status *status, s64 *timestamp)
2414 {
2415 	struct hl_fence *fence;
2416 	int rc = 0;
2417 
2418 	if (timestamp)
2419 		*timestamp = 0;
2420 
2421 	hl_ctx_get(hdev, ctx);
2422 
2423 	fence = hl_ctx_get_fence(ctx, seq);
2424 
2425 	rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2426 	hl_fence_put(fence);
2427 	hl_ctx_put(ctx);
2428 
2429 	return rc;
2430 }
2431 
2432 /*
2433  * hl_wait_multi_cs_completion_init - init completion structure
2434  *
2435  * @hdev: pointer to habanalabs device structure
2436  * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
2437  *                        master QID to wait on
2438  *
2439  * @return valid completion struct pointer on success, otherwise error pointer
2440  *
2441  * Up to MULTI_CS_MAX_USER_CTX calls can be made to the driver concurrently.
2442  * The function grabs the first available completion structure (by marking
2443  * it "used") and initializes its values.
2444  */
2445 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
2446 							struct hl_device *hdev,
2447 							u8 stream_master_bitmap)
2448 {
2449 	struct multi_cs_completion *mcs_compl;
2450 	int i;
2451 
2452 	/* find free multi_cs completion structure */
2453 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2454 		mcs_compl = &hdev->multi_cs_completion[i];
2455 		spin_lock(&mcs_compl->lock);
2456 		if (!mcs_compl->used) {
2457 			mcs_compl->used = 1;
2458 			mcs_compl->timestamp = 0;
2459 			mcs_compl->stream_master_qid_map = stream_master_bitmap;
2460 			reinit_completion(&mcs_compl->completion);
2461 			spin_unlock(&mcs_compl->lock);
2462 			break;
2463 		}
2464 		spin_unlock(&mcs_compl->lock);
2465 	}
2466 
2467 	if (i == MULTI_CS_MAX_USER_CTX) {
2468 		dev_err(hdev->dev,
2469 				"no available multi-CS completion structure\n");
2470 		return ERR_PTR(-ENOMEM);
2471 	}
2472 	return mcs_compl;
2473 }
2474 
2475 /*
2476  * hl_wait_multi_cs_completion_fini - return completion structure and set as
2477  *                                    unused
2478  *
2479  * @mcs_compl: pointer to the completion structure
2480  */
2481 static void hl_wait_multi_cs_completion_fini(
2482 					struct multi_cs_completion *mcs_compl)
2483 {
2484 	/*
2485 	 * Free the completion structure; do it under the lock to stay in sync
2486 	 * with the thread that signals completion
2487 	 */
2488 	spin_lock(&mcs_compl->lock);
2489 	mcs_compl->used = 0;
2490 	spin_unlock(&mcs_compl->lock);
2491 }
2492 
2493 /*
2494  * hl_wait_multi_cs_completion - wait for first CS to complete
2495  *
2496  * @mcs_data: multi-CS internal data
2497  *
2498  * @return 0 on success, otherwise non 0 error code
2499  */
2500 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
2501 {
2502 	struct hl_device *hdev = mcs_data->ctx->hdev;
2503 	struct multi_cs_completion *mcs_compl;
2504 	long completion_rc;
2505 
2506 	mcs_compl = hl_wait_multi_cs_completion_init(hdev,
2507 					mcs_data->stream_master_qid_map);
2508 	if (IS_ERR(mcs_compl))
2509 		return PTR_ERR(mcs_compl);
2510 
2511 	completion_rc = wait_for_completion_interruptible_timeout(
2512 					&mcs_compl->completion,
2513 					usecs_to_jiffies(mcs_data->timeout_us));
2514 
2515 	/* update timestamp */
2516 	if (completion_rc > 0)
2517 		mcs_data->timestamp = mcs_compl->timestamp;
2518 
2519 	hl_wait_multi_cs_completion_fini(mcs_compl);
2520 
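	/* Propagate the raw wait result: > 0 means a completion arrived in
	 * time, 0 means the timeout expired and -ERESTARTSYS means the wait
	 * was interrupted by a signal. The caller translates this into the
	 * ioctl status.
	 */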
2521 	mcs_data->wait_status = completion_rc;
2522 
2523 	return 0;
2524 }
2525 
2526 /*
2527  * hl_multi_cs_completion_init - init array of multi-CS completion structures
2528  *
2529  * @hdev: pointer to habanalabs device structure
2530  */
2531 void hl_multi_cs_completion_init(struct hl_device *hdev)
2532 {
2533 	struct multi_cs_completion *mcs_cmpl;
2534 	int i;
2535 
2536 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2537 		mcs_cmpl = &hdev->multi_cs_completion[i];
2538 		mcs_cmpl->used = 0;
2539 		spin_lock_init(&mcs_cmpl->lock);
2540 		init_completion(&mcs_cmpl->completion);
2541 	}
2542 }
2543 
2544 /*
2545  * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2546  *
2547  * @hpriv: pointer to the private data of the fd
2548  * @data: pointer to multi-CS wait ioctl in/out args
2549  *
2550  */
2551 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2552 {
2553 	struct hl_device *hdev = hpriv->hdev;
2554 	struct multi_cs_data mcs_data = {0};
2555 	union hl_wait_cs_args *args = data;
2556 	struct hl_ctx *ctx = hpriv->ctx;
2557 	struct hl_fence **fence_arr;
2558 	void __user *seq_arr;
2559 	u32 size_to_copy;
2560 	u64 *cs_seq_arr;
2561 	u8 seq_arr_len;
2562 	int rc;
2563 
2564 	if (!hdev->supports_wait_for_multi_cs) {
2565 		dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2566 		return -EPERM;
2567 	}
2568 
2569 	seq_arr_len = args->in.seq_arr_len;
2570 
2571 	if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2572 		dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2573 				HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2574 		return -EINVAL;
2575 	}
2576 
2577 	/* allocate memory for sequence array */
2578 	cs_seq_arr =
2579 		kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2580 	if (!cs_seq_arr)
2581 		return -ENOMEM;
2582 
2583 	/* copy CS sequence array from user */
2584 	seq_arr = (void __user *) (uintptr_t) args->in.seq;
2585 	size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2586 	if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2587 		dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2588 		rc = -EFAULT;
2589 		goto free_seq_arr;
2590 	}
2591 
2592 	/* allocate array for the fences */
2593 	fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
2594 	if (!fence_arr) {
2595 		rc = -ENOMEM;
2596 		goto free_seq_arr;
2597 	}
2598 
2599 	/* initialize the multi-CS internal data */
2600 	mcs_data.ctx = ctx;
2601 	mcs_data.seq_arr = cs_seq_arr;
2602 	mcs_data.fence_arr = fence_arr;
2603 	mcs_data.arr_len = seq_arr_len;
2604 
2605 	hl_ctx_get(hdev, ctx);
2606 
2607 	/* poll all CS fences, extract timestamp */
2608 	mcs_data.update_ts = true;
2609 	rc = hl_cs_poll_fences(&mcs_data);
2610 	/*
2611 	 * skip wait for CS completion when one of the below is true:
2612 	 * - an error on the poll function
2613 	 * - one or more CS in the list completed
2614 	 * - the user called ioctl with timeout 0
2615 	 */
2616 	if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
2617 		goto put_ctx;
2618 
2619 	/* wait (with timeout) for the first CS to be completed */
2620 	mcs_data.timeout_us = args->in.timeout_us;
2621 	rc = hl_wait_multi_cs_completion(&mcs_data);
2622 	if (rc)
2623 		goto put_ctx;
2624 
2625 	if (mcs_data.wait_status > 0) {
2626 		/*
2627 		 * Poll fences once again to update the CS map.
2628 		 * No timestamp should be updated this time.
2629 		 */
2630 		mcs_data.update_ts = false;
2631 		rc = hl_cs_poll_fences(&mcs_data);
2632 
2633 		/*
2634 		 * if hl_wait_multi_cs_completion returned before timeout (i.e.
2635 		 * it got a completion) we expect to see at least one CS
2636 		 * completed after the poll function.
2637 		 */
2638 		if (!mcs_data.completion_bitmap) {
2639 			dev_warn_ratelimited(hdev->dev,
2640 				"Multi-CS got completion on wait but no CS completed\n");
2641 			rc = -EFAULT;
2642 		}
2643 	}
2644 
2645 put_ctx:
2646 	hl_ctx_put(ctx);
2647 	kfree(fence_arr);
2648 
2649 free_seq_arr:
2650 	kfree(cs_seq_arr);
2651 
2652 	if (rc)
2653 		return rc;
2654 
2655 	if (mcs_data.wait_status == -ERESTARTSYS) {
2656 		dev_err_ratelimited(hdev->dev,
2657 				"user process got signal while waiting for Multi-CS\n");
2658 		return -EINTR;
2659 	}
2660 
2661 	/* update output args */
2662 	memset(args, 0, sizeof(*args));
2663 
2664 	if (mcs_data.completion_bitmap) {
2665 		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2666 		args->out.cs_completion_map = mcs_data.completion_bitmap;
2667 
2668 		/* a non-zero timestamp is valid */
2669 		if (mcs_data.timestamp) {
2670 			args->out.timestamp_nsec = mcs_data.timestamp;
2671 			args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2672 		}
2673 
2674 		/* update if some CS was gone */
2675 		if (mcs_data.gone_cs)
2676 			args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2677 	} else {
2678 		args->out.status = HL_WAIT_CS_STATUS_BUSY;
2679 	}
2680 
2681 	return 0;
2682 }
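/*
 * Illustrative only - a minimal userspace sketch (not part of the driver) of
 * the multi-CS wait flow handled above. HL_IOCTL_WAIT_CS is assumed to come
 * from the habanalabs uapi header; the flag and field names match those used
 * in this file.
 *
 *	__u64 seqs[2] = { seq_a, seq_b };
 *	union hl_wait_cs_args args = {0};
 *
 *	args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
 *	args.in.seq = (__u64) (uintptr_t) seqs;
 *	args.in.seq_arr_len = 2;
 *	args.in.timeout_us = 1000000;	// wait up to one second
 *
 *	if (!ioctl(fd, HL_IOCTL_WAIT_CS, &args) &&
 *	    args.out.status == HL_WAIT_CS_STATUS_COMPLETED)
 *		completed = args.out.cs_completion_map;	// bit i <-> seqs[i]
 */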
2683 
2684 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2685 {
2686 	struct hl_device *hdev = hpriv->hdev;
2687 	union hl_wait_cs_args *args = data;
2688 	enum hl_cs_wait_status status;
2689 	u64 seq = args->in.seq;
2690 	s64 timestamp;
2691 	int rc;
2692 
2693 	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
2694 				&status, &timestamp);
2695 
2696 	if (rc == -ERESTARTSYS) {
2697 		dev_err_ratelimited(hdev->dev,
2698 			"user process got signal while waiting for CS handle %llu\n",
2699 			seq);
2700 		return -EINTR;
2701 	}
2702 
2703 	memset(args, 0, sizeof(*args));
2704 
2705 	if (rc) {
2706 		if (rc == -ETIMEDOUT) {
2707 			dev_err_ratelimited(hdev->dev,
2708 				"CS %llu has timed-out while user process is waiting for it\n",
2709 				seq);
2710 			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
2711 		} else if (rc == -EIO) {
2712 			dev_err_ratelimited(hdev->dev,
2713 				"CS %llu has been aborted while user process is waiting for it\n",
2714 				seq);
2715 			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
2716 		}
2717 		return rc;
2718 	}
2719 
2720 	if (timestamp) {
2721 		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2722 		args->out.timestamp_nsec = timestamp;
2723 	}
2724 
2725 	switch (status) {
2726 	case CS_WAIT_STATUS_GONE:
2727 		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2728 		fallthrough;
2729 	case CS_WAIT_STATUS_COMPLETED:
2730 		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2731 		break;
2732 	case CS_WAIT_STATUS_BUSY:
2733 	default:
2734 		args->out.status = HL_WAIT_CS_STATUS_BUSY;
2735 		break;
2736 	}
2737 
2738 	return 0;
2739 }
2740 
2741 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2742 				u32 timeout_us, u64 user_address,
2743 				u32 target_value, u16 interrupt_offset,
2744 				enum hl_cs_wait_status *status)
2745 {
2746 	struct hl_user_pending_interrupt *pend;
2747 	struct hl_user_interrupt *interrupt;
2748 	unsigned long timeout, flags;
2749 	u32 completion_value;
2750 	long completion_rc;
2751 	int rc = 0;
2752 
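	/* An all-ones timeout appears to be treated as an effectively
	 * unbounded wait: it is used as a jiffies value directly rather
	 * than being converted from microseconds
	 */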
2753 	if (timeout_us == U32_MAX)
2754 		timeout = timeout_us;
2755 	else
2756 		timeout = usecs_to_jiffies(timeout_us);
2757 
2758 	hl_ctx_get(hdev, ctx);
2759 
2760 	pend = kmalloc(sizeof(*pend), GFP_KERNEL);
2761 	if (!pend) {
2762 		hl_ctx_put(ctx);
2763 		return -ENOMEM;
2764 	}
2765 
2766 	hl_fence_init(&pend->fence, ULONG_MAX);
2767 
2768 	if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
2769 		interrupt = &hdev->common_user_interrupt;
2770 	else
2771 		interrupt = &hdev->user_interrupt[interrupt_offset];
2772 
2773 	/* Add pending user interrupt to relevant list for the interrupt
2774 	 * handler to monitor
2775 	 */
2776 	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
2777 	list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
2778 	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2779 
2780 	/* We check for completion value as interrupt could have been received
2781 	 * before we added the node to the wait list
2782 	 */
2783 	if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
2784 		dev_err(hdev->dev, "Failed to copy completion value from user\n");
2785 		rc = -EFAULT;
2786 		goto remove_pending_user_interrupt;
2787 	}
2788 
2789 	if (completion_value >= target_value)
2790 		*status = CS_WAIT_STATUS_COMPLETED;
2791 	else
2792 		*status = CS_WAIT_STATUS_BUSY;
2793 
2794 	if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
2795 		goto remove_pending_user_interrupt;
2796 
2797 wait_again:
2798 	/* Wait for interrupt handler to signal completion */
2799 	completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
2800 										timeout);
2801 
2802 	/* If the timeout did not expire, we need to perform the comparison.
2803 	 * If the comparison fails, keep waiting until the timeout expires
2804 	 */
2805 	if (completion_rc > 0) {
2806 		spin_lock_irqsave(&interrupt->wait_list_lock, flags);
2807 		/* reinit_completion must be called before we check for user
2808 		 * completion value, otherwise, if interrupt is received after
2809 		 * the comparison and before the next wait_for_completion,
2810 		 * we will reach timeout and fail
2811 		 */
2812 		reinit_completion(&pend->fence.completion);
2813 		spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2814 
2815 		if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
2816 			dev_err(hdev->dev, "Failed to copy completion value from user\n");
2817 			rc = -EFAULT;
2818 
2819 			goto remove_pending_user_interrupt;
2820 		}
2821 
2822 		if (completion_value >= target_value) {
2823 			*status = CS_WAIT_STATUS_COMPLETED;
2824 		} else {
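			/* Not at the target value yet - wait again for the
			 * time remaining from the previous wait, which
			 * wait_for_completion_interruptible_timeout()
			 * returned in completion_rc
			 */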
2825 			timeout = completion_rc;
2826 			goto wait_again;
2827 		}
2828 	} else if (completion_rc == -ERESTARTSYS) {
2829 		dev_err_ratelimited(hdev->dev,
2830 			"user process got signal while waiting for interrupt ID %d\n",
2831 			interrupt->interrupt_id);
2832 		rc = -EINTR;
2833 	} else {
2834 		*status = CS_WAIT_STATUS_BUSY;
2835 	}
2836 
2837 remove_pending_user_interrupt:
2838 	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
2839 	list_del(&pend->wait_list_node);
2840 	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2841 
2842 	kfree(pend);
2843 	hl_ctx_put(ctx);
2844 
2845 	return rc;
2846 }
2847 
2848 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2849 {
2850 	u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
2851 	struct hl_device *hdev = hpriv->hdev;
2852 	struct asic_fixed_properties *prop;
2853 	union hl_wait_cs_args *args = data;
2854 	enum hl_cs_wait_status status;
2855 	int rc;
2856 
2857 	prop = &hdev->asic_prop;
2858 
2859 	if (!prop->user_interrupt_count) {
2860 		dev_err(hdev->dev, "no user interrupts allowed\n");
2861 		return -EPERM;
2862 	}
2863 
2864 	interrupt_id =
2865 		FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
2866 
2867 	first_interrupt = prop->first_available_user_msix_interrupt;
2868 	last_interrupt = prop->first_available_user_msix_interrupt +
2869 						prop->user_interrupt_count - 1;
2870 
2871 	if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
2872 			interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
2873 		dev_err(hdev->dev, "invalid user interrupt %u\n", interrupt_id);
2874 		return -EINVAL;
2875 	}
2876 
2877 	if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
2878 		interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
2879 	else
2880 		interrupt_offset = interrupt_id - first_interrupt;
2881 
2882 	rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
2883 				args->in.interrupt_timeout_us, args->in.addr,
2884 				args->in.target, interrupt_offset, &status);
2885 
2886 	if (rc) {
2887 		if (rc != -EINTR)
2888 			dev_err_ratelimited(hdev->dev,
2889 				"interrupt_wait_ioctl failed (%d)\n", rc);
2890 
2891 		return rc;
2892 	}
2893 
2894 	memset(args, 0, sizeof(*args));
2895 
2896 	switch (status) {
2897 	case CS_WAIT_STATUS_COMPLETED:
2898 		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2899 		break;
2900 	case CS_WAIT_STATUS_BUSY:
2901 	default:
2902 		args->out.status = HL_WAIT_CS_STATUS_BUSY;
2903 		break;
2904 	}
2905 
2906 	return 0;
2907 }
2908 
2909 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2910 {
2911 	union hl_wait_cs_args *args = data;
2912 	u32 flags = args->in.flags;
2913 	int rc;
2914 
2915 	/* If the device is not operational, no point in waiting for any command submission or
2916 	 * user interrupt
2917 	 */
2918 	if (!hl_device_operational(hpriv->hdev, NULL))
2919 		return -EPERM;
2920 
2921 	if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
2922 		rc = hl_interrupt_wait_ioctl(hpriv, data);
2923 	else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
2924 		rc = hl_multi_cs_wait_ioctl(hpriv, data);
2925 	else
2926 		rc = hl_cs_wait_ioctl(hpriv, data);
2927 
2928 	return rc;
2929 }
2930