1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2021 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include <uapi/drm/habanalabs_accel.h>
9 #include "habanalabs.h"
10 
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13 
14 #define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15 			HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
16 			HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
17 			HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
18 
19 
20 #define MAX_TS_ITER_NUM 100
21 
22 /**
23  * enum hl_cs_wait_status - cs wait status
24  * @CS_WAIT_STATUS_BUSY: cs was not completed yet
25  * @CS_WAIT_STATUS_COMPLETED: cs completed
26  * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
27  */
28 enum hl_cs_wait_status {
29 	CS_WAIT_STATUS_BUSY,
30 	CS_WAIT_STATUS_COMPLETED,
31 	CS_WAIT_STATUS_GONE
32 };
33 
34 static void job_wq_completion(struct work_struct *work);
35 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
36 				enum hl_cs_wait_status *status, s64 *timestamp);
37 static void cs_do_release(struct kref *ref);
38 
39 static void hl_push_cs_outcome(struct hl_device *hdev,
40 			       struct hl_cs_outcome_store *outcome_store,
41 			       u64 seq, ktime_t ts, int error)
42 {
43 	struct hl_cs_outcome *node;
44 	unsigned long flags;
45 
46 	/*
47 	 * CS outcome store supports the following operations:
48 	 * push outcome - store a recent CS outcome in the store
49 	 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
50 	 * It uses 2 lists: used list and free list.
51 	 * It has a pre-allocated amount of nodes, each node stores
52 	 * a single CS outcome.
53 	 * Initially, all the nodes are in the free list.
54 	 * On push outcome, a node (any) is taken from the free list, its
55 	 * information is filled in, and the node is moved to the used list.
56 	 * It is possible, that there are no nodes left in the free list.
57 	 * In this case, we will lose some information about old outcomes. We
58 	 * will pop the OLDEST node from the used list, and make it free.
59 	 * On pop, the node is searched for in the used list (using a search
60 	 * index).
61 	 * If found, the node is then removed from the used list, and moved
62 	 * back to the free list. The outcome data that the node contained is
63 	 * returned back to the user.
64 	 */
65 
66 	spin_lock_irqsave(&outcome_store->db_lock, flags);
67 
68 	if (list_empty(&outcome_store->free_list)) {
69 		node = list_last_entry(&outcome_store->used_list,
70 				       struct hl_cs_outcome, list_link);
71 		hash_del(&node->map_link);
72 		dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
73 	} else {
74 		node = list_last_entry(&outcome_store->free_list,
75 				       struct hl_cs_outcome, list_link);
76 	}
77 
78 	list_del_init(&node->list_link);
79 
80 	node->seq = seq;
81 	node->ts = ts;
82 	node->error = error;
83 
84 	list_add(&node->list_link, &outcome_store->used_list);
85 	hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
86 
87 	spin_unlock_irqrestore(&outcome_store->db_lock, flags);
88 }
89 
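/*
 * hl_pop_cs_outcome - retrieve and consume the stored outcome of a specific CS
 *
 * Looks up @seq in the outcome map. On a hit, the timestamp and error are
 * copied to the caller, the node is moved back to the free list and true is
 * returned. Returns false if no outcome is stored for @seq.
 */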
90 static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
91 			       u64 seq, ktime_t *ts, int *error)
92 {
93 	struct hl_cs_outcome *node;
94 	unsigned long flags;
95 
96 	spin_lock_irqsave(&outcome_store->db_lock, flags);
97 
98 	hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
99 		if (node->seq == seq) {
100 			*ts = node->ts;
101 			*error = node->error;
102 
103 			hash_del(&node->map_link);
104 			list_del_init(&node->list_link);
105 			list_add(&node->list_link, &outcome_store->free_list);
106 
107 			spin_unlock_irqrestore(&outcome_store->db_lock, flags);
108 
109 			return true;
110 		}
111 
112 	spin_unlock_irqrestore(&outcome_store->db_lock, flags);
113 
114 	return false;
115 }
116 
117 static void hl_sob_reset(struct kref *ref)
118 {
119 	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
120 							kref);
121 	struct hl_device *hdev = hw_sob->hdev;
122 
123 	dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
124 
125 	hdev->asic_funcs->reset_sob(hdev, hw_sob);
126 
127 	hw_sob->need_reset = false;
128 }
129 
130 void hl_sob_reset_error(struct kref *ref)
131 {
132 	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
133 							kref);
134 	struct hl_device *hdev = hw_sob->hdev;
135 
136 	dev_crit(hdev->dev,
137 		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
138 		hw_sob->q_idx, hw_sob->sob_id);
139 }
140 
141 void hw_sob_put(struct hl_hw_sob *hw_sob)
142 {
143 	if (hw_sob)
144 		kref_put(&hw_sob->kref, hl_sob_reset);
145 }
146 
147 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
148 {
149 	if (hw_sob)
150 		kref_put(&hw_sob->kref, hl_sob_reset_error);
151 }
152 
153 void hw_sob_get(struct hl_hw_sob *hw_sob)
154 {
155 	if (hw_sob)
156 		kref_get(&hw_sob->kref);
157 }
158 
159 /**
160  * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
161  * @sob_base: sob base id
162  * @sob_mask: sob user mask, each bit represents a sob offset from sob base
163  * @mask: generated mask
164  *
165  * Return: 0 if given parameters are valid
166  */
167 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
168 {
169 	int i;
170 
171 	if (sob_mask == 0)
172 		return -EINVAL;
173 
174 	if (sob_mask == 0x1) {
175 		*mask = ~(1 << (sob_base & 0x7));
176 	} else {
177 		/* find msb in order to verify sob range is valid */
178 		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
179 			if (BIT(i) & sob_mask)
180 				break;
181 
182 		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
183 			return -EINVAL;
184 
185 		*mask = ~sob_mask;
186 	}
187 
188 	return 0;
189 }
190 
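/*
 * hl_fence_release - kref release callback of an hl_fence
 *
 * Frees the hl_cs_compl object that embeds the fence.
 */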
191 static void hl_fence_release(struct kref *kref)
192 {
193 	struct hl_fence *fence =
194 		container_of(kref, struct hl_fence, refcount);
195 	struct hl_cs_compl *hl_cs_cmpl =
196 		container_of(fence, struct hl_cs_compl, base_fence);
197 
198 	kfree(hl_cs_cmpl);
199 }
200 
201 void hl_fence_put(struct hl_fence *fence)
202 {
203 	if (IS_ERR_OR_NULL(fence))
204 		return;
205 	kref_put(&fence->refcount, hl_fence_release);
206 }
207 
208 void hl_fences_put(struct hl_fence **fence, int len)
209 {
210 	int i;
211 
212 	for (i = 0; i < len; i++, fence++)
213 		hl_fence_put(*fence);
214 }
215 
216 void hl_fence_get(struct hl_fence *fence)
217 {
218 	if (fence)
219 		kref_get(&fence->refcount);
220 }
221 
222 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
223 {
224 	kref_init(&fence->refcount);
225 	fence->cs_sequence = sequence;
226 	fence->error = 0;
227 	fence->timestamp = ktime_set(0, 0);
228 	fence->mcs_handling_done = false;
229 	init_completion(&fence->completion);
230 }
231 
232 void cs_get(struct hl_cs *cs)
233 {
234 	kref_get(&cs->refcount);
235 }
236 
237 static int cs_get_unless_zero(struct hl_cs *cs)
238 {
239 	return kref_get_unless_zero(&cs->refcount);
240 }
241 
242 static void cs_put(struct hl_cs *cs)
243 {
244 	kref_put(&cs->refcount, cs_do_release);
245 }
246 
247 static void cs_job_do_release(struct kref *ref)
248 {
249 	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
250 
251 	kfree(job);
252 }
253 
254 static void hl_cs_job_put(struct hl_cs_job *job)
255 {
256 	kref_put(&job->refcount, cs_job_do_release);
257 }
258 
259 bool cs_needs_completion(struct hl_cs *cs)
260 {
261 	/* In case this is a staged CS, only the last CS in sequence should
262 	 * get a completion; any non-staged CS will always get a completion
263 	 */
264 	if (cs->staged_cs && !cs->staged_last)
265 		return false;
266 
267 	return true;
268 }
269 
270 bool cs_needs_timeout(struct hl_cs *cs)
271 {
272 	/* In case this is a staged CS, only the first CS in sequence should
273 	 * get a timeout; any non-staged CS will always get a timeout
274 	 */
275 	if (cs->staged_cs && !cs->staged_first)
276 		return false;
277 
278 	return true;
279 }
280 
281 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
282 {
283 	/* Patched CB is created for external queues jobs */
284 	return (job->queue_type == QUEUE_TYPE_EXT);
285 }
286 
287 /*
288  * cs_parser - parse the user command submission
289  *
290  * @hpriv: pointer to the private data of the fd
291  * @job: pointer to the job that holds the command submission info
292  *
293  * The function parses the command submission of the user. It calls the
294  * ASIC specific parser, which returns a list of memory blocks to send
295  * to the device as different command buffers
296  *
297  */
298 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
299 {
300 	struct hl_device *hdev = hpriv->hdev;
301 	struct hl_cs_parser parser;
302 	int rc;
303 
304 	parser.ctx_id = job->cs->ctx->asid;
305 	parser.cs_sequence = job->cs->sequence;
306 	parser.job_id = job->id;
307 
308 	parser.hw_queue_id = job->hw_queue_id;
309 	parser.job_userptr_list = &job->userptr_list;
310 	parser.patched_cb = NULL;
311 	parser.user_cb = job->user_cb;
312 	parser.user_cb_size = job->user_cb_size;
313 	parser.queue_type = job->queue_type;
314 	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
315 	job->patched_cb = NULL;
316 	parser.completion = cs_needs_completion(job->cs);
317 
318 	rc = hdev->asic_funcs->cs_parser(hdev, &parser);
319 
320 	if (is_cb_patched(hdev, job)) {
321 		if (!rc) {
322 			job->patched_cb = parser.patched_cb;
323 			job->job_cb_size = parser.patched_cb_size;
324 			job->contains_dma_pkt = parser.contains_dma_pkt;
325 			atomic_inc(&job->patched_cb->cs_cnt);
326 		}
327 
328 		/*
329 		 * Whether the parsing worked or not, we don't need the
330 		 * original CB anymore because it was already parsed and
331 		 * won't be accessed again for this CS
332 		 */
333 		atomic_dec(&job->user_cb->cs_cnt);
334 		hl_cb_put(job->user_cb);
335 		job->user_cb = NULL;
336 	} else if (!rc) {
337 		job->job_cb_size = job->user_cb_size;
338 	}
339 
340 	return rc;
341 }
342 
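/*
 * hl_complete_job - release all resources held by a completed job
 *
 * Drops the patched/user CB references, removes the job from the CS job list
 * and from debugfs, puts the CS reference for jobs that carry a completion,
 * and finally drops the job's own reference.
 */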
343 static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
344 {
345 	struct hl_cs *cs = job->cs;
346 
347 	if (is_cb_patched(hdev, job)) {
348 		hl_userptr_delete_list(hdev, &job->userptr_list);
349 
350 		/*
351 		 * We might arrive here from rollback and patched CB wasn't
352 		 * created, so we need to check it's not NULL
353 		 */
354 		if (job->patched_cb) {
355 			atomic_dec(&job->patched_cb->cs_cnt);
356 			hl_cb_put(job->patched_cb);
357 		}
358 	}
359 
360 	/* For H/W queue jobs, if a user CB was allocated by driver,
361 	 * the user CB isn't released in cs_parser() and thus should be
362 	 * released here. This is also true for INT queues jobs which were
363 	 * allocated by driver.
364 	 */
365 	if (job->is_kernel_allocated_cb &&
366 			(job->queue_type == QUEUE_TYPE_HW || job->queue_type == QUEUE_TYPE_INT)) {
367 		atomic_dec(&job->user_cb->cs_cnt);
368 		hl_cb_put(job->user_cb);
369 	}
370 
371 	/*
372 	 * This is the only place where there can be multiple threads
373 	 * modifying the list at the same time
374 	 */
375 	spin_lock(&cs->job_lock);
376 	list_del(&job->cs_node);
377 	spin_unlock(&cs->job_lock);
378 
379 	hl_debugfs_remove_job(hdev, job);
380 
381 	/* We decrement reference only for a CS that gets completion
382 	 * because the reference was incremented only for this kind of CS
383 	 * right before it was scheduled.
384 	 *
385 	 * In staged submission, only the last CS marked as 'staged_last'
386 	 * gets completion, hence its release function will be called from here.
387 	 * As for all the rest CS's in the staged submission which do not get
388 	 * completion, their CS reference will be decremented by the
389 	 * 'staged_last' CS during the CS release flow.
390 	 * All relevant PQ CI counters will be incremented during the CS release
391 	 * flow by calling 'hl_hw_queue_update_ci'.
392 	 */
393 	if (cs_needs_completion(cs) &&
394 			(job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
395 
396 		/* In CS based completions, the timestamp is already available,
397 		 * so no need to extract it from job
398 		 */
399 		if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
400 			cs->completion_timestamp = job->timestamp;
401 
402 		cs_put(cs);
403 	}
404 
405 	hl_cs_job_put(job);
406 }
407 
408 /*
409  * hl_staged_cs_find_first - locate the first CS in this staged submission
410  *
411  * @hdev: pointer to device structure
412  * @cs_seq: staged submission sequence number
413  *
414  * @note: This function must be called under 'hdev->cs_mirror_lock'
415  *
416  * Find and return a CS pointer with the given sequence
417  */
418 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
419 {
420 	struct hl_cs *cs;
421 
422 	list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
423 		if (cs->staged_cs && cs->staged_first &&
424 				cs->sequence == cs_seq)
425 			return cs;
426 
427 	return NULL;
428 }
429 
430 /*
431  * is_staged_cs_last_exists - returns true if the last CS in sequence exists
432  *
433  * @hdev: pointer to device structure
434  * @cs: staged submission member
435  *
436  */
437 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
438 {
439 	struct hl_cs *last_entry;
440 
441 	last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
442 								staged_cs_node);
443 
444 	if (last_entry->staged_last)
445 		return true;
446 
447 	return false;
448 }
449 
450 /*
451  * staged_cs_get - get CS reference if this CS is a part of a staged CS
452  *
453  * @hdev: pointer to device structure
454  * @cs: current CS
455  * @cs_seq: staged submission sequence number
456  *
457  * Increment CS reference for every CS in this staged submission except for
458  * the CS which gets completion.
459  */
460 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
461 {
462 	/* Only the last CS in this staged submission will get a completion.
463 	 * We must increment the reference for all other CS's in this
464 	 * staged submission.
465 	 * Once we get a completion we will release the whole staged submission.
466 	 */
467 	if (!cs->staged_last)
468 		cs_get(cs);
469 }
470 
471 /*
472  * staged_cs_put - put a CS in case it is part of staged submission
473  *
474  * @hdev: pointer to device structure
475  * @cs: CS to put
476  *
477  * This function decrements a CS reference (for a non completion CS)
478  */
479 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
480 {
481 	/* We release all CS's in a staged submission except the last
482 	 * CS which we have never incremented its reference.
483 	 */
484 	if (!cs_needs_completion(cs))
485 		cs_put(cs);
486 }
487 
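/*
 * cs_handle_tdr - cancel this CS's TDR work and arm TDR for the next CS
 *
 * For a staged submission the TDR is owned by the first CS in the sequence.
 * The TDR is not cancelled if this CS itself timed out, since we might be
 * running from the TDR context.
 */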
488 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
489 {
490 	struct hl_cs *next = NULL, *iter, *first_cs;
491 
492 	if (!cs_needs_timeout(cs))
493 		return;
494 
495 	spin_lock(&hdev->cs_mirror_lock);
496 
497 	/* We need to handle tdr only once for the complete staged submission.
498 	 * Hence, we choose the CS that reaches this function first which is
499 	 * the CS marked as 'staged_last'.
500 	 * In case single staged cs was submitted which has both first and last
501 	 * indications, then "cs_find_first" below will return NULL, since we
502 	 * removed the cs node from the list before getting here,
503 	 * in such cases just continue with the cs to cancel its TDR work.
504 	 */
505 	if (cs->staged_cs && cs->staged_last) {
506 		first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
507 		if (first_cs)
508 			cs = first_cs;
509 	}
510 
511 	spin_unlock(&hdev->cs_mirror_lock);
512 
513 	/* Don't cancel TDR in case this CS was timedout because we might be
514 	 * running from the TDR context
515 	 */
516 	if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
517 		return;
518 
519 	if (cs->tdr_active)
520 		cancel_delayed_work_sync(&cs->work_tdr);
521 
522 	spin_lock(&hdev->cs_mirror_lock);
523 
524 	/* queue TDR for next CS */
525 	list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
526 		if (cs_needs_timeout(iter)) {
527 			next = iter;
528 			break;
529 		}
530 
531 	if (next && !next->tdr_active) {
532 		next->tdr_active = true;
533 		schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
534 	}
535 
536 	spin_unlock(&hdev->cs_mirror_lock);
537 }
538 
539 /*
540  * force_complete_multi_cs - complete all contexts that wait on multi-CS
541  *
542  * @hdev: pointer to habanalabs device structure
543  */
544 static void force_complete_multi_cs(struct hl_device *hdev)
545 {
546 	int i;
547 
548 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
549 		struct multi_cs_completion *mcs_compl;
550 
551 		mcs_compl = &hdev->multi_cs_completion[i];
552 
553 		spin_lock(&mcs_compl->lock);
554 
555 		if (!mcs_compl->used) {
556 			spin_unlock(&mcs_compl->lock);
557 			continue;
558 		}
559 
560 	/* when calling force complete, no context should be waiting on
561 	 * multi-CS.
562 	 * We call the function as a protection for such a case, to free
563 	 * any pending context and print an error message
564 		 */
565 		dev_err(hdev->dev,
566 				"multi-CS completion context %d still waiting when calling force completion\n",
567 				i);
568 		complete_all(&mcs_compl->completion);
569 		spin_unlock(&mcs_compl->lock);
570 	}
571 }
572 
573 /*
574  * complete_multi_cs - complete all waiting entities on multi-CS
575  *
576  * @hdev: pointer to habanalabs device structure
577  * @cs: CS structure
578  * The function signals a waiting entity that has an overlapping stream masters
579  * with the completed CS.
580  * For example:
581  * - a completed CS worked on stream master QID 4, multi CS completion
582  *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
583  *   common stream master QID
584  * - a completed CS worked on stream master QID 4, multi CS completion
585  *   is actively waiting on stream master QIDs 3, 4. send signal as stream
586  *   master QID 4 is common
587  */
588 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
589 {
590 	struct hl_fence *fence = cs->fence;
591 	int i;
592 
593 	/* in case of multi CS check for completion only for the first CS */
594 	if (cs->staged_cs && !cs->staged_first)
595 		return;
596 
597 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
598 		struct multi_cs_completion *mcs_compl;
599 
600 		mcs_compl = &hdev->multi_cs_completion[i];
601 		if (!mcs_compl->used)
602 			continue;
603 
604 		spin_lock(&mcs_compl->lock);
605 
606 		/*
607 		 * complete if:
608 		 * 1. still waiting for completion
609 		 * 2. the completed CS has at least one overlapping stream
610 		 *    master with the stream masters in the completion
611 		 */
612 		if (mcs_compl->used &&
613 				(fence->stream_master_qid_map &
614 					mcs_compl->stream_master_qid_map)) {
615 			/* extract the timestamp only of first completed CS */
616 			if (!mcs_compl->timestamp)
617 				mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
618 
619 			complete_all(&mcs_compl->completion);
620 
621 			/*
622 			 * Setting mcs_handling_done inside the lock ensures
623 			 * at least one fence has mcs_handling_done set to
624 			 * true before the wait for mcs finishes. This ensures at
625 			 * least one CS will be set as completed when polling
626 			 * mcs fences.
627 			 */
628 			fence->mcs_handling_done = true;
629 		}
630 
631 		spin_unlock(&mcs_compl->lock);
632 	}
633 	/* In case CS completed without mcs completion initialized */
634 	fence->mcs_handling_done = true;
635 }
636 
637 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
638 					struct hl_cs *cs,
639 					struct hl_cs_compl *hl_cs_cmpl)
640 {
641 	/* Skip this handler if the cs wasn't submitted, to avoid putting
642 	 * the hw_sob twice, since this case is already handled at this point;
643 	 * also skip if the hw_sob pointer wasn't set.
644 	 */
645 	if (!hl_cs_cmpl->hw_sob || !cs->submitted)
646 		return;
647 
648 	spin_lock(&hl_cs_cmpl->lock);
649 
650 	/*
651 	 * we get refcount upon reservation of signals or signal/wait cs for the
652 	 * hw_sob object, and need to put it when the first staged cs
653 	 * (which contains the encaps signals) or cs signal/wait is completed.
654 	 */
655 	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
656 			(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
657 			(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
658 			(!!hl_cs_cmpl->encaps_signals)) {
659 		dev_dbg(hdev->dev,
660 				"CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
661 				hl_cs_cmpl->cs_seq,
662 				hl_cs_cmpl->type,
663 				hl_cs_cmpl->hw_sob->sob_id,
664 				hl_cs_cmpl->sob_val);
665 
666 		hw_sob_put(hl_cs_cmpl->hw_sob);
667 
668 		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
669 			hdev->asic_funcs->reset_sob_group(hdev,
670 					hl_cs_cmpl->sob_group);
671 	}
672 
673 	spin_unlock(&hl_cs_cmpl->lock);
674 }
675 
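/*
 * cs_do_release - kref release callback of a CS
 *
 * Completes all remaining jobs, removes the CS from the mirror list, hands
 * the TDR over to the next pending CS, updates the fence error/timestamp,
 * wakes all waiters (including multi-CS) and frees the CS object.
 */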
676 static void cs_do_release(struct kref *ref)
677 {
678 	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
679 	struct hl_device *hdev = cs->ctx->hdev;
680 	struct hl_cs_job *job, *tmp;
681 	struct hl_cs_compl *hl_cs_cmpl =
682 			container_of(cs->fence, struct hl_cs_compl, base_fence);
683 
684 	cs->completed = true;
685 
686 	/*
687 	 * Although reaching here means that all external jobs have
688 	 * finished (because each one of them took a refcnt on the CS), we still
689 	 * need to go over the internal jobs and complete them. Otherwise, we
690 	 * will have leaked memory and what's worse, the CS object (and
691 	 * potentially the CTX object) could be released, while the JOB
692 	 * still holds a pointer to them (but no reference).
693 	 */
694 	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
695 		hl_complete_job(hdev, job);
696 
697 	if (!cs->submitted) {
698 		/*
699 		 * In case the wait for signal CS was submitted, the fence put
700 		 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
701 		 * right before hanging on the PQ.
702 		 */
703 		if (cs->type == CS_TYPE_WAIT ||
704 				cs->type == CS_TYPE_COLLECTIVE_WAIT)
705 			hl_fence_put(cs->signal_fence);
706 
707 		goto out;
708 	}
709 
710 	/* Need to update CI for all queue jobs that do not get completion */
711 	hl_hw_queue_update_ci(cs);
712 
713 	/* remove CS from CS mirror list */
714 	spin_lock(&hdev->cs_mirror_lock);
715 	list_del_init(&cs->mirror_node);
716 	spin_unlock(&hdev->cs_mirror_lock);
717 
718 	cs_handle_tdr(hdev, cs);
719 
720 	if (cs->staged_cs) {
721 		/* the completion CS decrements reference for the entire
722 		 * staged submission
723 		 */
724 		if (cs->staged_last) {
725 			struct hl_cs *staged_cs, *tmp_cs;
726 
727 			list_for_each_entry_safe(staged_cs, tmp_cs,
728 					&cs->staged_cs_node, staged_cs_node)
729 				staged_cs_put(hdev, staged_cs);
730 		}
731 
732 		/* A staged CS will be a member in the list only after it
733 		 * was submitted. We used 'cs_mirror_lock' when inserting
734 		 * it to list so we will use it again when removing it
735 		 */
736 		if (cs->submitted) {
737 			spin_lock(&hdev->cs_mirror_lock);
738 			list_del(&cs->staged_cs_node);
739 			spin_unlock(&hdev->cs_mirror_lock);
740 		}
741 
742 		/* decrement refcount to handle when first staged cs
743 		 * with encaps signals is completed.
744 		 */
745 		if (hl_cs_cmpl->encaps_signals)
746 			kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
747 					hl_encaps_release_handle_and_put_ctx);
748 	}
749 
750 	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
751 		kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
752 
753 out:
754 	/* Must be called before hl_ctx_put because inside we use ctx to get
755 	 * the device
756 	 */
757 	hl_debugfs_remove_cs(cs);
758 
759 	hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
760 
761 	/* We need to mark an error for not submitted because in that case
762 	 * the hl fence release flow is different. Mainly, we don't need
763 	 * to handle hw_sob for signal/wait
764 	 */
765 	if (cs->timedout)
766 		cs->fence->error = -ETIMEDOUT;
767 	else if (cs->aborted)
768 		cs->fence->error = -EIO;
769 	else if (!cs->submitted)
770 		cs->fence->error = -EBUSY;
771 
772 	if (unlikely(cs->skip_reset_on_timeout)) {
773 		dev_err(hdev->dev,
774 			"Command submission %llu completed after %llu (s)\n",
775 			cs->sequence,
776 			div_u64(jiffies - cs->submission_time_jiffies, HZ));
777 	}
778 
779 	if (cs->timestamp) {
780 		cs->fence->timestamp = cs->completion_timestamp;
781 		hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
782 				   cs->fence->timestamp, cs->fence->error);
783 	}
784 
785 	hl_ctx_put(cs->ctx);
786 
787 	complete_all(&cs->fence->completion);
788 	complete_multi_cs(hdev, cs);
789 
790 	cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
791 
792 	hl_fence_put(cs->fence);
793 
794 	kfree(cs->jobs_in_queue_cnt);
795 	kfree(cs);
796 }
797 
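/*
 * cs_timedout - TDR (timeout detection and recovery) work of a CS
 *
 * Marks the CS as timed out, records the first timeout occurrence, prints a
 * message according to the CS type, dumps the device state and triggers a
 * device reset if reset-on-lockup is enabled.
 */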
798 static void cs_timedout(struct work_struct *work)
799 {
800 	struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work);
801 	bool skip_reset_on_timeout, device_reset = false;
802 	struct hl_device *hdev;
803 	u64 event_mask = 0x0;
804 	uint timeout_sec;
805 	int rc;
806 
807 	skip_reset_on_timeout = cs->skip_reset_on_timeout;
808 
809 	rc = cs_get_unless_zero(cs);
810 	if (!rc)
811 		return;
812 
813 	if ((!cs->submitted) || (cs->completed)) {
814 		cs_put(cs);
815 		return;
816 	}
817 
818 	hdev = cs->ctx->hdev;
819 
820 	if (likely(!skip_reset_on_timeout)) {
821 		if (hdev->reset_on_lockup)
822 			device_reset = true;
823 		else
824 			hdev->reset_info.needs_reset = true;
825 
826 		/* Mark that the CS is timed out so we won't try to cancel its TDR */
827 		cs->timedout = true;
828 	}
829 
830 	/* Save only the first CS timeout parameters */
831 	rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
832 	if (rc) {
833 		hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
834 		hdev->captured_err_info.cs_timeout.seq = cs->sequence;
835 		event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
836 	}
837 
838 	timeout_sec = jiffies_to_msecs(hdev->timeout_jiffies) / 1000;
839 
840 	switch (cs->type) {
841 	case CS_TYPE_SIGNAL:
842 		dev_err(hdev->dev,
843 			"Signal command submission %llu has not finished in %u seconds!\n",
844 			cs->sequence, timeout_sec);
845 		break;
846 
847 	case CS_TYPE_WAIT:
848 		dev_err(hdev->dev,
849 			"Wait command submission %llu has not finished in %u seconds!\n",
850 			cs->sequence, timeout_sec);
851 		break;
852 
853 	case CS_TYPE_COLLECTIVE_WAIT:
854 		dev_err(hdev->dev,
855 			"Collective Wait command submission %llu has not finished in %u seconds!\n",
856 			cs->sequence, timeout_sec);
857 		break;
858 
859 	default:
860 		dev_err(hdev->dev,
861 			"Command submission %llu has not finished in %u seconds!\n",
862 			cs->sequence, timeout_sec);
863 		break;
864 	}
865 
866 	rc = hl_state_dump(hdev);
867 	if (rc)
868 		dev_err(hdev->dev, "Error during system state dump %d\n", rc);
869 
870 	cs_put(cs);
871 
872 	if (device_reset) {
873 		event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
874 		hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
875 	} else if (event_mask) {
876 		hl_notifier_event_send_all(hdev, event_mask);
877 	}
878 }
879 
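/*
 * allocate_cs - allocate and initialize a new CS object and its fence
 *
 * Takes a context refcount, allocates the CS, its completion object and the
 * per-queue job counters, assigns the next sequence number and installs the
 * new fence in the context's cs_pending array. Fails with -EAGAIN if the
 * relevant cs_pending slot is still occupied by an uncompleted CS.
 */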
880 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
881 			enum hl_cs_type cs_type, u64 user_sequence,
882 			struct hl_cs **cs_new, u32 flags, u32 timeout)
883 {
884 	struct hl_cs_counters_atomic *cntr;
885 	struct hl_fence *other = NULL;
886 	struct hl_cs_compl *cs_cmpl;
887 	struct hl_cs *cs;
888 	int rc;
889 
890 	cntr = &hdev->aggregated_cs_counters;
891 
892 	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
893 	if (!cs)
894 		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
895 
896 	if (!cs) {
897 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
898 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
899 		return -ENOMEM;
900 	}
901 
902 	/* increment refcnt for context */
903 	hl_ctx_get(ctx);
904 
905 	cs->ctx = ctx;
906 	cs->submitted = false;
907 	cs->completed = false;
908 	cs->type = cs_type;
909 	cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
910 	cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
911 	cs->timeout_jiffies = timeout;
912 	cs->skip_reset_on_timeout =
913 		hdev->reset_info.skip_reset_on_timeout ||
914 		!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
915 	cs->submission_time_jiffies = jiffies;
916 	INIT_LIST_HEAD(&cs->job_list);
917 	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
918 	kref_init(&cs->refcount);
919 	spin_lock_init(&cs->job_lock);
920 
921 	cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
922 	if (!cs_cmpl)
923 		cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
924 
925 	if (!cs_cmpl) {
926 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
927 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
928 		rc = -ENOMEM;
929 		goto free_cs;
930 	}
931 
932 	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
933 			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
934 	if (!cs->jobs_in_queue_cnt)
935 		cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
936 				sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
937 
938 	if (!cs->jobs_in_queue_cnt) {
939 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
940 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
941 		rc = -ENOMEM;
942 		goto free_cs_cmpl;
943 	}
944 
945 	cs_cmpl->hdev = hdev;
946 	cs_cmpl->type = cs->type;
947 	spin_lock_init(&cs_cmpl->lock);
948 	cs->fence = &cs_cmpl->base_fence;
949 
950 	spin_lock(&ctx->cs_lock);
951 
952 	cs_cmpl->cs_seq = ctx->cs_sequence;
953 	other = ctx->cs_pending[cs_cmpl->cs_seq &
954 				(hdev->asic_prop.max_pending_cs - 1)];
955 
956 	if (other && !completion_done(&other->completion)) {
957 		/* If the following statement is true, it means we have reached
958 		 * a point in which only part of the staged submission was
959 		 * submitted and we don't have enough room in the 'cs_pending'
960 		 * array for the rest of the submission.
961 		 * This causes a deadlock because this CS will never be
962 		 * completed as it depends on future CS's for completion.
963 		 */
964 		if (other->cs_sequence == user_sequence)
965 			dev_crit_ratelimited(hdev->dev,
966 				"Staged CS %llu deadlock due to lack of resources",
967 				user_sequence);
968 
969 		dev_dbg_ratelimited(hdev->dev,
970 			"Rejecting CS because of too many in-flights CS\n");
971 		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
972 		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
973 		rc = -EAGAIN;
974 		goto free_fence;
975 	}
976 
977 	/* init hl_fence */
978 	hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
979 
980 	cs->sequence = cs_cmpl->cs_seq;
981 
982 	ctx->cs_pending[cs_cmpl->cs_seq &
983 			(hdev->asic_prop.max_pending_cs - 1)] =
984 							&cs_cmpl->base_fence;
985 	ctx->cs_sequence++;
986 
987 	hl_fence_get(&cs_cmpl->base_fence);
988 
989 	hl_fence_put(other);
990 
991 	spin_unlock(&ctx->cs_lock);
992 
993 	*cs_new = cs;
994 
995 	return 0;
996 
997 free_fence:
998 	spin_unlock(&ctx->cs_lock);
999 	kfree(cs->jobs_in_queue_cnt);
1000 free_cs_cmpl:
1001 	kfree(cs_cmpl);
1002 free_cs:
1003 	kfree(cs);
1004 	hl_ctx_put(ctx);
1005 	return rc;
1006 }
1007 
1008 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
1009 {
1010 	struct hl_cs_job *job, *tmp;
1011 
1012 	staged_cs_put(hdev, cs);
1013 
1014 	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1015 		hl_complete_job(hdev, job);
1016 }
1017 
1018 /*
1019  * release_reserved_encaps_signals() - release reserved encapsulated signals.
1020  * @hdev: pointer to habanalabs device structure
1021  *
1022  * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
1023  * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
1024  * For these signals we also need to put the refcount of the H/W SOB which was taken at the
1025  * reservation.
1026  */
1027 static void release_reserved_encaps_signals(struct hl_device *hdev)
1028 {
1029 	struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
1030 	struct hl_cs_encaps_sig_handle *handle;
1031 	struct hl_encaps_signals_mgr *mgr;
1032 	u32 id;
1033 
1034 	if (!ctx)
1035 		return;
1036 
1037 	mgr = &ctx->sig_mgr;
1038 
1039 	idr_for_each_entry(&mgr->handles, handle, id)
1040 		if (handle->cs_seq == ULLONG_MAX)
1041 			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
1042 
1043 	hl_ctx_put(ctx);
1044 }
1045 
1046 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
1047 {
1048 	int i;
1049 	struct hl_cs *cs, *tmp;
1050 
1051 	if (!skip_wq_flush) {
1052 		flush_workqueue(hdev->ts_free_obj_wq);
1053 
1054 		/* flush all completions before iterating over the CS mirror list in
1055 		 * order to avoid a race with the release functions
1056 		 */
1057 		for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1058 			flush_workqueue(hdev->cq_wq[i]);
1059 
1060 		flush_workqueue(hdev->cs_cmplt_wq);
1061 	}
1062 
1063 	/* Make sure we don't have leftovers in the CS mirror list */
1064 	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
1065 		cs_get(cs);
1066 		cs->aborted = true;
1067 		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
1068 					cs->ctx->asid, cs->sequence);
1069 		cs_rollback(hdev, cs);
1070 		cs_put(cs);
1071 	}
1072 
1073 	force_complete_multi_cs(hdev);
1074 
1075 	release_reserved_encaps_signals(hdev);
1076 }
1077 
1078 static void
1079 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
1080 {
1081 	struct hl_user_pending_interrupt *pend, *temp;
1082 
1083 	spin_lock(&interrupt->wait_list_lock);
1084 	list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
1085 		if (pend->ts_reg_info.buf) {
1086 			list_del(&pend->wait_list_node);
1087 			hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
1088 			hl_cb_put(pend->ts_reg_info.cq_cb);
1089 		} else {
1090 			pend->fence.error = -EIO;
1091 			complete_all(&pend->fence.completion);
1092 		}
1093 	}
1094 	spin_unlock(&interrupt->wait_list_lock);
1095 }
1096 
1097 void hl_release_pending_user_interrupts(struct hl_device *hdev)
1098 {
1099 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1100 	struct hl_user_interrupt *interrupt;
1101 	int i;
1102 
1103 	if (!prop->user_interrupt_count)
1104 		return;
1105 
1106 	/* We iterate through the user interrupt requests and wake up all
1107 	 * user threads waiting for interrupt completion. We iterate the
1108 	 * list under a lock, this is why all user threads, once awake,
1109 	 * will wait on the same lock and will release the waiting object upon
1110 	 * unlock.
1111 	 */
1112 
1113 	for (i = 0 ; i < prop->user_interrupt_count ; i++) {
1114 		interrupt = &hdev->user_interrupt[i];
1115 		wake_pending_user_interrupt_threads(interrupt);
1116 	}
1117 
1118 	interrupt = &hdev->common_user_cq_interrupt;
1119 	wake_pending_user_interrupt_threads(interrupt);
1120 
1121 	interrupt = &hdev->common_decoder_interrupt;
1122 	wake_pending_user_interrupt_threads(interrupt);
1123 }
1124 
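/*
 * force_complete_cs - force completion of all CSs in the mirror list
 *
 * Sets -EIO on each fence and wakes all waiters. Used when aborting waits
 * for CS completions.
 */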
1125 static void force_complete_cs(struct hl_device *hdev)
1126 {
1127 	struct hl_cs *cs;
1128 
1129 	spin_lock(&hdev->cs_mirror_lock);
1130 
1131 	list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
1132 		cs->fence->error = -EIO;
1133 		complete_all(&cs->fence->completion);
1134 	}
1135 
1136 	spin_unlock(&hdev->cs_mirror_lock);
1137 }
1138 
1139 void hl_abort_waiting_for_cs_completions(struct hl_device *hdev)
1140 {
1141 	force_complete_cs(hdev);
1142 	force_complete_multi_cs(hdev);
1143 }
1144 
1145 static void job_wq_completion(struct work_struct *work)
1146 {
1147 	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
1148 						finish_work);
1149 	struct hl_cs *cs = job->cs;
1150 	struct hl_device *hdev = cs->ctx->hdev;
1151 
1152 	/* job is no longer needed */
1153 	hl_complete_job(hdev, job);
1154 }
1155 
1156 static void cs_completion(struct work_struct *work)
1157 {
1158 	struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
1159 	struct hl_device *hdev = cs->ctx->hdev;
1160 	struct hl_cs_job *job, *tmp;
1161 
1162 	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1163 		hl_complete_job(hdev, job);
1164 }
1165 
1166 u32 hl_get_active_cs_num(struct hl_device *hdev)
1167 {
1168 	u32 active_cs_num = 0;
1169 	struct hl_cs *cs;
1170 
1171 	spin_lock(&hdev->cs_mirror_lock);
1172 
1173 	list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
1174 		if (!cs->completed)
1175 			active_cs_num++;
1176 
1177 	spin_unlock(&hdev->cs_mirror_lock);
1178 
1179 	return active_cs_num;
1180 }
1181 
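/*
 * validate_queue_index - validate the queue index of a CS chunk
 *
 * Verifies the index is in range and that the queue is usable (not NA,
 * binned or driver-only), and derives the queue type and whether the CB is
 * expected to be kernel-allocated.
 */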
1182 static int validate_queue_index(struct hl_device *hdev,
1183 				struct hl_cs_chunk *chunk,
1184 				enum hl_queue_type *queue_type,
1185 				bool *is_kernel_allocated_cb)
1186 {
1187 	struct asic_fixed_properties *asic = &hdev->asic_prop;
1188 	struct hw_queue_properties *hw_queue_prop;
1189 
1190 	/* This must be checked here to prevent out-of-bounds access to
1191 	 * hw_queues_props array
1192 	 */
1193 	if (chunk->queue_index >= asic->max_queues) {
1194 		dev_err(hdev->dev, "Queue index %d is invalid\n",
1195 			chunk->queue_index);
1196 		return -EINVAL;
1197 	}
1198 
1199 	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
1200 
1201 	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
1202 		dev_err(hdev->dev, "Queue index %d is not applicable\n",
1203 			chunk->queue_index);
1204 		return -EINVAL;
1205 	}
1206 
1207 	if (hw_queue_prop->binned) {
1208 		dev_err(hdev->dev, "Queue index %d is binned out\n",
1209 			chunk->queue_index);
1210 		return -EINVAL;
1211 	}
1212 
1213 	if (hw_queue_prop->driver_only) {
1214 		dev_err(hdev->dev,
1215 			"Queue index %d is restricted for the kernel driver\n",
1216 			chunk->queue_index);
1217 		return -EINVAL;
1218 	}
1219 
1220 	/* When hw queue type isn't QUEUE_TYPE_HW,
1221 	 * USER_ALLOC_CB flag shall be treated as "don't care".
1222 	 */
1223 	if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1224 		if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1225 			if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1226 				dev_err(hdev->dev,
1227 					"Queue index %d doesn't support user CB\n",
1228 					chunk->queue_index);
1229 				return -EINVAL;
1230 			}
1231 
1232 			*is_kernel_allocated_cb = false;
1233 		} else {
1234 			if (!(hw_queue_prop->cb_alloc_flags &
1235 					CB_ALLOC_KERNEL)) {
1236 				dev_err(hdev->dev,
1237 					"Queue index %d doesn't support kernel CB\n",
1238 					chunk->queue_index);
1239 				return -EINVAL;
1240 			}
1241 
1242 			*is_kernel_allocated_cb = true;
1243 		}
1244 	} else {
1245 		*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1246 						& CB_ALLOC_KERNEL);
1247 	}
1248 
1249 	*queue_type = hw_queue_prop->type;
1250 	return 0;
1251 }
1252 
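/*
 * get_cb_from_cs_chunk - look up and validate the CB referenced by a chunk
 *
 * Takes a reference on the CB, verifies the requested size is at least 8
 * bytes and within the CB bounds, and increments its CS counter. Returns
 * NULL on any failure.
 */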
1253 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1254 					struct hl_mem_mgr *mmg,
1255 					struct hl_cs_chunk *chunk)
1256 {
1257 	struct hl_cb *cb;
1258 
1259 	cb = hl_cb_get(mmg, chunk->cb_handle);
1260 	if (!cb) {
1261 		dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
1262 		return NULL;
1263 	}
1264 
1265 	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1266 		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1267 		goto release_cb;
1268 	}
1269 
1270 	atomic_inc(&cb->cs_cnt);
1271 
1272 	return cb;
1273 
1274 release_cb:
1275 	hl_cb_put(cb);
1276 	return NULL;
1277 }
1278 
1279 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1280 		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1281 {
1282 	struct hl_cs_job *job;
1283 
1284 	job = kzalloc(sizeof(*job), GFP_ATOMIC);
1285 	if (!job)
1286 		job = kzalloc(sizeof(*job), GFP_KERNEL);
1287 
1288 	if (!job)
1289 		return NULL;
1290 
1291 	kref_init(&job->refcount);
1292 	job->queue_type = queue_type;
1293 	job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1294 
1295 	if (is_cb_patched(hdev, job))
1296 		INIT_LIST_HEAD(&job->userptr_list);
1297 
1298 	if (job->queue_type == QUEUE_TYPE_EXT)
1299 		INIT_WORK(&job->finish_work, job_wq_completion);
1300 
1301 	return job;
1302 }
1303 
1304 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1305 {
1306 	if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1307 		return CS_TYPE_SIGNAL;
1308 	else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1309 		return CS_TYPE_WAIT;
1310 	else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1311 		return CS_TYPE_COLLECTIVE_WAIT;
1312 	else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1313 		return CS_RESERVE_SIGNALS;
1314 	else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1315 		return CS_UNRESERVE_SIGNALS;
1316 	else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
1317 		return CS_TYPE_ENGINE_CORE;
1318 	else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND)
1319 		return CS_TYPE_ENGINES;
1320 	else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
1321 		return CS_TYPE_FLUSH_PCI_HBW_WRITES;
1322 	else
1323 		return CS_TYPE_DEFAULT;
1324 }
1325 
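/*
 * hl_cs_sanity_checks - validate the user CS arguments before submission
 *
 * Checks padding, device state, staged-submission and sync-stream support,
 * that the CS type flags are mutually exclusive and that the number of
 * chunks matches the CS type.
 */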
1326 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1327 {
1328 	struct hl_device *hdev = hpriv->hdev;
1329 	struct hl_ctx *ctx = hpriv->ctx;
1330 	u32 cs_type_flags, num_chunks;
1331 	enum hl_device_status status;
1332 	enum hl_cs_type cs_type;
1333 	bool is_sync_stream;
1334 	int i;
1335 
1336 	for (i = 0 ; i < sizeof(args->in.pad) ; i++)
1337 		if (args->in.pad[i]) {
1338 			dev_dbg(hdev->dev, "Padding bytes must be 0\n");
1339 			return -EINVAL;
1340 		}
1341 
1342 	if (!hl_device_operational(hdev, &status)) {
1343 		return -EBUSY;
1344 	}
1345 
1346 	if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1347 			!hdev->supports_staged_submission) {
1348 		dev_err(hdev->dev, "staged submission not supported");
1349 		return -EPERM;
1350 	}
1351 
1352 	cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1353 
1354 	if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1355 		dev_err(hdev->dev,
1356 			"CS type flags are mutually exclusive, context %d\n",
1357 			ctx->asid);
1358 		return -EINVAL;
1359 	}
1360 
1361 	cs_type = hl_cs_get_cs_type(cs_type_flags);
1362 	num_chunks = args->in.num_chunks_execute;
1363 
1364 	is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
1365 			cs_type == CS_TYPE_COLLECTIVE_WAIT);
1366 
1367 	if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
1368 		dev_err(hdev->dev, "Sync stream CS is not supported\n");
1369 		return -EINVAL;
1370 	}
1371 
1372 	if (cs_type == CS_TYPE_DEFAULT) {
1373 		if (!num_chunks) {
1374 			dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
1375 			return -EINVAL;
1376 		}
1377 	} else if (is_sync_stream && num_chunks != 1) {
1378 		dev_err(hdev->dev,
1379 			"Sync stream CS mandates one chunk only, context %d\n",
1380 			ctx->asid);
1381 		return -EINVAL;
1382 	}
1383 
1384 	return 0;
1385 }
1386 
1387 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1388 					struct hl_cs_chunk **cs_chunk_array,
1389 					void __user *chunks, u32 num_chunks,
1390 					struct hl_ctx *ctx)
1391 {
1392 	u32 size_to_copy;
1393 
1394 	if (num_chunks > HL_MAX_JOBS_PER_CS) {
1395 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1396 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1397 		dev_err(hdev->dev,
1398 			"Number of chunks can NOT be larger than %d\n",
1399 			HL_MAX_JOBS_PER_CS);
1400 		return -EINVAL;
1401 	}
1402 
1403 	*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1404 					GFP_ATOMIC);
1405 	if (!*cs_chunk_array)
1406 		*cs_chunk_array = kmalloc_array(num_chunks,
1407 					sizeof(**cs_chunk_array), GFP_KERNEL);
1408 	if (!*cs_chunk_array) {
1409 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1410 		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1411 		return -ENOMEM;
1412 	}
1413 
1414 	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1415 	if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1416 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1417 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1418 		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1419 		kfree(*cs_chunk_array);
1420 		return -EFAULT;
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1427 				u64 sequence, u32 flags,
1428 				u32 encaps_signal_handle)
1429 {
1430 	if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1431 		return 0;
1432 
1433 	cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1434 	cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1435 
1436 	if (cs->staged_first) {
1437 		/* Staged CS sequence is the first CS sequence */
1438 		INIT_LIST_HEAD(&cs->staged_cs_node);
1439 		cs->staged_sequence = cs->sequence;
1440 
1441 		if (cs->encaps_signals)
1442 			cs->encaps_sig_hdl_id = encaps_signal_handle;
1443 	} else {
1444 		/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1445 		 * under the cs_mirror_lock
1446 		 */
1447 		cs->staged_sequence = sequence;
1448 	}
1449 
1450 	/* Increment CS reference if needed */
1451 	staged_cs_get(hdev, cs);
1452 
1453 	cs->staged_cs = true;
1454 
1455 	return 0;
1456 }
1457 
1458 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1459 {
1460 	int i;
1461 
1462 	for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1463 		if (qid == hdev->stream_master_qid_arr[i])
1464 			return BIT(i);
1465 
1466 	return 0;
1467 }
1468 
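/*
 * cs_ioctl_default - handle submission of a default (execute) CS
 *
 * Copies the chunk array from user space, allocates a CS, builds and parses
 * a job per chunk, and schedules the CS on the H/W queues. On any failure
 * the CS is rolled back.
 */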
1469 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1470 				u32 num_chunks, u64 *cs_seq, u32 flags,
1471 				u32 encaps_signals_handle, u32 timeout,
1472 				u16 *signal_initial_sob_count)
1473 {
1474 	bool staged_mid, int_queues_only = true, using_hw_queues = false;
1475 	struct hl_device *hdev = hpriv->hdev;
1476 	struct hl_cs_chunk *cs_chunk_array;
1477 	struct hl_cs_counters_atomic *cntr;
1478 	struct hl_ctx *ctx = hpriv->ctx;
1479 	struct hl_cs_job *job;
1480 	struct hl_cs *cs;
1481 	struct hl_cb *cb;
1482 	u64 user_sequence;
1483 	u8 stream_master_qid_map = 0;
1484 	int rc, i;
1485 
1486 	cntr = &hdev->aggregated_cs_counters;
1487 	user_sequence = *cs_seq;
1488 	*cs_seq = ULLONG_MAX;
1489 
1490 	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1491 			hpriv->ctx);
1492 	if (rc)
1493 		goto out;
1494 
1495 	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1496 			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1497 		staged_mid = true;
1498 	else
1499 		staged_mid = false;
1500 
1501 	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1502 			staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1503 			timeout);
1504 	if (rc)
1505 		goto free_cs_chunk_array;
1506 
1507 	*cs_seq = cs->sequence;
1508 
1509 	hl_debugfs_add_cs(cs);
1510 
1511 	rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1512 						encaps_signals_handle);
1513 	if (rc)
1514 		goto free_cs_object;
1515 
1516 	/* If this is a staged submission we must return the staged sequence
1517 	 * rather than the internal CS sequence
1518 	 */
1519 	if (cs->staged_cs)
1520 		*cs_seq = cs->staged_sequence;
1521 
1522 	/* Validate ALL the CS chunks before submitting the CS */
1523 	for (i = 0 ; i < num_chunks ; i++) {
1524 		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1525 		enum hl_queue_type queue_type;
1526 		bool is_kernel_allocated_cb;
1527 
1528 		rc = validate_queue_index(hdev, chunk, &queue_type,
1529 						&is_kernel_allocated_cb);
1530 		if (rc) {
1531 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1532 			atomic64_inc(&cntr->validation_drop_cnt);
1533 			goto free_cs_object;
1534 		}
1535 
1536 		if (is_kernel_allocated_cb) {
1537 			cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
1538 			if (!cb) {
1539 				atomic64_inc(
1540 					&ctx->cs_counters.validation_drop_cnt);
1541 				atomic64_inc(&cntr->validation_drop_cnt);
1542 				rc = -EINVAL;
1543 				goto free_cs_object;
1544 			}
1545 		} else {
1546 			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1547 		}
1548 
1549 		if (queue_type == QUEUE_TYPE_EXT ||
1550 						queue_type == QUEUE_TYPE_HW) {
1551 			int_queues_only = false;
1552 
1553 			/*
1554 			 * store which streams are being used for external/HW
1555 			 * queues of this CS
1556 			 */
1557 			if (hdev->supports_wait_for_multi_cs)
1558 				stream_master_qid_map |=
1559 					get_stream_master_qid_mask(hdev,
1560 							chunk->queue_index);
1561 		}
1562 
1563 		if (queue_type == QUEUE_TYPE_HW)
1564 			using_hw_queues = true;
1565 
1566 		job = hl_cs_allocate_job(hdev, queue_type,
1567 						is_kernel_allocated_cb);
1568 		if (!job) {
1569 			atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1570 			atomic64_inc(&cntr->out_of_mem_drop_cnt);
1571 			dev_err(hdev->dev, "Failed to allocate a new job\n");
1572 			rc = -ENOMEM;
1573 			if (is_kernel_allocated_cb)
1574 				goto release_cb;
1575 
1576 			goto free_cs_object;
1577 		}
1578 
1579 		job->id = i + 1;
1580 		job->cs = cs;
1581 		job->user_cb = cb;
1582 		job->user_cb_size = chunk->cb_size;
1583 		job->hw_queue_id = chunk->queue_index;
1584 
1585 		cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1586 		cs->jobs_cnt++;
1587 
1588 		list_add_tail(&job->cs_node, &cs->job_list);
1589 
1590 		/*
1591 		 * Increment CS reference. When CS reference is 0, CS is
1592 		 * done and can be signaled to user and free all its resources
1593 		 * Only increment for JOB on external or H/W queues, because
1594 		 * only for those JOBs we get completion
1595 		 */
1596 		if (cs_needs_completion(cs) &&
1597 			(job->queue_type == QUEUE_TYPE_EXT ||
1598 				job->queue_type == QUEUE_TYPE_HW))
1599 			cs_get(cs);
1600 
1601 		hl_debugfs_add_job(hdev, job);
1602 
1603 		rc = cs_parser(hpriv, job);
1604 		if (rc) {
1605 			atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1606 			atomic64_inc(&cntr->parsing_drop_cnt);
1607 			dev_err(hdev->dev,
1608 				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1609 				cs->ctx->asid, cs->sequence, job->id, rc);
1610 			goto free_cs_object;
1611 		}
1612 	}
1613 
1614 	/* We allow a CS with any queue type combination as long as it does
1615 	 * not get a completion
1616 	 */
1617 	if (int_queues_only && cs_needs_completion(cs)) {
1618 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1619 		atomic64_inc(&cntr->validation_drop_cnt);
1620 		dev_err(hdev->dev,
1621 			"Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1622 			cs->ctx->asid, cs->sequence);
1623 		rc = -EINVAL;
1624 		goto free_cs_object;
1625 	}
1626 
1627 	if (using_hw_queues)
1628 		INIT_WORK(&cs->finish_work, cs_completion);
1629 
1630 	/*
1631 	 * store the (external/HW queues) streams used by the CS in the
1632 	 * fence object for multi-CS completion
1633 	 */
1634 	if (hdev->supports_wait_for_multi_cs)
1635 		cs->fence->stream_master_qid_map = stream_master_qid_map;
1636 
1637 	rc = hl_hw_queue_schedule_cs(cs);
1638 	if (rc) {
1639 		if (rc != -EAGAIN)
1640 			dev_err(hdev->dev,
1641 				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
1642 				cs->ctx->asid, cs->sequence, rc);
1643 		goto free_cs_object;
1644 	}
1645 
1646 	*signal_initial_sob_count = cs->initial_sob_count;
1647 
1648 	rc = HL_CS_STATUS_SUCCESS;
1649 	goto put_cs;
1650 
1651 release_cb:
1652 	atomic_dec(&cb->cs_cnt);
1653 	hl_cb_put(cb);
1654 free_cs_object:
1655 	cs_rollback(hdev, cs);
1656 	*cs_seq = ULLONG_MAX;
1657 	/* The path below is both for good and erroneous exits */
1658 put_cs:
1659 	/* We finished with the CS in this function, so put the ref */
1660 	cs_put(cs);
1661 free_cs_chunk_array:
1662 	kfree(cs_chunk_array);
1663 out:
1664 	return rc;
1665 }
1666 
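/*
 * hl_cs_ctx_switch - run the context-switch/restore phase before execution
 *
 * On the first CS of a context (or when forced), performs the ASIC context
 * switch, submits the user's restore CS and waits for it to complete before
 * execute CSs are allowed. May trigger a soft reset if the switch fails.
 */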
1667 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1668 				u64 *cs_seq)
1669 {
1670 	struct hl_device *hdev = hpriv->hdev;
1671 	struct hl_ctx *ctx = hpriv->ctx;
1672 	bool need_soft_reset = false;
1673 	int rc = 0, do_ctx_switch = 0;
1674 	void __user *chunks;
1675 	u32 num_chunks, tmp;
1676 	u16 sob_count;
1677 	int ret;
1678 
1679 	if (hdev->supports_ctx_switch)
1680 		do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1681 
1682 	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1683 		mutex_lock(&hpriv->restore_phase_mutex);
1684 
1685 		if (do_ctx_switch) {
1686 			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1687 			if (rc) {
1688 				dev_err_ratelimited(hdev->dev,
1689 					"Failed to switch to context %d, rejecting CS! %d\n",
1690 					ctx->asid, rc);
1691 				/*
1692 				 * If we timedout, or if the device is not IDLE
1693 				 * while we want to do context-switch (-EBUSY),
1694 				 * we need to soft-reset because QMAN is
1695 				 * probably stuck. However, we can't call to
1696 				 * reset here directly because of deadlock, so
1697 				 * need to do it at the very end of this
1698 				 * function
1699 				 */
1700 				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1701 					need_soft_reset = true;
1702 				mutex_unlock(&hpriv->restore_phase_mutex);
1703 				goto out;
1704 			}
1705 		}
1706 
1707 		hdev->asic_funcs->restore_phase_topology(hdev);
1708 
1709 		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1710 		num_chunks = args->in.num_chunks_restore;
1711 
1712 		if (!num_chunks) {
1713 			dev_dbg(hdev->dev,
1714 				"Need to run restore phase but restore CS is empty\n");
1715 			rc = 0;
1716 		} else {
1717 			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1718 					cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
1719 		}
1720 
1721 		mutex_unlock(&hpriv->restore_phase_mutex);
1722 
1723 		if (rc) {
1724 			dev_err(hdev->dev,
1725 				"Failed to submit restore CS for context %d (%d)\n",
1726 				ctx->asid, rc);
1727 			goto out;
1728 		}
1729 
1730 		/* Need to wait for restore completion before execution phase */
1731 		if (num_chunks) {
1732 			enum hl_cs_wait_status status;
1733 wait_again:
1734 			ret = _hl_cs_wait_ioctl(hdev, ctx,
1735 					jiffies_to_usecs(hdev->timeout_jiffies),
1736 					*cs_seq, &status, NULL);
1737 			if (ret) {
1738 				if (ret == -ERESTARTSYS) {
1739 					usleep_range(100, 200);
1740 					goto wait_again;
1741 				}
1742 
1743 				dev_err(hdev->dev,
1744 					"Restore CS for context %d failed to complete %d\n",
1745 					ctx->asid, ret);
1746 				rc = -ENOEXEC;
1747 				goto out;
1748 			}
1749 		}
1750 
1751 		if (hdev->supports_ctx_switch)
1752 			ctx->thread_ctx_switch_wait_token = 1;
1753 
1754 	} else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
1755 		rc = hl_poll_timeout_memory(hdev,
1756 			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1757 			100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1758 
1759 		if (rc == -ETIMEDOUT) {
1760 			dev_err(hdev->dev,
1761 				"context switch phase timeout (%d)\n", tmp);
1762 			goto out;
1763 		}
1764 	}
1765 
1766 out:
1767 	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1768 		hl_device_reset(hdev, 0);
1769 
1770 	return rc;
1771 }
1772 
1773 /*
1774  * hl_cs_signal_sob_wraparound_handler: handle the SOB value wraparound case.
1775  * If the SOB value reaches the max value, move to the other SOB reserved
1776  * for the queue.
1777  * @hdev: pointer to device structure
1778  * @q_idx: stream queue index
1779  * @hw_sob: the H/W SOB used in this signal CS.
1780  * @count: signals count
1781  * @encaps_sig: tells whether it's reservation for encaps signals or not.
1782  *
1783  * Note that this function must be called while hw_queues_lock is taken.
1784  */
1785 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1786 			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1787 
1788 {
1789 	struct hl_sync_stream_properties *prop;
1790 	struct hl_hw_sob *sob = *hw_sob, *other_sob;
1791 	u8 other_sob_offset;
1792 
1793 	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1794 
1795 	hw_sob_get(sob);
1796 
1797 	/* check for wraparound */
1798 	if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1799 		/*
1800 		 * Decrement as we reached the max value.
1801 		 * The release function won't be called here as we've
1802 		 * just incremented the refcount right before calling this
1803 		 * function.
1804 		 */
1805 		hw_sob_put_err(sob);
1806 
1807 		/*
1808 		 * check the other SOB value; if it is still in use then fail,
1809 		 * otherwise make the switch
1810 		 */
1811 		other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1812 		other_sob = &prop->hw_sob[other_sob_offset];
1813 
1814 		if (kref_read(&other_sob->kref) != 1) {
1815 			dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1816 								q_idx);
1817 			return -EINVAL;
1818 		}
1819 
1820 		/*
1821 		 * next_sob_val always points to the next available signal
1822 		 * in the sob, so in encaps signals it will be the next one
1823 		 * after reserving the required amount.
1824 		 */
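		/*
		 * Illustrative example (arbitrary numbers): reserving 8 encaps
		 * signals right after switching to a fresh SOB sets next_sob_val
		 * to 9, i.e. the reserved signals occupy SOB values 1..8.
		 */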
1825 		if (encaps_sig)
1826 			prop->next_sob_val = count + 1;
1827 		else
1828 			prop->next_sob_val = count;
1829 
1830 		/* only two SOBs are currently in use */
1831 		prop->curr_sob_offset = other_sob_offset;
1832 		*hw_sob = other_sob;
1833 
1834 		/*
1835 		 * Check if other_sob needs a reset, and if so do it before using
1836 		 * it for the reservation or the next signal CS.
1837 		 * We do it here, for both the encaps and the regular signal CS
1838 		 * cases, in order to avoid a possible race of two kref_put calls
1839 		 * on the SOB, which could occur at the same time if we moved the
1840 		 * SOB reset (kref_put) to the cs_do_release function.
1841 		 * In addition, if we have a combination of signal CS and encaps
1842 		 * signals, and at the point the SOB needs a reset there are no
1843 		 * more reservations and only signal CSs keep coming, then the
1844 		 * signal CS must be the one to put the refcount and reset the
1845 		 * SOB.
1846 		 */
1847 		if (other_sob->need_reset)
1848 			hw_sob_put(other_sob);
1849 
1850 		if (encaps_sig) {
1851 			/* set reset indication for the sob */
1852 			sob->need_reset = true;
1853 			hw_sob_get(other_sob);
1854 		}
1855 
1856 		dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1857 				prop->curr_sob_offset, q_idx);
1858 	} else {
1859 		prop->next_sob_val += count;
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1866 		struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1867 		bool encaps_signals)
1868 {
1869 	u64 *signal_seq_arr = NULL;
1870 	u32 size_to_copy, signal_seq_arr_len;
1871 	int rc = 0;
1872 
1873 	if (encaps_signals) {
1874 		*signal_seq = chunk->encaps_signal_seq;
1875 		return 0;
1876 	}
1877 
1878 	signal_seq_arr_len = chunk->num_signal_seq_arr;
1879 
1880 	/* currently only one signal seq is supported */
1881 	if (signal_seq_arr_len != 1) {
1882 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1883 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1884 		dev_err(hdev->dev,
1885 			"Wait for signal CS supports only one signal CS seq\n");
1886 		return -EINVAL;
1887 	}
1888 
1889 	signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1890 					sizeof(*signal_seq_arr),
1891 					GFP_ATOMIC);
1892 	if (!signal_seq_arr)
1893 		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1894 					sizeof(*signal_seq_arr),
1895 					GFP_KERNEL);
1896 	if (!signal_seq_arr) {
1897 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1898 		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1899 		return -ENOMEM;
1900 	}
1901 
1902 	size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1903 	if (copy_from_user(signal_seq_arr,
1904 				u64_to_user_ptr(chunk->signal_seq_arr),
1905 				size_to_copy)) {
1906 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1907 		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1908 		dev_err(hdev->dev,
1909 			"Failed to copy signal seq array from user\n");
1910 		rc = -EFAULT;
1911 		goto out;
1912 	}
1913 
1914 	/* currently it is guaranteed to have only one signal seq */
1915 	*signal_seq = signal_seq_arr[0];
1916 
1917 out:
1918 	kfree(signal_seq_arr);
1919 
1920 	return rc;
1921 }
1922 
1923 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1924 		struct hl_ctx *ctx, struct hl_cs *cs,
1925 		enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1926 {
1927 	struct hl_cs_counters_atomic *cntr;
1928 	struct hl_cs_job *job;
1929 	struct hl_cb *cb;
1930 	u32 cb_size;
1931 
1932 	cntr = &hdev->aggregated_cs_counters;
1933 
1934 	job = hl_cs_allocate_job(hdev, q_type, true);
1935 	if (!job) {
1936 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1937 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
1938 		dev_err(hdev->dev, "Failed to allocate a new job\n");
1939 		return -ENOMEM;
1940 	}
1941 
1942 	if (cs->type == CS_TYPE_WAIT)
1943 		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1944 	else
1945 		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1946 
1947 	cb = hl_cb_kernel_create(hdev, cb_size, q_type == QUEUE_TYPE_HW);
1948 	if (!cb) {
1949 		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1950 		atomic64_inc(&cntr->out_of_mem_drop_cnt);
1951 		kfree(job);
1952 		return -EFAULT;
1953 	}
1954 
1955 	job->id = 0;
1956 	job->cs = cs;
1957 	job->user_cb = cb;
1958 	atomic_inc(&job->user_cb->cs_cnt);
1959 	job->user_cb_size = cb_size;
1960 	job->hw_queue_id = q_idx;
1961 
1962 	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1963 			&& cs->encaps_signals)
1964 		job->encaps_sig_wait_offset = encaps_signal_offset;
1965 	/*
1966 	 * No need for parsing, the user CB is the patched CB.
1967 	 * We call hl_cb_destroy() for two reasons - we don't need the CB in
1968 	 * the CB idr anymore, and we need to decrement its refcount as it was
1969 	 * incremented inside hl_cb_kernel_create().
1970 	 */
1971 	job->patched_cb = job->user_cb;
1972 	job->job_cb_size = job->user_cb_size;
1973 	hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
1974 
1975 	/* increment refcount as for external queues we get completion */
1976 	cs_get(cs);
1977 
1978 	cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1979 	cs->jobs_cnt++;
1980 
1981 	list_add_tail(&job->cs_node, &cs->job_list);
1982 
1983 	hl_debugfs_add_job(hdev, job);
1984 
1985 	return 0;
1986 }
1987 
1988 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1989 				u32 q_idx, u32 count,
1990 				u32 *handle_id, u32 *sob_addr,
1991 				u32 *signals_count)
1992 {
1993 	struct hw_queue_properties *hw_queue_prop;
1994 	struct hl_sync_stream_properties *prop;
1995 	struct hl_device *hdev = hpriv->hdev;
1996 	struct hl_cs_encaps_sig_handle *handle;
1997 	struct hl_encaps_signals_mgr *mgr;
1998 	struct hl_hw_sob *hw_sob;
1999 	int hdl_id;
2000 	int rc = 0;
2001 
2002 	if (count >= HL_MAX_SOB_VAL) {
2003 		dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
2004 						count);
2005 		rc = -EINVAL;
2006 		goto out;
2007 	}
2008 
2009 	if (q_idx >= hdev->asic_prop.max_queues) {
2010 		dev_err(hdev->dev, "Queue index %d is invalid\n",
2011 			q_idx);
2012 		rc = -EINVAL;
2013 		goto out;
2014 	}
2015 
2016 	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2017 
2018 	if (!hw_queue_prop->supports_sync_stream) {
2019 		dev_err(hdev->dev,
2020 			"Queue index %d does not support sync stream operations\n",
2021 									q_idx);
2022 		rc = -EINVAL;
2023 		goto out;
2024 	}
2025 
2026 	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2027 
2028 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
2029 	if (!handle) {
2030 		rc = -ENOMEM;
2031 		goto out;
2032 	}
2033 
2034 	handle->count = count;
2035 
2036 	hl_ctx_get(hpriv->ctx);
2037 	handle->ctx = hpriv->ctx;
2038 	mgr = &hpriv->ctx->sig_mgr;
2039 
2040 	spin_lock(&mgr->lock);
2041 	hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
2042 	spin_unlock(&mgr->lock);
2043 
2044 	if (hdl_id < 0) {
2045 		dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
2046 		rc = -EINVAL;
2047 		goto put_ctx;
2048 	}
2049 
2050 	handle->id = hdl_id;
2051 	handle->q_idx = q_idx;
2052 	handle->hdev = hdev;
2053 	kref_init(&handle->refcount);
2054 
2055 	hdev->asic_funcs->hw_queues_lock(hdev);
2056 
2057 	hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2058 
2059 	/*
2060 	 * Increment the SOB value by the user-requested count in order to
2061 	 * reserve those signals.
2062 	 * Check that the amount of signals to reserve does not exceed the max
2063 	 * SOB value; if it does, switch to the other SOB.
2064 	 */
2065 	rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
2066 								true);
2067 	if (rc) {
2068 		dev_err(hdev->dev, "Failed to switch SOB\n");
2069 		hdev->asic_funcs->hw_queues_unlock(hdev);
2070 		rc = -EINVAL;
2071 		goto remove_idr;
2072 	}
2073 	/* store the hw_sob in the handle after calling the SOB wraparound handler,
2074 	 * since the SOB could have changed.
2075 	 */
2076 	handle->hw_sob = hw_sob;
2077 
2078 	/* store the current sob value for unreserve validity check, and
2079 	 * signal offset support
2080 	 */
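	/* (cs_ioctl_unreserve_signals() later verifies that
	 * pre_sob_val + count == next_sob_val, i.e. that no other signal
	 * submission touched the SOB in between.)
	 */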
2081 	handle->pre_sob_val = prop->next_sob_val - handle->count;
2082 
2083 	handle->cs_seq = ULLONG_MAX;
2084 
2085 	*signals_count = prop->next_sob_val;
2086 	hdev->asic_funcs->hw_queues_unlock(hdev);
2087 
2088 	*sob_addr = handle->hw_sob->sob_addr;
2089 	*handle_id = hdl_id;
2090 
2091 	dev_dbg(hdev->dev,
2092 		"Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
2093 			hw_sob->sob_id, handle->hw_sob->sob_addr,
2094 			prop->next_sob_val - 1, q_idx, hdl_id);
2095 	goto out;
2096 
2097 remove_idr:
2098 	spin_lock(&mgr->lock);
2099 	idr_remove(&mgr->handles, hdl_id);
2100 	spin_unlock(&mgr->lock);
2101 
2102 put_ctx:
2103 	hl_ctx_put(handle->ctx);
2104 	kfree(handle);
2105 
2106 out:
2107 	return rc;
2108 }
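
/*
 * Typical encapsulated-signals flow, as implemented in this file:
 * 1. CS_RESERVE_SIGNALS allocates a handle (IDR id) and advances the SOB by
 *    the requested signal count.
 * 2. The handle id is either referenced by a subsequent submission that
 *    carries the encapsulated signals (encaps_sig_handle_id in the default CS
 *    path), or given to CS_UNRESERVE_SIGNALS to roll the reservation back.
 * 3. A wait CS submitted with HL_CS_FLAGS_ENCAP_SIGNALS looks the handle up by
 *    the signal CS sequence and waits on the reserved SOB range (see
 *    cs_ioctl_signal_wait() below).
 */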
2109 
2110 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
2111 {
2112 	struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
2113 	struct hl_sync_stream_properties *prop;
2114 	struct hl_device *hdev = hpriv->hdev;
2115 	struct hl_encaps_signals_mgr *mgr;
2116 	struct hl_hw_sob *hw_sob;
2117 	u32 q_idx, sob_addr;
2118 	int rc = 0;
2119 
2120 	mgr = &hpriv->ctx->sig_mgr;
2121 
2122 	spin_lock(&mgr->lock);
2123 	encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
2124 	if (encaps_sig_hdl) {
2125 		dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
2126 				handle_id, encaps_sig_hdl->hw_sob->sob_addr,
2127 					encaps_sig_hdl->count);
2128 
2129 		hdev->asic_funcs->hw_queues_lock(hdev);
2130 
2131 		q_idx = encaps_sig_hdl->q_idx;
2132 		prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2133 		hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2134 		sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
2135 
2136 		/* Check if sob_val got out of sync due to other
2137 		 * signal submission requests which were handled
2138 		 * between the reserve-unreserve calls, or due to a SOB switch
2139 		 * upon reaching the SOB max value.
2140 		 */
2141 		if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
2142 				!= prop->next_sob_val ||
2143 				sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
2144 			dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
2145 				encaps_sig_hdl->pre_sob_val,
2146 				(prop->next_sob_val - encaps_sig_hdl->count));
2147 
2148 			hdev->asic_funcs->hw_queues_unlock(hdev);
2149 			rc = -EINVAL;
2150 			goto out_unlock;
2151 		}
2152 
2153 		/*
2154 		 * Decrement the SOB value by the user-requested count in order
2155 		 * to unreserve those signals
2156 		 */
2157 		prop->next_sob_val -= encaps_sig_hdl->count;
2158 
2159 		hdev->asic_funcs->hw_queues_unlock(hdev);
2160 
2161 		hw_sob_put(hw_sob);
2162 
2163 		/* Release the id and free allocated memory of the handle */
2164 		idr_remove(&mgr->handles, handle_id);
2165 
2166 		/* unlock before calling ctx_put, where we might sleep */
2167 		spin_unlock(&mgr->lock);
2168 		hl_ctx_put(encaps_sig_hdl->ctx);
2169 		kfree(encaps_sig_hdl);
2170 		goto out;
2171 	} else {
2172 		rc = -EINVAL;
2173 		dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
2174 	}
2175 
2176 out_unlock:
2177 	spin_unlock(&mgr->lock);
2178 
2179 out:
2180 	return rc;
2181 }
2182 
2183 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
2184 				void __user *chunks, u32 num_chunks,
2185 				u64 *cs_seq, u32 flags, u32 timeout,
2186 				u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
2187 {
2188 	struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
2189 	bool handle_found = false, is_wait_cs = false,
2190 			wait_cs_submitted = false,
2191 			cs_encaps_signals = false;
2192 	struct hl_cs_chunk *cs_chunk_array, *chunk;
2193 	bool staged_cs_with_encaps_signals = false;
2194 	struct hw_queue_properties *hw_queue_prop;
2195 	struct hl_device *hdev = hpriv->hdev;
2196 	struct hl_cs_compl *sig_waitcs_cmpl;
2197 	u32 q_idx, collective_engine_id = 0;
2198 	struct hl_cs_counters_atomic *cntr;
2199 	struct hl_fence *sig_fence = NULL;
2200 	struct hl_ctx *ctx = hpriv->ctx;
2201 	enum hl_queue_type q_type;
2202 	struct hl_cs *cs;
2203 	u64 signal_seq;
2204 	int rc;
2205 
2206 	cntr = &hdev->aggregated_cs_counters;
2207 	*cs_seq = ULLONG_MAX;
2208 
2209 	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
2210 			ctx);
2211 	if (rc)
2212 		goto out;
2213 
2214 	/* currently it is guaranteed to have only one chunk */
2215 	chunk = &cs_chunk_array[0];
2216 
2217 	if (chunk->queue_index >= hdev->asic_prop.max_queues) {
2218 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2219 		atomic64_inc(&cntr->validation_drop_cnt);
2220 		dev_err(hdev->dev, "Queue index %d is invalid\n",
2221 			chunk->queue_index);
2222 		rc = -EINVAL;
2223 		goto free_cs_chunk_array;
2224 	}
2225 
2226 	q_idx = chunk->queue_index;
2227 	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2228 	q_type = hw_queue_prop->type;
2229 
2230 	if (!hw_queue_prop->supports_sync_stream) {
2231 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2232 		atomic64_inc(&cntr->validation_drop_cnt);
2233 		dev_err(hdev->dev,
2234 			"Queue index %d does not support sync stream operations\n",
2235 			q_idx);
2236 		rc = -EINVAL;
2237 		goto free_cs_chunk_array;
2238 	}
2239 
2240 	if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
2241 		if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
2242 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2243 			atomic64_inc(&cntr->validation_drop_cnt);
2244 			dev_err(hdev->dev,
2245 				"Queue index %d is invalid\n", q_idx);
2246 			rc = -EINVAL;
2247 			goto free_cs_chunk_array;
2248 		}
2249 
2250 		if (!hdev->nic_ports_mask) {
2251 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2252 			atomic64_inc(&cntr->validation_drop_cnt);
2253 			dev_err(hdev->dev,
2254 				"Collective operations not supported when NIC ports are disabled");
2255 			rc = -EINVAL;
2256 			goto free_cs_chunk_array;
2257 		}
2258 
2259 		collective_engine_id = chunk->collective_engine_id;
2260 	}
2261 
2262 	is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2263 			cs_type == CS_TYPE_COLLECTIVE_WAIT);
2264 
2265 	cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2266 
2267 	if (is_wait_cs) {
2268 		rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2269 				ctx, cs_encaps_signals);
2270 		if (rc)
2271 			goto free_cs_chunk_array;
2272 
2273 		if (cs_encaps_signals) {
2274 			/* check if cs sequence has encapsulated
2275 			 * signals handle
2276 			 */
2277 			struct idr *idp;
2278 			u32 id;
2279 
2280 			spin_lock(&ctx->sig_mgr.lock);
2281 			idp = &ctx->sig_mgr.handles;
2282 			idr_for_each_entry(idp, encaps_sig_hdl, id) {
2283 				if (encaps_sig_hdl->cs_seq == signal_seq) {
2284 					/* get refcount to protect removing this handle from idr,
2285 					 * needed when multiple wait cs are used with offset
2286 					 * to wait on reserved encaps signals.
2287 					 * Since kref_put of this handle is executed outside the
2288 					 * current lock, it is possible that the handle refcount
2289 					 * is 0 but it has yet to be removed from the list. In
2290 					 * this case we need to consider the handle as not valid.
2291 					 */
2292 					if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
2293 						handle_found = true;
2294 					break;
2295 				}
2296 			}
2297 			spin_unlock(&ctx->sig_mgr.lock);
2298 
2299 			if (!handle_found) {
2300 				/* treat as signal CS already finished */
2301 				dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2302 						signal_seq);
2303 				rc = 0;
2304 				goto free_cs_chunk_array;
2305 			}
2306 
2307 			/* validate also the signal offset value */
2308 			if (chunk->encaps_signal_offset >
2309 					encaps_sig_hdl->count) {
2310 				dev_err(hdev->dev, "offset(%u) value exceeds max reserved signals count(%u)!\n",
2311 						chunk->encaps_signal_offset,
2312 						encaps_sig_hdl->count);
2313 				rc = -EINVAL;
2314 				goto free_cs_chunk_array;
2315 			}
2316 		}
2317 
2318 		sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2319 		if (IS_ERR(sig_fence)) {
2320 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2321 			atomic64_inc(&cntr->validation_drop_cnt);
2322 			dev_err(hdev->dev,
2323 				"Failed to get signal CS with seq 0x%llx\n",
2324 				signal_seq);
2325 			rc = PTR_ERR(sig_fence);
2326 			goto free_cs_chunk_array;
2327 		}
2328 
2329 		if (!sig_fence) {
2330 			/* signal CS already finished */
2331 			rc = 0;
2332 			goto free_cs_chunk_array;
2333 		}
2334 
2335 		sig_waitcs_cmpl =
2336 			container_of(sig_fence, struct hl_cs_compl, base_fence);
2337 
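		/* A wait on encapsulated signals may legitimately point at a
		 * CS_TYPE_DEFAULT CS: the signals were embedded in a regular
		 * (staged) submission rather than in a dedicated signal CS.
		 */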
2338 		staged_cs_with_encaps_signals = !!
2339 				(sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2340 				(flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2341 
2342 		if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2343 				!staged_cs_with_encaps_signals) {
2344 			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2345 			atomic64_inc(&cntr->validation_drop_cnt);
2346 			dev_err(hdev->dev,
2347 				"CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2348 				signal_seq);
2349 			hl_fence_put(sig_fence);
2350 			rc = -EINVAL;
2351 			goto free_cs_chunk_array;
2352 		}
2353 
2354 		if (completion_done(&sig_fence->completion)) {
2355 			/* signal CS already finished */
2356 			hl_fence_put(sig_fence);
2357 			rc = 0;
2358 			goto free_cs_chunk_array;
2359 		}
2360 	}
2361 
2362 	rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2363 	if (rc) {
2364 		if (is_wait_cs)
2365 			hl_fence_put(sig_fence);
2366 
2367 		goto free_cs_chunk_array;
2368 	}
2369 
2370 	/*
2371 	 * Save the signal CS fence for later initialization right before
2372 	 * hanging the wait CS on the queue.
2373 	 * For the encaps signals case, we save the CS sequence and the handle
2374 	 * pointer for later initialization.
2375 	 */
2376 	if (is_wait_cs) {
2377 		cs->signal_fence = sig_fence;
2378 		/* store the handle pointer, so we don't have to
2379 		 * look for it again later in the flow,
2380 		 * when we need to set the SOB info in the hw_queue.
2381 		 */
2382 		if (cs->encaps_signals)
2383 			cs->encaps_sig_hdl = encaps_sig_hdl;
2384 	}
2385 
2386 	hl_debugfs_add_cs(cs);
2387 
2388 	*cs_seq = cs->sequence;
2389 
2390 	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2391 		rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2392 				q_idx, chunk->encaps_signal_offset);
2393 	else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2394 		rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2395 				cs, q_idx, collective_engine_id,
2396 				chunk->encaps_signal_offset);
2397 	else {
2398 		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2399 		atomic64_inc(&cntr->validation_drop_cnt);
2400 		rc = -EINVAL;
2401 	}
2402 
2403 	if (rc)
2404 		goto free_cs_object;
2405 
2406 	if (q_type == QUEUE_TYPE_HW)
2407 		INIT_WORK(&cs->finish_work, cs_completion);
2408 
2409 	rc = hl_hw_queue_schedule_cs(cs);
2410 	if (rc) {
2411 		/* In case the wait CS failed here, it means the signal CS
2412 		 * already completed. We want to free all of its related objects,
2413 		 * but we don't want to fail the ioctl.
2414 		 */
2415 		if (is_wait_cs)
2416 			rc = 0;
2417 		else if (rc != -EAGAIN)
2418 			dev_err(hdev->dev,
2419 				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
2420 				ctx->asid, cs->sequence, rc);
2421 		goto free_cs_object;
2422 	}
2423 
2424 	*signal_sob_addr_offset = cs->sob_addr_offset;
2425 	*signal_initial_sob_count = cs->initial_sob_count;
2426 
2427 	rc = HL_CS_STATUS_SUCCESS;
2428 	if (is_wait_cs)
2429 		wait_cs_submitted = true;
2430 	goto put_cs;
2431 
2432 free_cs_object:
2433 	cs_rollback(hdev, cs);
2434 	*cs_seq = ULLONG_MAX;
2435 	/* The path below is both for good and erroneous exits */
2436 put_cs:
2437 	/* We finished with the CS in this function, so put the ref */
2438 	cs_put(cs);
2439 free_cs_chunk_array:
2440 	if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
2441 		kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
2442 	kfree(cs_chunk_array);
2443 out:
2444 	return rc;
2445 }
2446 
2447 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
2448 						u32 num_engine_cores, u32 core_command)
2449 {
2450 	struct hl_device *hdev = hpriv->hdev;
2451 	void __user *engine_cores_arr;
2452 	u32 *cores;
2453 	int rc;
2454 
2455 	if (!hdev->asic_prop.supports_engine_modes)
2456 		return -EPERM;
2457 
2458 	if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
2459 		dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
2460 		return -EINVAL;
2461 	}
2462 
2463 	if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
2464 		dev_err(hdev->dev, "Engine core command is invalid\n");
2465 		return -EINVAL;
2466 	}
2467 
2468 	engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
2469 	cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
2470 	if (!cores)
2471 		return -ENOMEM;
2472 
2473 	if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
2474 		dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
2475 		kfree(cores);
2476 		return -EFAULT;
2477 	}
2478 
2479 	rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
2480 	kfree(cores);
2481 
2482 	return rc;
2483 }
2484 
2485 static int cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr,
2486 						u32 num_engines, enum hl_engine_command command)
2487 {
2488 	struct hl_device *hdev = hpriv->hdev;
2489 	u32 *engines, max_num_of_engines;
2490 	void __user *engines_arr;
2491 	int rc;
2492 
2493 	if (!hdev->asic_prop.supports_engine_modes)
2494 		return -EPERM;
2495 
2496 	if (command >= HL_ENGINE_COMMAND_MAX) {
2497 		dev_err(hdev->dev, "Engine command is invalid\n");
2498 		return -EINVAL;
2499 	}
2500 
2501 	max_num_of_engines = hdev->asic_prop.max_num_of_engines;
2502 	if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT)
2503 		max_num_of_engines = hdev->asic_prop.num_engine_cores;
2504 
2505 	if (!num_engines || num_engines > max_num_of_engines) {
2506 		dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines);
2507 		return -EINVAL;
2508 	}
2509 
2510 	engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr;
2511 	engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL);
2512 	if (!engines)
2513 		return -ENOMEM;
2514 
2515 	if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) {
2516 		dev_err(hdev->dev, "Failed to copy engine-ids array from user\n");
2517 		kfree(engines);
2518 		return -EFAULT;
2519 	}
2520 
2521 	rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command);
2522 	kfree(engines);
2523 
2524 	return rc;
2525 }
2526 
2527 static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
2528 {
2529 	struct hl_device *hdev = hpriv->hdev;
2530 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2531 
2532 	if (!prop->hbw_flush_reg) {
2533 		dev_dbg(hdev->dev, "HBW flush is not supported\n");
2534 		return -EOPNOTSUPP;
2535 	}
2536 
2537 	RREG32(prop->hbw_flush_reg);
2538 
2539 	return 0;
2540 }
2541 
2542 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2543 {
2544 	union hl_cs_args *args = data;
2545 	enum hl_cs_type cs_type = 0;
2546 	u64 cs_seq = ULONG_MAX;
2547 	void __user *chunks;
2548 	u32 num_chunks, flags, timeout,
2549 		signals_count = 0, sob_addr = 0, handle_id = 0;
2550 	u16 sob_initial_count = 0;
2551 	int rc;
2552 
2553 	rc = hl_cs_sanity_checks(hpriv, args);
2554 	if (rc)
2555 		goto out;
2556 
2557 	rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2558 	if (rc)
2559 		goto out;
2560 
2561 	cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2562 					~HL_CS_FLAGS_FORCE_RESTORE);
2563 	chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2564 	num_chunks = args->in.num_chunks_execute;
2565 	flags = args->in.cs_flags;
2566 
2567 	/* In case this is a staged CS, user should supply the CS sequence */
2568 	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2569 			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2570 		cs_seq = args->in.seq;
2571 
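	/* With HL_CS_FLAGS_CUSTOM_TIMEOUT the user supplies the timeout in
	 * seconds; it is converted to milliseconds and then to jiffies here.
	 */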
2572 	timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2573 			? msecs_to_jiffies(args->in.timeout * 1000)
2574 			: hpriv->hdev->timeout_jiffies;
2575 
2576 	switch (cs_type) {
2577 	case CS_TYPE_SIGNAL:
2578 	case CS_TYPE_WAIT:
2579 	case CS_TYPE_COLLECTIVE_WAIT:
2580 		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2581 					&cs_seq, args->in.cs_flags, timeout,
2582 					&sob_addr, &sob_initial_count);
2583 		break;
2584 	case CS_RESERVE_SIGNALS:
2585 		rc = cs_ioctl_reserve_signals(hpriv,
2586 					args->in.encaps_signals_q_idx,
2587 					args->in.encaps_signals_count,
2588 					&handle_id, &sob_addr, &signals_count);
2589 		break;
2590 	case CS_UNRESERVE_SIGNALS:
2591 		rc = cs_ioctl_unreserve_signals(hpriv,
2592 					args->in.encaps_sig_handle_id);
2593 		break;
2594 	case CS_TYPE_ENGINE_CORE:
2595 		rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
2596 				args->in.num_engine_cores, args->in.core_command);
2597 		break;
2598 	case CS_TYPE_ENGINES:
2599 		rc = cs_ioctl_engines(hpriv, args->in.engines,
2600 				args->in.num_engines, args->in.engine_command);
2601 		break;
2602 	case CS_TYPE_FLUSH_PCI_HBW_WRITES:
2603 		rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
2604 		break;
2605 	default:
2606 		rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2607 						args->in.cs_flags,
2608 						args->in.encaps_sig_handle_id,
2609 						timeout, &sob_initial_count);
2610 		break;
2611 	}
2612 out:
2613 	if (rc != -EAGAIN) {
2614 		memset(args, 0, sizeof(*args));
2615 
2616 		switch (cs_type) {
2617 		case CS_RESERVE_SIGNALS:
2618 			args->out.handle_id = handle_id;
2619 			args->out.sob_base_addr_offset = sob_addr;
2620 			args->out.count = signals_count;
2621 			break;
2622 		case CS_TYPE_SIGNAL:
2623 			args->out.sob_base_addr_offset = sob_addr;
2624 			args->out.sob_count_before_submission = sob_initial_count;
2625 			args->out.seq = cs_seq;
2626 			break;
2627 		case CS_TYPE_DEFAULT:
2628 			args->out.sob_count_before_submission = sob_initial_count;
2629 			args->out.seq = cs_seq;
2630 			break;
2631 		default:
2632 			args->out.seq = cs_seq;
2633 			break;
2634 		}
2635 
2636 		args->out.status = rc;
2637 	}
2638 
2639 	return rc;
2640 }
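
/*
 * Illustrative user-space sketch of the submission path (not part of the
 * driver; the ioctl request macro name is an assumption, see the uapi header,
 * and only fields referenced in this file are used):
 *
 *	union hl_cs_args args = {};
 *
 *	args.in.chunks_execute = (__u64)(uintptr_t)chunks;
 *	args.in.num_chunks_execute = num_chunks;
 *	args.in.cs_flags = 0;
 *	if (ioctl(fd, HL_IOCTL_CS, &args))	// request code assumed
 *		return -1;
 *	seq = args.out.seq;	// passed later to the wait-for-CS ioctl
 */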
2641 
2642 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2643 				enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
2644 {
2645 	struct hl_device *hdev = ctx->hdev;
2646 	ktime_t timestamp_kt;
2647 	long completion_rc;
2648 	int rc = 0, error;
2649 
2650 	if (IS_ERR(fence)) {
2651 		rc = PTR_ERR(fence);
2652 		if (rc == -EINVAL)
2653 			dev_notice_ratelimited(hdev->dev,
2654 				"Can't wait on CS %llu because current CS is at seq %llu\n",
2655 				seq, ctx->cs_sequence);
2656 		return rc;
2657 	}
2658 
2659 	if (!fence) {
2660 		if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
2661 			dev_dbg(hdev->dev,
2662 				"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2663 				seq, ctx->cs_sequence);
2664 			*status = CS_WAIT_STATUS_GONE;
2665 			return 0;
2666 		}
2667 
2668 		completion_rc = 1;
2669 		goto report_results;
2670 	}
2671 
2672 	if (!timeout_us) {
2673 		completion_rc = completion_done(&fence->completion);
2674 	} else {
2675 		unsigned long timeout;
2676 
2677 		timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2678 				timeout_us : usecs_to_jiffies(timeout_us);
2679 		completion_rc =
2680 			wait_for_completion_interruptible_timeout(
2681 				&fence->completion, timeout);
2682 	}
2683 
2684 	error = fence->error;
2685 	timestamp_kt = fence->timestamp;
2686 
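	/* completion_rc semantics at this point: a positive value means the fence
	 * completed (or completion_done() returned true), 0 means timeout or
	 * still busy, and -ERESTARTSYS means the wait was interrupted by a signal.
	 */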
2687 report_results:
2688 	if (completion_rc > 0) {
2689 		*status = CS_WAIT_STATUS_COMPLETED;
2690 		if (timestamp)
2691 			*timestamp = ktime_to_ns(timestamp_kt);
2692 	} else {
2693 		*status = CS_WAIT_STATUS_BUSY;
2694 	}
2695 
2696 	if (completion_rc == -ERESTARTSYS)
2697 		rc = completion_rc;
2698 	else if (error == -ETIMEDOUT || error == -EIO)
2699 		rc = error;
2700 
2701 	return rc;
2702 }
2703 
2704 /*
2705  * hl_cs_poll_fences - iterate CS fences to check for CS completion
2706  *
2707  * @mcs_data: multi-CS internal data
2708  * @mcs_compl: multi-CS completion structure
2709  *
2710  * @return 0 on success, otherwise non 0 error code
2711  *
2712  * The function iterates over all CS sequences in the list and sets a bit in
2713  * completion_bitmap for each completed CS.
2714  * While iterating, the function sets the stream map of each fence in the fence
2715  * array in the completion QID stream map to be used by CSs to perform
2716  * completion to the multi-CS context.
2717  * This function shall be called after taking context ref
2718  */
2719 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
2720 {
2721 	struct hl_fence **fence_ptr = mcs_data->fence_arr;
2722 	struct hl_device *hdev = mcs_data->ctx->hdev;
2723 	int i, rc, arr_len = mcs_data->arr_len;
2724 	u64 *seq_arr = mcs_data->seq_arr;
2725 	ktime_t max_ktime, first_cs_time;
2726 	enum hl_cs_wait_status status;
2727 
2728 	memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
2729 
2730 	/* get all fences under the same lock */
2731 	rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2732 	if (rc)
2733 		return rc;
2734 
2735 	/*
2736 	 * re-initialize the completion here to handle 2 possible cases:
2737 	 * 1. A CS completes the multi-CS prior to clearing the completion, in which
2738 	 *    case the fence iteration is guaranteed to catch the CS completion.
2739 	 * 2. The completion occurs after the re-init of the completion,
2740 	 *    in which case we will wake up immediately in wait_for_completion.
2741 	 */
2742 	reinit_completion(&mcs_compl->completion);
2743 
2744 	/*
2745 	 * set to the maximum time to verify the timestamp is valid: if at the end
2746 	 * this value is unchanged, no timestamp was updated
2747 	 */
2748 	max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2749 	first_cs_time = max_ktime;
2750 
2751 	for (i = 0; i < arr_len; i++, fence_ptr++) {
2752 		struct hl_fence *fence = *fence_ptr;
2753 
2754 		/*
2755 		 * In order to prevent a case where we wait until timeout even though a CS associated
2756 		 * with the multi-CS actually completed, we do things in the below order:
2757 		 * 1. for each fence set it's QID map in the multi-CS completion QID map. This way
2758 		 *    any CS can, potentially, complete the multi CS for the specific QID (note
2759 		 *    that once completion is initialized, calling complete* and then wait on the
2760 		 *    completion will cause it to return at once)
2761 		 * 2. only after allowing multi-CS completion for the specific QID we check whether
2762 		 *    the specific CS already completed (and thus the wait for completion part will
2763 		 *    be skipped). if the CS not completed it is guaranteed that completing CS will
2764 		 *    wake up the completion.
2765 		 */
2766 		if (fence)
2767 			mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
2768 
2769 		/*
2770 		 * function won't sleep as it is called with timeout 0 (i.e.
2771 		 * poll the fence)
2772 		 */
2773 		rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
2774 		if (rc) {
2775 			dev_err(hdev->dev,
2776 				"wait_for_fence error :%d for CS seq %llu\n",
2777 								rc, seq_arr[i]);
2778 			break;
2779 		}
2780 
2781 		switch (status) {
2782 		case CS_WAIT_STATUS_BUSY:
2783 			/* CS did not finish, QID to wait on already stored */
2784 			break;
2785 		case CS_WAIT_STATUS_COMPLETED:
2786 			/*
2787 			 * Using mcs_handling_done to avoid possibility of mcs_data
2788 			 * Use mcs_handling_done to avoid the possibility that mcs_data
2789 			 * returns to the user indicating the CS completed before it finished
2790 			 * all of its mcs handling, in order to avoid a race the next time the
2791 			 * user waits for mcs.
2792 			 *       but NULL check was added to overcome static analysis
2793 			 */
2794 			if (fence && !fence->mcs_handling_done) {
2795 				/*
2796 				 * in case multi CS is completed but MCS handling not done
2797 				 * we "complete" the multi CS to prevent it from waiting
2798 				 * until time-out and the "multi-CS handling done" will have
2799 				 * another chance at the next iteration
2800 				 */
2801 				complete_all(&mcs_compl->completion);
2802 				break;
2803 			}
2804 
2805 			mcs_data->completion_bitmap |= BIT(i);
2806 			/*
2807 			 * For all completed CSs we take the earliest timestamp.
2808 			 * For this we have to validate that the timestamp is
2809 			 * earliest of all timestamps so far.
2810 			 */
2811 			if (fence && mcs_data->update_ts &&
2812 					(ktime_compare(fence->timestamp, first_cs_time) < 0))
2813 				first_cs_time = fence->timestamp;
2814 			break;
2815 		case CS_WAIT_STATUS_GONE:
2816 			mcs_data->update_ts = false;
2817 			mcs_data->gone_cs = true;
2818 			/*
2819 			 * It is possible to get old sequence numbers from the user,
2820 			 * which relate to already completed CSs whose fences are
2821 			 * already gone. In this case, the CS is set as completed but
2822 			 * there is no need to consider its QID for mcs completion.
2823 			 */
2824 			mcs_data->completion_bitmap |= BIT(i);
2825 			break;
2826 		default:
2827 			dev_err(hdev->dev, "Invalid fence status\n");
2828 			rc = -EINVAL;
2829 			break;
2830 		}
2831 
2832 	}
2833 
2834 	hl_fences_put(mcs_data->fence_arr, arr_len);
2835 
2836 	if (mcs_data->update_ts &&
2837 			(ktime_compare(first_cs_time, max_ktime) != 0))
2838 		mcs_data->timestamp = ktime_to_ns(first_cs_time);
2839 
2840 	return rc;
2841 }
2842 
2843 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
2844 				enum hl_cs_wait_status *status, s64 *timestamp)
2845 {
2846 	struct hl_fence *fence;
2847 	int rc = 0;
2848 
2849 	if (timestamp)
2850 		*timestamp = 0;
2851 
2852 	hl_ctx_get(ctx);
2853 
2854 	fence = hl_ctx_get_fence(ctx, seq);
2855 
2856 	rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2857 	hl_fence_put(fence);
2858 	hl_ctx_put(ctx);
2859 
2860 	return rc;
2861 }
2862 
2863 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
2864 {
2865 	if (usecs <= U32_MAX)
2866 		return usecs_to_jiffies(usecs);
2867 
2868 	/*
2869 	 * If the value converted to nanoseconds does not fit in 64 bits, use
2870 	 * the largest 64-bit value.
2871 	 */
2872 	if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
2873 		return nsecs_to_jiffies(U64_MAX);
2874 
2875 	return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
2876 }
2877 
2878 /*
2879  * hl_wait_multi_cs_completion_init - init completion structure
2880  *
2881  * @hdev: pointer to habanalabs device structure
2882  * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
2883  *                        master QID to wait on
2884  *
2885  * @return valid completion struct pointer on success, otherwise error pointer
2886  *
2887  * Up to MULTI_CS_MAX_USER_CTX calls can be made to the driver concurrently.
2888  * The function gets the first available completion (by marking it "used")
2889  * and initializes its values.
2890  */
2891 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
2892 {
2893 	struct multi_cs_completion *mcs_compl;
2894 	int i;
2895 
2896 	/* find free multi_cs completion structure */
2897 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2898 		mcs_compl = &hdev->multi_cs_completion[i];
2899 		spin_lock(&mcs_compl->lock);
2900 		if (!mcs_compl->used) {
2901 			mcs_compl->used = 1;
2902 			mcs_compl->timestamp = 0;
2903 			/*
2904 			 * init the QID map to 0 to avoid completion by CSs. The actual QID map
2905 			 * of the multi-CS CSs will be set incrementally at a later stage
2906 			 */
2907 			mcs_compl->stream_master_qid_map = 0;
2908 			spin_unlock(&mcs_compl->lock);
2909 			break;
2910 		}
2911 		spin_unlock(&mcs_compl->lock);
2912 	}
2913 
2914 	if (i == MULTI_CS_MAX_USER_CTX) {
2915 		dev_err(hdev->dev, "no available multi-CS completion structure\n");
2916 		return ERR_PTR(-ENOMEM);
2917 	}
2918 	return mcs_compl;
2919 }
2920 
2921 /*
2922  * hl_wait_multi_cs_completion_fini - return completion structure and set as
2923  *                                    unused
2924  *
2925  * @mcs_compl: pointer to the completion structure
2926  */
2927 static void hl_wait_multi_cs_completion_fini(
2928 					struct multi_cs_completion *mcs_compl)
2929 {
2930 	/*
2931 	 * free completion structure, do it under lock to be in-sync with the
2932 	 * thread that signals completion
2933 	 */
2934 	spin_lock(&mcs_compl->lock);
2935 	mcs_compl->used = 0;
2936 	spin_unlock(&mcs_compl->lock);
2937 }
2938 
2939 /*
2940  * hl_wait_multi_cs_completion - wait for first CS to complete
2941  *
2942  * @mcs_data: multi-CS internal data
2943  *
2944  * @return 0 on success, otherwise non 0 error code
2945  */
2946 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
2947 						struct multi_cs_completion *mcs_compl)
2948 {
2949 	long completion_rc;
2950 
2951 	completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
2952 									mcs_data->timeout_jiffies);
2953 
2954 	/* update timestamp */
2955 	if (completion_rc > 0)
2956 		mcs_data->timestamp = mcs_compl->timestamp;
2957 
2958 	if (completion_rc == -ERESTARTSYS)
2959 		return completion_rc;
2960 
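	/* wait_status keeps the raw wait result: 0 on timeout, otherwise the
	 * number of jiffies left, which the caller can use to re-arm the wait.
	 */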
2961 	mcs_data->wait_status = completion_rc;
2962 
2963 	return 0;
2964 }
2965 
2966 /*
2967  * hl_multi_cs_completion_init - init array of multi-CS completion structures
2968  *
2969  * @hdev: pointer to habanalabs device structure
2970  */
2971 void hl_multi_cs_completion_init(struct hl_device *hdev)
2972 {
2973 	struct multi_cs_completion *mcs_cmpl;
2974 	int i;
2975 
2976 	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2977 		mcs_cmpl = &hdev->multi_cs_completion[i];
2978 		mcs_cmpl->used = 0;
2979 		spin_lock_init(&mcs_cmpl->lock);
2980 		init_completion(&mcs_cmpl->completion);
2981 	}
2982 }
2983 
2984 /*
2985  * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2986  *
2987  * @hpriv: pointer to the private data of the fd
2988  * @data: pointer to multi-CS wait ioctl in/out args
2989  *
2990  */
2991 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2992 {
2993 	struct multi_cs_completion *mcs_compl;
2994 	struct hl_device *hdev = hpriv->hdev;
2995 	struct multi_cs_data mcs_data = {};
2996 	union hl_wait_cs_args *args = data;
2997 	struct hl_ctx *ctx = hpriv->ctx;
2998 	struct hl_fence **fence_arr;
2999 	void __user *seq_arr;
3000 	u32 size_to_copy;
3001 	u64 *cs_seq_arr;
3002 	u8 seq_arr_len;
3003 	int rc, i;
3004 
3005 	for (i = 0 ; i < sizeof(args->in.pad) ; i++)
3006 		if (args->in.pad[i]) {
3007 			dev_dbg(hdev->dev, "Padding bytes must be 0\n");
3008 			return -EINVAL;
3009 		}
3010 
3011 	if (!hdev->supports_wait_for_multi_cs) {
3012 		dev_err(hdev->dev, "Wait for multi CS is not supported\n");
3013 		return -EPERM;
3014 	}
3015 
3016 	seq_arr_len = args->in.seq_arr_len;
3017 
3018 	if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
3019 		dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
3020 				HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
3021 		return -EINVAL;
3022 	}
3023 
3024 	/* allocate memory for sequence array */
3025 	cs_seq_arr =
3026 		kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
3027 	if (!cs_seq_arr)
3028 		return -ENOMEM;
3029 
3030 	/* copy CS sequence array from user */
3031 	seq_arr = (void __user *) (uintptr_t) args->in.seq;
3032 	size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
3033 	if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
3034 		dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
3035 		rc = -EFAULT;
3036 		goto free_seq_arr;
3037 	}
3038 
3039 	/* allocate array for the fences */
3040 	fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
3041 	if (!fence_arr) {
3042 		rc = -ENOMEM;
3043 		goto free_seq_arr;
3044 	}
3045 
3046 	/* initialize the multi-CS internal data */
3047 	mcs_data.ctx = ctx;
3048 	mcs_data.seq_arr = cs_seq_arr;
3049 	mcs_data.fence_arr = fence_arr;
3050 	mcs_data.arr_len = seq_arr_len;
3051 
3052 	hl_ctx_get(ctx);
3053 
3054 	/* wait (with timeout) for the first CS to be completed */
3055 	mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
3056 	mcs_compl = hl_wait_multi_cs_completion_init(hdev);
3057 	if (IS_ERR(mcs_compl)) {
3058 		rc = PTR_ERR(mcs_compl);
3059 		goto put_ctx;
3060 	}
3061 
3062 	/* poll all CS fences, extract timestamp */
3063 	mcs_data.update_ts = true;
3064 	rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3065 	/*
3066 	 * skip wait for CS completion when one of the below is true:
3067 	 * - an error on the poll function
3068 	 * - one or more CS in the list completed
3069 	 * - the user called ioctl with timeout 0
3070 	 */
3071 	if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
3072 		goto completion_fini;
3073 
3074 	while (true) {
3075 		rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
3076 		if (rc || (mcs_data.wait_status == 0))
3077 			break;
3078 
3079 		/*
3080 		 * poll fences once again to update the CS map.
3081 		 * no timestamp should be updated this time.
3082 		 */
3083 		mcs_data.update_ts = false;
3084 		rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3085 
3086 		if (rc || mcs_data.completion_bitmap)
3087 			break;
3088 
3089 		/*
3090 		 * if hl_wait_multi_cs_completion returned before timeout (i.e.
3091 		 * it got a completion) it either got completed by CS in the multi CS list
3092 		 * (in which case the indication will be non empty completion_bitmap) or it
3093 		 * got completed by CS submitted to one of the shared stream master but
3094 		 * not in the multi CS list (in which case we should wait again but modify
3095 		 * the timeout and set timestamp as zero to let a CS related to the current
3096 		 * multi-CS set a new, relevant, timestamp)
3097 		 */
3098 		mcs_data.timeout_jiffies = mcs_data.wait_status;
3099 		mcs_compl->timestamp = 0;
3100 	}
3101 
3102 completion_fini:
3103 	hl_wait_multi_cs_completion_fini(mcs_compl);
3104 
3105 put_ctx:
3106 	hl_ctx_put(ctx);
3107 	kfree(fence_arr);
3108 
3109 free_seq_arr:
3110 	kfree(cs_seq_arr);
3111 
3112 	if (rc == -ERESTARTSYS) {
3113 		dev_err_ratelimited(hdev->dev,
3114 				"user process got signal while waiting for Multi-CS\n");
3115 		rc = -EINTR;
3116 	}
3117 
3118 	if (rc)
3119 		return rc;
3120 
3121 	/* update output args */
3122 	memset(args, 0, sizeof(*args));
3123 
3124 	if (mcs_data.completion_bitmap) {
3125 		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3126 		args->out.cs_completion_map = mcs_data.completion_bitmap;
3127 
3128 		/* if timestamp is not 0, it's valid */
3129 		if (mcs_data.timestamp) {
3130 			args->out.timestamp_nsec = mcs_data.timestamp;
3131 			args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3132 		}
3133 
3134 		/* update if some CS was gone */
3135 		if (!mcs_data.timestamp)
3136 			args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3137 	} else {
3138 		args->out.status = HL_WAIT_CS_STATUS_BUSY;
3139 	}
3140 
3141 	return 0;
3142 }
3143 
3144 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3145 {
3146 	struct hl_device *hdev = hpriv->hdev;
3147 	union hl_wait_cs_args *args = data;
3148 	enum hl_cs_wait_status status;
3149 	u64 seq = args->in.seq;
3150 	s64 timestamp;
3151 	int rc;
3152 
3153 	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
3154 
3155 	if (rc == -ERESTARTSYS) {
3156 		dev_err_ratelimited(hdev->dev,
3157 			"user process got signal while waiting for CS handle %llu\n",
3158 			seq);
3159 		return -EINTR;
3160 	}
3161 
3162 	memset(args, 0, sizeof(*args));
3163 
3164 	if (rc) {
3165 		if (rc == -ETIMEDOUT) {
3166 			dev_err_ratelimited(hdev->dev,
3167 				"CS %llu has timed-out while user process is waiting for it\n",
3168 				seq);
3169 			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
3170 		} else if (rc == -EIO) {
3171 			dev_err_ratelimited(hdev->dev,
3172 				"CS %llu has been aborted while user process is waiting for it\n",
3173 				seq);
3174 			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
3175 		}
3176 		return rc;
3177 	}
3178 
3179 	if (timestamp) {
3180 		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3181 		args->out.timestamp_nsec = timestamp;
3182 	}
3183 
3184 	switch (status) {
3185 	case CS_WAIT_STATUS_GONE:
3186 		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3187 		fallthrough;
3188 	case CS_WAIT_STATUS_COMPLETED:
3189 		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3190 		break;
3191 	case CS_WAIT_STATUS_BUSY:
3192 	default:
3193 		args->out.status = HL_WAIT_CS_STATUS_BUSY;
3194 		break;
3195 	}
3196 
3197 	return 0;
3198 }
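
/*
 * Illustrative user-space sketch of the wait path (not part of the driver;
 * the ioctl request macro name is an assumption, see the uapi header):
 *
 *	union hl_wait_cs_args wait = {};
 *
 *	wait.in.seq = seq;			// sequence returned by the CS ioctl
 *	wait.in.timeout_us = 1000000;		// arbitrary 1 second timeout
 *	if (ioctl(fd, HL_IOCTL_WAIT_CS, &wait))	// request code assumed
 *		return -1;
 *	done = (wait.out.status == HL_WAIT_CS_STATUS_COMPLETED);
 */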
3199 
3200 static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
3201 					struct hl_cb *cq_cb,
3202 					u64 ts_offset, u64 cq_offset, u64 target_value,
3203 					spinlock_t *wait_list_lock,
3204 					struct hl_user_pending_interrupt **pend)
3205 {
3206 	struct hl_ts_buff *ts_buff = buf->private;
3207 	struct hl_user_pending_interrupt *requested_offset_record =
3208 				(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3209 				ts_offset;
3210 	struct hl_user_pending_interrupt *cb_last =
3211 			(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3212 			(ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
3213 	unsigned long iter_counter = 0;
3214 	u64 current_cq_counter;
3215 	ktime_t timestamp;
3216 
3217 	/* Validate that ts_offset does not exceed the last record in the buffer */
3218 	if (requested_offset_record >= cb_last) {
3219 		dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
3220 								(u64)(uintptr_t)cb_last);
3221 		return -EINVAL;
3222 	}
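	/* ts_offset and cq_offset are element indices, not byte offsets:
	 * ts_offset indexes the hl_user_pending_interrupt records above (and the
	 * u64 timestamp slots in the user buffer), while cq_offset indexes the
	 * u64 counters of the CQ buffer.
	 */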
3223 
3224 	timestamp = ktime_get();
3225 
3226 start_over:
3227 	spin_lock(wait_list_lock);
3228 
3229 	/* Unregister only if we haven't reached the target value yet,
3230 	 * since in that case there will be no handling in irq context,
3231 	 * and then it's safe to delete the node from the interrupt list
3232 	 * and re-use it for another interrupt
3233 	 */
3234 	if (requested_offset_record->ts_reg_info.in_use) {
3235 		current_cq_counter = *requested_offset_record->cq_kernel_addr;
3236 		if (current_cq_counter < requested_offset_record->cq_target_value) {
3237 			list_del(&requested_offset_record->wait_list_node);
3238 			spin_unlock(wait_list_lock);
3239 
3240 			hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
3241 			hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
3242 
3243 			dev_dbg(buf->mmg->dev,
3244 				"ts node removed from interrupt list, can now be re-used\n");
3245 		} else {
3246 			dev_dbg(buf->mmg->dev,
3247 				"ts node in middle of irq handling\n");
3248 
3249 			/* irq thread is in the middle of handling; give it time to finish */
3250 			spin_unlock(wait_list_lock);
3251 			usleep_range(100, 1000);
3252 			if (++iter_counter == MAX_TS_ITER_NUM) {
3253 				dev_err(buf->mmg->dev,
3254 					"Timestamp offset processing reached timeout of %lld ms\n",
3255 					ktime_ms_delta(ktime_get(), timestamp));
3256 				return -EAGAIN;
3257 			}
3258 
3259 			goto start_over;
3260 		}
3261 	} else {
3262 		/* Fill up the new registration node info */
3263 		requested_offset_record->ts_reg_info.buf = buf;
3264 		requested_offset_record->ts_reg_info.cq_cb = cq_cb;
3265 		requested_offset_record->ts_reg_info.timestamp_kernel_addr =
3266 				(u64 *) ts_buff->user_buff_address + ts_offset;
3267 		requested_offset_record->cq_kernel_addr =
3268 				(u64 *) cq_cb->kernel_address + cq_offset;
3269 		requested_offset_record->cq_target_value = target_value;
3270 
3271 		spin_unlock(wait_list_lock);
3272 	}
3273 
3274 	*pend = requested_offset_record;
3275 
3276 	dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
3277 		requested_offset_record);
3278 	return 0;
3279 }
3280 
3281 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3282 				struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
3283 				u64 timeout_us, u64 cq_counters_handle,	u64 cq_counters_offset,
3284 				u64 target_value, struct hl_user_interrupt *interrupt,
3285 				bool register_ts_record, u64 ts_handle, u64 ts_offset,
3286 				u32 *status, u64 *timestamp)
3287 {
3288 	struct hl_user_pending_interrupt *pend;
3289 	struct hl_mmap_mem_buf *buf;
3290 	struct hl_cb *cq_cb;
3291 	unsigned long timeout;
3292 	long completion_rc;
3293 	int rc = 0;
3294 
3295 	timeout = hl_usecs64_to_jiffies(timeout_us);
3296 
3297 	hl_ctx_get(ctx);
3298 
3299 	cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
3300 	if (!cq_cb) {
3301 		rc = -EINVAL;
3302 		goto put_ctx;
3303 	}
3304 
3305 	/* Validate the cq offset */
3306 	if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
3307 			((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
3308 		rc = -EINVAL;
3309 		goto put_cq_cb;
3310 	}
3311 
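	/* Two modes are handled below: with register_ts_record the call only
	 * registers a timestamp record and returns without blocking (the irq
	 * handler fills the timestamp once the CQ counter reaches target_value);
	 * without it, a pending node is allocated and the call blocks until the
	 * counter reaches target_value or the timeout expires.
	 */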
3312 	if (register_ts_record) {
3313 		dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
3314 					interrupt->interrupt_id, ts_offset, cq_counters_offset);
3315 		buf = hl_mmap_mem_buf_get(mmg, ts_handle);
3316 		if (!buf) {
3317 			rc = -EINVAL;
3318 			goto put_cq_cb;
3319 		}
3320 
3321 		/* get ts buffer record */
3322 		rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
3323 						cq_counters_offset, target_value,
3324 						&interrupt->wait_list_lock, &pend);
3325 		if (rc)
3326 			goto put_ts_buff;
3327 	} else {
3328 		pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3329 		if (!pend) {
3330 			rc = -ENOMEM;
3331 			goto put_cq_cb;
3332 		}
3333 		hl_fence_init(&pend->fence, ULONG_MAX);
3334 		pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset;
3335 		pend->cq_target_value = target_value;
3336 	}
3337 
3338 	spin_lock(&interrupt->wait_list_lock);
3339 
3340 	/* We check for completion value as interrupt could have been received
3341 	 * before we added the node to the wait list
3342 	 */
3343 	if (*pend->cq_kernel_addr >= target_value) {
3344 		if (register_ts_record)
3345 			pend->ts_reg_info.in_use = 0;
3346 		spin_unlock(&interrupt->wait_list_lock);
3347 
3348 		*status = HL_WAIT_CS_STATUS_COMPLETED;
3349 
3350 		if (register_ts_record) {
3351 			*pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
3352 			goto put_ts_buff;
3353 		} else {
3354 			pend->fence.timestamp = ktime_get();
3355 			goto set_timestamp;
3356 		}
3357 	} else if (!timeout_us) {
3358 		spin_unlock(&interrupt->wait_list_lock);
3359 		*status = HL_WAIT_CS_STATUS_BUSY;
3360 		pend->fence.timestamp = ktime_get();
3361 		goto set_timestamp;
3362 	}
3363 
3364 	/* Add the pending user interrupt to the relevant list for the interrupt
3365 	 * handler to monitor.
3366 	 * Note that the list cannot be kept sorted by target value (which would
3367 	 * shorten the list traversal), since the same list may hold nodes that
3368 	 * belong to different cq counter handles.
3369 	 * Note:
3370 	 * Mark the ts buff offset as in-use here, inside the spinlock-protected
3371 	 * section, to avoid hitting the re-use path in ts_buff_get_kernel_ts_record
3372 	 * before the node has been added to the list. That scenario can happen
3373 	 * when multiple threads race on the same offset: one thread sets up the
3374 	 * ts buff in ts_buff_get_kernel_ts_record, then another thread reaches
3375 	 * ts_buff_get_kernel_ts_record as well, tries to re-use the same ts buff
3376 	 * offset, and ends up attempting to delete a non-existing node from the
3377 	 * list.
3378 	 */
3379 	if (register_ts_record)
3380 		pend->ts_reg_info.in_use = 1;
3381 
3382 	list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3383 	spin_unlock(&interrupt->wait_list_lock);
3384 
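	/*
	 * Timestamp registration is asynchronous: nothing to wait for here.
	 * The node remains on the wait list and the interrupt handler will fill
	 * in the timestamp once the CQ counter reaches the target value.
	 */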
3385 	if (register_ts_record) {
3386 		rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
3387 		goto ts_registration_exit;
3388 	}
3389 
3390 	/* Wait for interrupt handler to signal completion */
3391 	completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3392 								timeout);
3393 	if (completion_rc > 0) {
3394 		*status = HL_WAIT_CS_STATUS_COMPLETED;
3395 	} else {
3396 		if (completion_rc == -ERESTARTSYS) {
3397 			dev_err_ratelimited(hdev->dev,
3398 					"user process got signal while waiting for interrupt ID %d\n",
3399 					interrupt->interrupt_id);
3400 			rc = -EINTR;
3401 			*status = HL_WAIT_CS_STATUS_ABORTED;
3402 		} else {
3403 			if (pend->fence.error == -EIO) {
3404 				dev_err_ratelimited(hdev->dev,
3405 						"interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3406 						pend->fence.error);
3407 				rc = -EIO;
3408 				*status = HL_WAIT_CS_STATUS_ABORTED;
3409 			} else {
3410 				/* The wait has timed out. We don't know anything beyond that
3411 				 * because the workload wasn't submitted through the driver.
3412 				 * Therefore, from the driver's perspective, the workload is
3413 				 * still executing.
3414 				 */
3415 				rc = 0;
3416 				*status = HL_WAIT_CS_STATUS_BUSY;
3417 			}
3418 		}
3419 	}
3420 
3421 	/*
3422 	 * For the completion/timeout case the node is removed from the list
3423 	 * here rather than in the irq handler. For a ts record registration,
3424 	 * the node is deleted by the irq handler once the target value is
3425 	 * reached.
3426 	 */
3427 	spin_lock(&interrupt->wait_list_lock);
3428 	list_del(&pend->wait_list_node);
3429 	spin_unlock(&interrupt->wait_list_lock);
3430 
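	/*
	 * Exit path for non-registration waits: report the fence timestamp and
	 * free the temporary wait node before releasing the CQ CB.
	 */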
3431 set_timestamp:
3432 	*timestamp = ktime_to_ns(pend->fence.timestamp);
3433 	kfree(pend);
3434 	hl_cb_put(cq_cb);
3435 ts_registration_exit:
3436 	hl_ctx_put(ctx);
3437 
3438 	return rc;
3439 
3440 put_ts_buff:
3441 	hl_mmap_mem_buf_put(buf);
3442 put_cq_cb:
3443 	hl_cb_put(cq_cb);
3444 put_ctx:
3445 	hl_ctx_put(ctx);
3446 
3447 	return rc;
3448 }
3449 
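/*
 * Same as _hl_interrupt_wait_ioctl(), except that the completion value is
 * read with copy_from_user() from a plain user-space address rather than from
 * a kernel-mapped CQ counters buffer, and no timestamp record can be
 * registered.
 */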
3450 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
3451 				u64 timeout_us, u64 user_address,
3452 				u64 target_value, struct hl_user_interrupt *interrupt,
3453 				u32 *status,
3454 				u64 *timestamp)
3455 {
3456 	struct hl_user_pending_interrupt *pend;
3457 	unsigned long timeout;
3458 	u64 completion_value;
3459 	long completion_rc;
3460 	int rc = 0;
3461 
3462 	timeout = hl_usecs64_to_jiffies(timeout_us);
3463 
3464 	hl_ctx_get(ctx);
3465 
3466 	pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3467 	if (!pend) {
3468 		hl_ctx_put(ctx);
3469 		return -ENOMEM;
3470 	}
3471 
3472 	hl_fence_init(&pend->fence, ULONG_MAX);
3473 
3474 	/* Add the pending user interrupt to the relevant list for the
3475 	 * interrupt handler to monitor.
3476 	 */
3477 	spin_lock(&interrupt->wait_list_lock);
3478 	list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
3479 	spin_unlock(&interrupt->wait_list_lock);
3480 
3481 	/* Check the completion value, as the interrupt could have been received
3482 	 * before we added the node to the wait list.
3483 	 */
3484 	if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3485 		dev_err(hdev->dev, "Failed to copy completion value from user\n");
3486 		rc = -EFAULT;
3487 		goto remove_pending_user_interrupt;
3488 	}
3489 
3490 	if (completion_value >= target_value) {
3491 		*status = HL_WAIT_CS_STATUS_COMPLETED;
3492 		/* There was no interrupt, we assume the completion is now. */
3493 		pend->fence.timestamp = ktime_get();
3494 	} else {
3495 		*status = HL_WAIT_CS_STATUS_BUSY;
3496 	}
3497 
3498 	if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
3499 		goto remove_pending_user_interrupt;
3500 
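	/*
	 * The completion may be signalled before the user counter actually
	 * reaches the target value, so loop: after each wakeup re-read the
	 * value and, if it is still below the target, keep waiting with the
	 * time that is left.
	 */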
3501 wait_again:
3502 	/* Wait for interrupt handler to signal completion */
3503 	completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3504 										timeout);
3505 
3506 	/* If the timeout did not expire, we need to perform the comparison.
3507 	 * If the comparison fails, keep waiting until the timeout expires.
3508 	 */
3509 	if (completion_rc > 0) {
3510 		spin_lock(&interrupt->wait_list_lock);
3511 		/* reinit_completion must be called before we check the user
3512 		 * completion value; otherwise, if an interrupt is received after
3513 		 * the comparison and before the next wait_for_completion,
3514 		 * we will reach the timeout and fail.
3515 		 */
3516 		reinit_completion(&pend->fence.completion);
3517 		spin_unlock(&interrupt->wait_list_lock);
3518 
3519 		if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3520 			dev_err(hdev->dev, "Failed to copy completion value from user\n");
3521 			rc = -EFAULT;
3522 
3523 			goto remove_pending_user_interrupt;
3524 		}
3525 
3526 		if (completion_value >= target_value) {
3527 			*status = HL_WAIT_CS_STATUS_COMPLETED;
3528 		} else if (pend->fence.error) {
3529 			dev_err_ratelimited(hdev->dev,
3530 				"interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3531 				pend->fence.error);
3532 			/* set the command completion status as ABORTED */
3533 			*status = HL_WAIT_CS_STATUS_ABORTED;
3534 		} else {
3535 			timeout = completion_rc;
3536 			goto wait_again;
3537 		}
3538 	} else if (completion_rc == -ERESTARTSYS) {
3539 		dev_err_ratelimited(hdev->dev,
3540 			"user process got signal while waiting for interrupt ID %d\n",
3541 			interrupt->interrupt_id);
3542 		rc = -EINTR;
3543 	} else {
3544 		/* The wait has timed out. We don't know anything beyond that
3545 		 * because the workload wasn't submitted through the driver.
3546 		 * Therefore, from the driver's perspective, the workload is
3547 		 * still executing.
3548 		 */
3549 		rc = 0;
3550 		*status = HL_WAIT_CS_STATUS_BUSY;
3551 	}
3552 
3553 remove_pending_user_interrupt:
3554 	spin_lock(&interrupt->wait_list_lock);
3555 	list_del(&pend->wait_list_node);
3556 	spin_unlock(&interrupt->wait_list_lock);
3557 
3558 	*timestamp = ktime_to_ns(pend->fence.timestamp);
3559 
3560 	kfree(pend);
3561 	hl_ctx_put(ctx);
3562 
3563 	return rc;
3564 }
3565 
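/*
 * Entry point for interrupt-based waits: decode the interrupt ID from the
 * ioctl flags, map it to the matching hl_user_interrupt object and dispatch
 * to the kernel-CQ or user-address wait flavor according to
 * HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ.
 */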
3566 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3567 {
3568 	u16 interrupt_id, first_interrupt, last_interrupt;
3569 	struct hl_device *hdev = hpriv->hdev;
3570 	struct asic_fixed_properties *prop;
3571 	struct hl_user_interrupt *interrupt;
3572 	union hl_wait_cs_args *args = data;
3573 	u32 status = HL_WAIT_CS_STATUS_BUSY;
3574 	u64 timestamp = 0;
3575 	int rc, int_idx;
3576 
3577 	prop = &hdev->asic_prop;
3578 
3579 	if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
3580 		dev_err(hdev->dev, "no user interrupts allowed");
3581 		return -EPERM;
3582 	}
3583 
3584 	interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
3585 
3586 	first_interrupt = prop->first_available_user_interrupt;
3587 	last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
3588 
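	/*
	 * Interrupt IDs below user_dec_intr_count select decoder interrupts,
	 * IDs in [first_interrupt, last_interrupt] select regular user
	 * interrupts, and the two reserved common IDs select the shared user-CQ
	 * and decoder interrupt objects.
	 */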
3589 	if (interrupt_id < prop->user_dec_intr_count) {
3590 
3591 		/* Check if the requested core is enabled */
3592 		if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
3593 			dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
3594 				interrupt_id);
3595 			return -EINVAL;
3596 		}
3597 
3598 		interrupt = &hdev->user_interrupt[interrupt_id];
3599 
3600 	} else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
3601 
3602 		int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
3603 		interrupt = &hdev->user_interrupt[int_idx];
3604 
3605 	} else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
3606 		interrupt = &hdev->common_user_cq_interrupt;
3607 	} else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
3608 		interrupt = &hdev->common_decoder_interrupt;
3609 	} else {
3610 		dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
3611 		return -EINVAL;
3612 	}
3613 
3614 	if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
3615 		rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
3616 				args->in.interrupt_timeout_us, args->in.cq_counters_handle,
3617 				args->in.cq_counters_offset,
3618 				args->in.target, interrupt,
3619 				!!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT),
3620 				args->in.timestamp_handle, args->in.timestamp_offset,
3621 				&status, &timestamp);
3622 	else
3623 		rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
3624 				args->in.interrupt_timeout_us, args->in.addr,
3625 				args->in.target, interrupt, &status,
3626 				&timestamp);
3627 	if (rc)
3628 		return rc;
3629 
3630 	memset(args, 0, sizeof(*args));
3631 	args->out.status = status;
3632 
3633 	if (timestamp) {
3634 		args->out.timestamp_nsec = timestamp;
3635 		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3636 	}
3637 
3638 	return 0;
3639 }
3640 
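/*
 * Top-level wait ioctl: bail out if the device is not usable, otherwise
 * dispatch to the interrupt wait, multi-CS wait or single-CS wait handler
 * according to the flags provided by the user.
 */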
3641 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3642 {
3643 	struct hl_device *hdev = hpriv->hdev;
3644 	union hl_wait_cs_args *args = data;
3645 	u32 flags = args->in.flags;
3646 	int rc;
3647 
3648 	/* If the device is not operational, or if an error has happened and the user should release
3649 	 * the device, there is no point in waiting for any command submission or user interrupt.
3650 	 */
3651 	if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
3652 		return -EBUSY;
3653 
3654 	if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
3655 		rc = hl_interrupt_wait_ioctl(hpriv, data);
3656 	else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
3657 		rc = hl_multi_cs_wait_ioctl(hpriv, data);
3658 	else
3659 		rc = hl_cs_wait_ioctl(hpriv, data);
3660 
3661 	return rc;
3662 }
3663