// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/*
 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. The result can go up to twice the queue length before
 * wrapping around.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
        ptr += val;
        ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
        return ptr;
}
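
/*
 * Worked example of the double-length wraparound (illustrative value:
 * suppose HL_QUEUE_LENGTH were 256, so pi/ci count modulo 512):
 *
 *      hl_hw_queue_add_ptr(510, 4) == (514 & 511) == 2
 *
 * Letting the counters run up to twice the queue length is what allows
 * queue_free_slots() below to tell a completely full queue apart from a
 * completely empty one.
 */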
static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
{
        return atomic_read(ci) & ((queue_len << 1) - 1);
}

static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
        int delta = (q->pi - queue_ci_get(&q->ci, queue_len));

        if (delta >= 0)
                return (queue_len - delta);
        else
                return (abs(delta) - queue_len);
}
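
/*
 * Worked example for queue_free_slots(), again assuming an illustrative
 * queue_len of 256 (counters run modulo 512):
 *
 *      pi == 300, ci == 100 -> delta == 200,  free == 256 - 200 == 56
 *      pi == 10,  ci == 500 -> delta == -490, free == 490 - 256 == 234
 *
 * In the second case pi has wrapped past 512 while ci hasn't; the number
 * of occupied slots is (10 - 500) mod 512 == 22, and 256 - 22 == 234.
 */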

void hl_hw_queue_update_ci(struct hl_cs *cs)
{
        struct hl_device *hdev = cs->ctx->hdev;
        struct hl_hw_queue *q;
        int i;

        if (hdev->disabled)
                return;

        q = &hdev->kernel_queues[0];

        /* There are no internal queues if H/W queues are being used */
        if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
                return;

        /* We must increment CI for every queue that will never get a
         * completion. There are two scenarios in which this can happen:
         * 1. All queues of a non-completion CS will never get a completion.
         * 2. Internal queues never get a completion.
         */
        for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
                if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
                        atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
        }
}

/*
 * hl_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
 *                           H/W queue.
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device specific
 * function to set the pi (and doorbell)
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
                u32 ctl, u32 len, u64 ptr)
{
        struct hl_bd *bd;

        bd = q->kernel_address;
        bd += hl_pi_2_offset(q->pi);
        bd->ctl = cpu_to_le32(ctl);
        bd->len = cpu_to_le32(len);
        bd->ptr = cpu_to_le64(ptr);

        q->pi = hl_queue_inc_ptr(q->pi);
        hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
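
/*
 * Usage sketch (illustrative, not an actual call site): callers hold the
 * H/W queues lock and verify there is room before submitting, as
 * hl_hw_queue_send_cb_no_cmpl() below does:
 *
 *      hdev->asic_funcs->hw_queues_lock(hdev);
 *      if (!ext_queue_sanity_checks(hdev, q, 1, false))
 *              hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
 *      hdev->asic_funcs->hw_queues_unlock(hdev);
 */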

/*
 * ext_queue_sanity_checks - perform some sanity checks on external queue
 *
 * @hdev : pointer to hl_device structure
 * @q : pointer to hl_hw_queue structure
 * @num_of_entries : how many entries to check for space
 * @reserve_cq_entry : whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 *
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
                                struct hl_hw_queue *q, int num_of_entries,
                                bool reserve_cq_entry)
{
        atomic_t *free_slots =
                        &hdev->completion_queue[q->cq_id].free_slots_cnt;
        int free_slots_cnt;

        /* Check we have enough space in the queue */
        free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

        if (free_slots_cnt < num_of_entries) {
                dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
                        q->hw_queue_id, num_of_entries);
                return -EAGAIN;
        }

        if (reserve_cq_entry) {
                /*
                 * Check we have enough space in the completion queue.
                 * Subtract num_of_entries from the free slots counter; if
                 * the counter becomes negative, the CQ is full, so we can't
                 * submit a new CB because we won't get an ack on its
                 * completion. In that case atomic_add_negative() returns
                 * true and we restore the counter before bailing out.
                 */
                if (atomic_add_negative(num_of_entries * -1, free_slots)) {
                        dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
                                num_of_entries, q->hw_queue_id);
                        atomic_add(num_of_entries, free_slots);
                        return -EAGAIN;
                }
        }

        return 0;
}
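
/*
 * Worked example of the CQ reservation: if free_slots is 3 and
 * num_of_entries is 5, atomic_add_negative(-5, ...) leaves the counter at
 * -2 and returns true, so the counter is restored to 3 and -EAGAIN is
 * returned. On success, the reservation is undone either when completions
 * arrive or by the unroll_cq_resv path in hl_hw_queue_schedule_cs().
 */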

/*
 * int_queue_sanity_checks - perform some sanity checks on internal queue
 *
 * @hdev : pointer to hl_device structure
 * @q : pointer to hl_hw_queue structure
 * @num_of_entries : how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 *
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
                                        struct hl_hw_queue *q,
                                        int num_of_entries)
{
        int free_slots_cnt;

        if (num_of_entries > q->int_queue_len) {
                dev_err(hdev->dev,
                        "Cannot populate queue %u with %u jobs\n",
                        q->hw_queue_id, num_of_entries);
                return -ENOMEM;
        }

        /* Check we have enough space in the queue */
        free_slots_cnt = queue_free_slots(q, q->int_queue_len);

        if (free_slots_cnt < num_of_entries) {
                dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
                        q->hw_queue_id, num_of_entries);
                return -EAGAIN;
        }

        return 0;
}

/*
 * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
 * @hdev: Pointer to hl_device structure.
 * @q: Pointer to hl_hw_queue structure.
 * @num_of_entries: How many entries to check for space.
 *
 * Notice: We do not reserve queue entries so this function mustn't be called
 * more than once per CS for the same queue
 *
 */
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
                                        int num_of_entries)
{
        int free_slots_cnt;

        /* Check we have enough space in the queue */
        free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

        if (free_slots_cnt < num_of_entries) {
                dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
                        q->hw_queue_id, num_of_entries);
                return -EAGAIN;
        }

        return 0;
}

/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue to send the CB to
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB, which must NOT generate a completion entry.
 * Sending CPU messages can be done instead via 'hl_hw_queue_submit_bd()'
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
                                u32 cb_size, u64 cb_ptr)
{
        struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
        int rc = 0;

        hdev->asic_funcs->hw_queues_lock(hdev);

        if (hdev->disabled) {
                rc = -EPERM;
                goto out;
        }

        /*
         * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue
         * type only during the init phase, when the queues are empty and
         * being tested, so there is no need for sanity checks.
         */
        if (q->queue_type != QUEUE_TYPE_HW) {
                rc = ext_queue_sanity_checks(hdev, q, 1, false);
                if (rc)
                        goto out;
        }

        hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
        hdev->asic_funcs->hw_queues_unlock(hdev);

        return rc;
}

/*
 * ext_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        struct hl_cq_entry cq_pkt;
        struct hl_cq *cq;
        u64 cq_addr;
        struct hl_cb *cb;
        u32 ctl;
        u32 len;
        u64 ptr;

        /*
         * Update the JOB ID inside the BD CTL so the device would know what
         * to write in the completion queue
         */
        ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

        cb = job->patched_cb;
        len = job->job_cb_size;
        ptr = cb->bus_address;

        /* Skip completion flow in case this is a non completion CS */
        if (!cs_needs_completion(job->cs))
                goto submit_bd;

        cq_pkt.data = cpu_to_le32(
                        ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
                                & CQ_ENTRY_SHADOW_INDEX_MASK) |
                        FIELD_PREP(CQ_ENTRY_SHADOW_INDEX_VALID_MASK, 1) |
                        FIELD_PREP(CQ_ENTRY_READY_MASK, 1));

        /*
         * No need to protect pi_offset because scheduling to the
         * H/W queues is done under the scheduler mutex
         *
         * No need to check if CQ is full because it was already
         * checked in ext_queue_sanity_checks
         */
        cq = &hdev->completion_queue[q->cq_id];
        cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);

        hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
                                                job->user_cb_size,
                                                cq_addr,
                                                le32_to_cpu(cq_pkt.data),
                                                q->msi_vec,
                                                job->contains_dma_pkt);

        q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

        cq->pi = hl_cq_inc_ptr(cq->pi);

submit_bd:
        hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}

/*
 * int_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void int_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        struct hl_bd bd;
        __le64 *pi;

        bd.ctl = 0;
        bd.len = cpu_to_le32(job->job_cb_size);
        if (job->is_kernel_allocated_cb)
                /* bus_address is actually an MMU-mapped address
                 * allocated from an internal pool
                 */
                bd.ptr = cpu_to_le64(job->user_cb->bus_address);
        else
                bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

        pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd);

        q->pi++;
        q->pi &= ((q->int_queue_len << 1) - 1);

        hdev->asic_funcs->pqe_write(hdev, pi, &bd);

        hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
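
/*
 * Worked example of the internal-queue PI arithmetic (illustrative value:
 * suppose q->int_queue_len were 128, so q->pi counts modulo 256):
 *
 *      q->pi == 130 -> BD written at slot (130 & 127) == 2,
 *                      pi then advances to 131
 *      q->pi == 255 -> BD written at slot 127, pi wraps to 0
 *
 * As with external queues, the doubled range lets queue_free_slots()
 * distinguish a full queue from an empty one.
 */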

/*
 * hw_queue_schedule_job - submit a JOB to a H/W queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void hw_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        u64 ptr;
        u32 offset, ctl, len;

        /*
         * Upon PQE completion, COMP_DATA is used as the write data to the
         * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
         * write address offset in the SM block (QMAN LBW message).
         * The write address offset is calculated as "COMP_OFFSET << 2".
         */
        offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
        ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
                ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);

        len = job->job_cb_size;

        /*
         * A patched CB is created only if a user CB was allocated by the
         * driver and the MMU is disabled. If the MMU is enabled, the user CB
         * should be used instead. If the user CB wasn't allocated by the
         * driver, assume that it holds an address.
         */
        if (job->patched_cb)
                ptr = job->patched_cb->bus_address;
        else if (job->is_kernel_allocated_cb)
                ptr = job->user_cb->bus_address;
        else
                ptr = (u64) (uintptr_t) job->user_cb;

        hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
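
/*
 * Worked example of the COMP_OFFSET calculation (illustrative value:
 * suppose hdev->asic_prop.max_pending_cs were 64): for cs->sequence == 70,
 * offset == (70 & 63) == 6, so on PQE completion the device writes at byte
 * offset (6 << 2) == 24 in the SM block, i.e. the seventh 32-bit location.
 */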

static int init_signal_cs(struct hl_device *hdev,
                struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
        struct hl_sync_stream_properties *prop;
        struct hl_hw_sob *hw_sob;
        u32 q_idx;
        int rc = 0;

        q_idx = job->hw_queue_id;
        prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
        hw_sob = &prop->hw_sob[prop->curr_sob_offset];

        cs_cmpl->hw_sob = hw_sob;
        cs_cmpl->sob_val = prop->next_sob_val;

        dev_dbg(hdev->dev,
                "generate signal CB, sob_id: %d, sob val: %u, q_idx: %d, seq: %llu\n",
                cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,
                cs_cmpl->cs_seq);

        /* We set an EB since we must make sure all operations are done
         * when sending the signal
         */
        hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
                                cs_cmpl->hw_sob->sob_id, 0, true);

        rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
                                                                false);

        job->cs->sob_addr_offset = hw_sob->sob_addr;
        job->cs->initial_sob_count = prop->next_sob_val - 1;

        return rc;
}

void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
                        struct hl_cs *cs, struct hl_cs_job *job,
                        struct hl_cs_compl *cs_cmpl)
{
        struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
        u32 offset = 0;

        cs_cmpl->hw_sob = handle->hw_sob;

        /* Note that encaps_sig_wait_offset was validated earlier in the flow
         * against the max reserved signal count.
         * Always decrement the offset by 1, since when the user sets an
         * offset of 1, for example, they mean to wait only for the first
         * signal, which is pre_sob_val; an offset of 2 requires
         * (pre_sob_val + 1), and so on.
         * If the user sets a wait offset of 0, treat it as a legacy wait CS
         * and wait for the next signal.
         */
        if (job->encaps_sig_wait_offset)
                offset = job->encaps_sig_wait_offset - 1;

        cs_cmpl->sob_val = handle->pre_sob_val + offset;
}
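
/*
 * Worked example of the wait-offset semantics, for pre_sob_val == 10:
 *
 *      encaps_sig_wait_offset == 1 -> sob_val == 10 (first signal)
 *      encaps_sig_wait_offset == 2 -> sob_val == 11 (second signal)
 *      encaps_sig_wait_offset == 0 -> sob_val == 10 (legacy wait CS)
 */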

static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
                struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
        struct hl_gen_wait_properties wait_prop;
        struct hl_sync_stream_properties *prop;
        struct hl_cs_compl *signal_cs_cmpl;
        u32 q_idx;

        q_idx = job->hw_queue_id;
        prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

        signal_cs_cmpl = container_of(cs->signal_fence,
                                        struct hl_cs_compl,
                                        base_fence);

        if (cs->encaps_signals) {
                /* use the encaps signal handle stored earlier in the flow
                 * and set the SOB information from the encaps
                 * signals handle
                 */
                hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl);

                dev_dbg(hdev->dev, "Wait for encaps signals handle, qidx(%u), CS sequence(%llu), sob val: 0x%x, offset: %u\n",
                        cs->encaps_sig_hdl->q_idx,
                        cs->encaps_sig_hdl->cs_seq,
                        cs_cmpl->sob_val,
                        job->encaps_sig_wait_offset);
        } else {
                /* Copy the SOB id and value of the signal CS */
                cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
                cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
        }

        /* Check again if the signal CS was already completed.
         * If yes, don't send any wait CS, since the hw_sob
         * could already be in reset. If the signal is not completed,
         * get a refcount on the hw_sob to prevent resetting the SOB
         * while the wait CS is not yet submitted.
         * Note that this check is protected by two locks:
         * the hw-queue lock and the completion-object lock.
         * The same completion-object lock also protects
         * the hw_sob reset handler function.
         * The hw-queue lock prevents the hw_sob refcount value,
         * which is changed by the signal/wait flows, from going out of sync.
         */
        spin_lock(&signal_cs_cmpl->lock);

        if (completion_done(&cs->signal_fence->completion)) {
                spin_unlock(&signal_cs_cmpl->lock);
                return -EINVAL;
        }

        kref_get(&cs_cmpl->hw_sob->kref);

        spin_unlock(&signal_cs_cmpl->lock);

        dev_dbg(hdev->dev,
                "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d, seq: %llu\n",
                cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
                prop->base_mon_id, q_idx, cs->sequence);

        wait_prop.data = (void *) job->patched_cb;
        wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
        wait_prop.sob_mask = 0x1;
        wait_prop.sob_val = cs_cmpl->sob_val;
        wait_prop.mon_id = prop->base_mon_id;
        wait_prop.q_idx = q_idx;
        wait_prop.size = 0;

        hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);

        mb();
        hl_fence_put(cs->signal_fence);
        cs->signal_fence = NULL;

        return 0;
}

/*
 * init_signal_wait_cs - initialize a signal/wait CS
 * @cs: pointer to the signal/wait CS
 *
 * H/W queues spinlock should be taken before calling this function
 */
static int init_signal_wait_cs(struct hl_cs *cs)
{
        struct hl_ctx *ctx = cs->ctx;
        struct hl_device *hdev = ctx->hdev;
        struct hl_cs_job *job;
        struct hl_cs_compl *cs_cmpl =
                        container_of(cs->fence, struct hl_cs_compl, base_fence);
        int rc = 0;

        /* There is only one job in a signal/wait CS */
        job = list_first_entry(&cs->job_list, struct hl_cs_job,
                                cs_node);

        if (cs->type & CS_TYPE_SIGNAL)
                rc = init_signal_cs(hdev, job, cs_cmpl);
        else if (cs->type & CS_TYPE_WAIT)
                rc = init_wait_cs(hdev, cs, job, cs_cmpl);

        return rc;
}

static int encaps_sig_first_staged_cs_handler
                        (struct hl_device *hdev, struct hl_cs *cs)
{
        struct hl_cs_compl *cs_cmpl =
                        container_of(cs->fence,
                                        struct hl_cs_compl, base_fence);
        struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
        struct hl_encaps_signals_mgr *mgr;
        int rc = 0;

        mgr = &cs->ctx->sig_mgr;

        spin_lock(&mgr->lock);
        encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
        if (encaps_sig_hdl) {
                /*
                 * Set handler CS sequence,
                 * the CS which contains the encapsulated signals.
                 */
                encaps_sig_hdl->cs_seq = cs->sequence;
                /* store the handle and set encaps signal indication,
                 * to be used later in cs_do_release to put the last
                 * reference to encaps signals handlers.
                 */
                cs_cmpl->encaps_signals = true;
                cs_cmpl->encaps_sig_hdl = encaps_sig_hdl;

                /* set hw_sob pointer in completion object
                 * since it's used in cs_do_release flow to put
                 * refcount to sob
                 */
                cs_cmpl->hw_sob = encaps_sig_hdl->hw_sob;
                cs_cmpl->sob_val = encaps_sig_hdl->pre_sob_val +
                                                encaps_sig_hdl->count;

                dev_dbg(hdev->dev, "CS seq (%llu) added to encaps signal handler id (%u), count(%u), qidx(%u), sob(%u), val(%u)\n",
                                cs->sequence, encaps_sig_hdl->id,
                                encaps_sig_hdl->count,
                                encaps_sig_hdl->q_idx,
                                cs_cmpl->hw_sob->sob_id,
                                cs_cmpl->sob_val);

        } else {
                dev_err(hdev->dev, "encaps handle id(%u) wasn't found!\n",
                                cs->encaps_sig_hdl_id);
                rc = -EINVAL;
        }

        spin_unlock(&mgr->lock);

        return rc;
}

/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 * @cs: pointer to the CS
 */
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
        enum hl_device_status status;
        struct hl_cs_counters_atomic *cntr;
        struct hl_ctx *ctx = cs->ctx;
        struct hl_device *hdev = ctx->hdev;
        struct hl_cs_job *job, *tmp;
        struct hl_hw_queue *q;
        int rc = 0, i, cq_cnt;
        bool first_entry;
        u32 max_queues;

        cntr = &hdev->aggregated_cs_counters;

        hdev->asic_funcs->hw_queues_lock(hdev);

        if (!hl_device_operational(hdev, &status)) {
                atomic64_inc(&cntr->device_in_reset_drop_cnt);
                atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt);
                dev_err(hdev->dev,
                        "device is %s, CS rejected!\n", hdev->status[status]);
                rc = -EPERM;
                goto out;
        }

        max_queues = hdev->asic_prop.max_queues;

        q = &hdev->kernel_queues[0];
        for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
                if (cs->jobs_in_queue_cnt[i]) {
                        switch (q->queue_type) {
                        case QUEUE_TYPE_EXT:
                                rc = ext_queue_sanity_checks(hdev, q,
                                                cs->jobs_in_queue_cnt[i],
                                                cs_needs_completion(cs) ?
                                                                true : false);
                                break;
                        case QUEUE_TYPE_INT:
                                rc = int_queue_sanity_checks(hdev, q,
                                                cs->jobs_in_queue_cnt[i]);
                                break;
                        case QUEUE_TYPE_HW:
                                rc = hw_queue_sanity_checks(hdev, q,
                                                cs->jobs_in_queue_cnt[i]);
                                break;
                        default:
                                dev_err(hdev->dev, "Queue type %d is invalid\n",
                                        q->queue_type);
                                rc = -EINVAL;
                                break;
                        }

                        if (rc) {
                                atomic64_inc(
                                        &ctx->cs_counters.queue_full_drop_cnt);
                                atomic64_inc(&cntr->queue_full_drop_cnt);
                                goto unroll_cq_resv;
                        }

                        if (q->queue_type == QUEUE_TYPE_EXT)
                                cq_cnt++;
                }
        }

        if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) {
                rc = init_signal_wait_cs(cs);
                if (rc)
                        goto unroll_cq_resv;
        } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) {
                rc = hdev->asic_funcs->collective_wait_init_cs(cs);
                if (rc)
                        goto unroll_cq_resv;
        }

        rc = hdev->asic_funcs->pre_schedule_cs(cs);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed in pre-submission operations of CS %d.%llu\n",
                        ctx->asid, cs->sequence);
                goto unroll_cq_resv;
        }

        hdev->shadow_cs_queue[cs->sequence &
                                (hdev->asic_prop.max_pending_cs - 1)] = cs;

        if (cs->encaps_signals && cs->staged_first) {
                rc = encaps_sig_first_staged_cs_handler(hdev, cs);
                if (rc)
                        goto unroll_cq_resv;
        }

        spin_lock(&hdev->cs_mirror_lock);

        /* Verify staged CS exists and add to the staged list */
        if (cs->staged_cs && !cs->staged_first) {
                struct hl_cs *staged_cs;

                staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
                if (!staged_cs) {
                        dev_err(hdev->dev,
                                "Cannot find staged submission sequence %llu",
                                cs->staged_sequence);
                        rc = -EINVAL;
                        goto unlock_cs_mirror;
                }

                if (is_staged_cs_last_exists(hdev, staged_cs)) {
                        dev_err(hdev->dev,
                                "Staged submission sequence %llu already submitted",
                                cs->staged_sequence);
                        rc = -EINVAL;
                        goto unlock_cs_mirror;
                }

                list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);

                /* update stream map of the first CS */
                if (hdev->supports_wait_for_multi_cs)
                        staged_cs->fence->stream_master_qid_map |=
                                        cs->fence->stream_master_qid_map;
        }

        list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);

        /* Queue TDR if the CS is the first entry and if timeout is wanted */
        first_entry = list_first_entry(&hdev->cs_mirror_list,
                                        struct hl_cs, mirror_node) == cs;
        if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
                                first_entry && cs_needs_timeout(cs)) {
                cs->tdr_active = true;
                schedule_delayed_work(&cs->work_tdr, cs->timeout_jiffies);
        }

        spin_unlock(&hdev->cs_mirror_lock);

        list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                switch (job->queue_type) {
                case QUEUE_TYPE_EXT:
                        ext_queue_schedule_job(job);
                        break;
                case QUEUE_TYPE_INT:
                        int_queue_schedule_job(job);
                        break;
                case QUEUE_TYPE_HW:
                        hw_queue_schedule_job(job);
                        break;
                default:
                        break;
                }

        cs->submitted = true;

        goto out;

unlock_cs_mirror:
        spin_unlock(&hdev->cs_mirror_lock);
unroll_cq_resv:
        q = &hdev->kernel_queues[0];
        for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
                if ((q->queue_type == QUEUE_TYPE_EXT) &&
                                (cs->jobs_in_queue_cnt[i])) {
                        atomic_t *free_slots =
                                &hdev->completion_queue[i].free_slots_cnt;
                        atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
                        cq_cnt--;
                }
        }

out:
        hdev->asic_funcs->hw_queues_unlock(hdev);

        return rc;
}

/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: which queue to increment its ci
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
        struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

        atomic_inc(&q->ci);
}

static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
                                        bool is_cpu_queue)
{
        void *p;
        int rc;

        if (is_cpu_queue)
                p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
        else
                p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
                                                GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;

        q->kernel_address = p;

        q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, sizeof(struct hl_cs_job *), GFP_KERNEL);
        if (!q->shadow_queue) {
                dev_err(hdev->dev,
                        "Failed to allocate shadow queue for H/W queue %d\n",
                        q->hw_queue_id);
                rc = -ENOMEM;
                goto free_queue;
        }

        /* Make sure read/write pointers are initialized to start of queue */
        atomic_set(&q->ci, 0);
        q->pi = 0;

        return 0;

free_queue:
        if (is_cpu_queue)
                hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
        else
                hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
                                                q->bus_address);

        return rc;
}

static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        void *p;

        p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
                                        &q->bus_address, &q->int_queue_len);
        if (!p) {
                dev_err(hdev->dev,
                        "Failed to get base address for internal queue %d\n",
                        q->hw_queue_id);
                return -EFAULT;
        }

        q->kernel_address = p;
        q->pi = 0;
        atomic_set(&q->ci, 0);

        return 0;
}

static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        return ext_and_cpu_queue_init(hdev, q, true);
}

static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        return ext_and_cpu_queue_init(hdev, q, false);
}

static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        void *p;

        p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
                                        GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;

        q->kernel_address = p;

        /* Make sure read/write pointers are initialized to start of queue */
        atomic_set(&q->ci, 0);
        q->pi = 0;

        return 0;
}

static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
{
        struct hl_sync_stream_properties *sync_stream_prop;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_hw_sob *hw_sob;
        int sob, reserved_mon_idx, queue_idx;

        sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

        /* We use 'collective_mon_idx' as a running index in order to reserve
         * monitors for collective master/slave queues.
         * collective master queue gets 2 reserved monitors
         * collective slave queue gets 1 reserved monitor
         */
        if (hdev->kernel_queues[q_idx].collective_mode ==
                        HL_COLLECTIVE_MASTER) {
                reserved_mon_idx = hdev->collective_mon_idx;

                /* reserve the first monitor for collective master queue */
                sync_stream_prop->collective_mstr_mon_id[0] =
                        prop->collective_first_mon + reserved_mon_idx;

                /* reserve the second monitor for collective master queue */
                sync_stream_prop->collective_mstr_mon_id[1] =
                        prop->collective_first_mon + reserved_mon_idx + 1;

                hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS;
        } else if (hdev->kernel_queues[q_idx].collective_mode ==
                        HL_COLLECTIVE_SLAVE) {
                reserved_mon_idx = hdev->collective_mon_idx++;

                /* reserve a monitor for collective slave queue */
                sync_stream_prop->collective_slave_mon_id =
                        prop->collective_first_mon + reserved_mon_idx;
        }

        if (!hdev->kernel_queues[q_idx].supports_sync_stream)
                return;

        queue_idx = hdev->sync_stream_queue_idx++;

        sync_stream_prop->base_sob_id = prop->sync_stream_first_sob +
                        (queue_idx * HL_RSVD_SOBS);
        sync_stream_prop->base_mon_id = prop->sync_stream_first_mon +
                        (queue_idx * HL_RSVD_MONS);
        sync_stream_prop->next_sob_val = 1;
        sync_stream_prop->curr_sob_offset = 0;

        for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
                hw_sob = &sync_stream_prop->hw_sob[sob];
                hw_sob->hdev = hdev;
                hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
                hw_sob->sob_addr =
                        hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
                hw_sob->q_idx = q_idx;
                kref_init(&hw_sob->kref);
        }
}
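
/*
 * Worked example of the SOB/monitor reservation (symbolic; the actual
 * HL_RSVD_* values come from habanalabs.h): the n-th sync-stream queue
 * (queue_idx == n) is assigned SOB ids
 * [sync_stream_first_sob + n * HL_RSVD_SOBS,
 *  sync_stream_first_sob + (n + 1) * HL_RSVD_SOBS - 1]
 * and the matching monitor range based on HL_RSVD_MONS, so no two queues
 * ever share a SOB or a monitor.
 */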

static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
{
        struct hl_sync_stream_properties *prop =
                        &hdev->kernel_queues[q_idx].sync_stream_prop;

        /*
         * In case we got here due to a stuck CS, the refcnt might be bigger
         * than 1 and therefore we reset it.
         */
        kref_init(&prop->hw_sob[prop->curr_sob_offset].kref);
        prop->curr_sob_offset = 0;
        prop->next_sob_val = 1;
}

/*
 * queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
                        u32 hw_queue_id)
{
        int rc;

        q->hw_queue_id = hw_queue_id;

        switch (q->queue_type) {
        case QUEUE_TYPE_EXT:
                rc = ext_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_INT:
                rc = int_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_CPU:
                rc = cpu_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_HW:
                rc = hw_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_NA:
                q->valid = 0;
                return 0;
        default:
                dev_crit(hdev->dev, "wrong queue type %d during init\n",
                        q->queue_type);
                rc = -EINVAL;
                break;
        }

        sync_stream_queue_init(hdev, q->hw_queue_id);

        if (rc)
                return rc;

        q->valid = 1;

        return 0;
}

/*
 * queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 *
 * Free the queue memory
 */
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
        if (!q->valid)
                return;

        /*
         * If we arrived here, there are no jobs waiting on this queue
         * so we can safely remove it.
         * This is because this function can only be called when:
         * 1. Either a context is deleted, which can only occur if all its
         *    jobs were finished
         * 2. A context couldn't be created due to failure or timeout,
         *    which means there are no jobs on the queue yet
         *
         * The only exception is the queues of the kernel context, but
         * if they are being destroyed, it means that the entire module is
         * being removed. If the module is removed, it means there is no open
         * user context. It also means that if a job was submitted by
         * the kernel driver (e.g. context creation), the job itself was
         * released by the kernel driver when a timeout occurred on its
         * completion. Thus, we don't need to release it again.
         */

        if (q->queue_type == QUEUE_TYPE_INT)
                return;

        kfree(q->shadow_queue);

        if (q->queue_type == QUEUE_TYPE_CPU)
                hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
        else
                hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
                                                q->bus_address);
}

int hl_hw_queues_create(struct hl_device *hdev)
{
        struct asic_fixed_properties *asic = &hdev->asic_prop;
        struct hl_hw_queue *q;
        int i, rc, q_ready_cnt;

        hdev->kernel_queues = kcalloc(asic->max_queues,
                                sizeof(*hdev->kernel_queues), GFP_KERNEL);

        if (!hdev->kernel_queues) {
                dev_err(hdev->dev, "Not enough memory for H/W queues\n");
                return -ENOMEM;
        }

        /* Initialize the H/W queues */
        for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
                        i < asic->max_queues ; i++, q_ready_cnt++, q++) {

                q->queue_type = asic->hw_queues_props[i].type;
                q->supports_sync_stream =
                                asic->hw_queues_props[i].supports_sync_stream;
                q->collective_mode = asic->hw_queues_props[i].collective_mode;
                rc = queue_init(hdev, q, i);
                if (rc) {
                        dev_err(hdev->dev,
                                "failed to initialize queue %d\n", i);
                        goto release_queues;
                }
        }

        return 0;

release_queues:
        for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
                queue_fini(hdev, q);

        kfree(hdev->kernel_queues);

        return rc;
}

void hl_hw_queues_destroy(struct hl_device *hdev)
{
        struct hl_hw_queue *q;
        u32 max_queues = hdev->asic_prop.max_queues;
        int i;

        for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
                queue_fini(hdev, q);

        kfree(hdev->kernel_queues);
}

void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
        struct hl_hw_queue *q;
        u32 max_queues = hdev->asic_prop.max_queues;
        int i;

        for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
                if ((!q->valid) ||
                        ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
                        continue;
                q->pi = 0;
                atomic_set(&q->ci, 0);

                if (q->supports_sync_stream)
                        sync_stream_queue_reset(hdev, q->hw_queue_id);
        }
}