Lines matching refs: q
31 static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len) in queue_free_slots() argument
33 int delta = (q->pi - queue_ci_get(&q->ci, queue_len)); in queue_free_slots()
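The delta arithmetic on line 33 only works because pi and ci are free-running indices in a doubled index space (they wrap at twice the queue length), which is how the driver tells a full ring from an empty one. A minimal userspace sketch of that computation; the toy_* names are stand-ins for the driver's types, and the masking that queue_ci_get() applies to the atomic ci is omitted:

    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_QUEUE_LENGTH 16            /* power of 2, like HL_QUEUE_LENGTH */

    struct toy_queue {                     /* stand-in for struct hl_hw_queue */
        unsigned int pi;                   /* producer index, wraps at 2 * len */
        unsigned int ci;                   /* consumer index, wraps at 2 * len */
    };

    static int toy_free_slots(struct toy_queue *q, int queue_len)
    {
        int delta = (int)q->pi - (int)q->ci;

        /* doubled index space: delta >= 0 means pi is ahead of ci with no
         * intervening wrap; otherwise compensate for the wrap */
        if (delta >= 0)
            return queue_len - delta;
        else
            return abs(delta) - queue_len;
    }

    int main(void)
    {
        struct toy_queue q = { .pi = 0, .ci = 0 };

        printf("empty:   %d free\n", toy_free_slots(&q, TOY_QUEUE_LENGTH)); /* 16 */
        q.pi = TOY_QUEUE_LENGTH;
        printf("full:    %d free\n", toy_free_slots(&q, TOY_QUEUE_LENGTH)); /* 0 */
        q.pi = 2;  q.ci = 20;              /* producer wrapped past 31 back to 2 */
        printf("wrapped: %d free\n", toy_free_slots(&q, TOY_QUEUE_LENGTH)); /* 2 */
        return 0;
    }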
44 struct hl_hw_queue *q; in hl_int_hw_queue_update_ci() local
50 q = &hdev->kernel_queues[0]; in hl_int_hw_queue_update_ci()
51 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) { in hl_int_hw_queue_update_ci()
52 if (q->queue_type == QUEUE_TYPE_INT) in hl_int_hw_queue_update_ci()
53 atomic_add(cs->jobs_in_queue_cnt[i], &q->ci); in hl_int_hw_queue_update_ci()
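Lines 50-53 walk every kernel queue once after a command submission completes and, for internal queues only, credit ci with the number of jobs that CS had queued there; external queues get their ci advanced from the completion-queue path instead. A simplified single-threaded model (the driver's ci is an atomic_t):

    enum toy_qtype { TOY_EXT, TOY_INT, TOY_CPU, TOY_HW };

    struct toy_queue {
        enum toy_qtype queue_type;
        int ci;                            /* atomic_t in the driver */
    };

    /* credit consumed entries back to internal queues after a CS completes */
    static void toy_int_queues_update_ci(struct toy_queue *queues, int max_queues,
                                         const int *jobs_in_queue_cnt)
    {
        int i;

        for (i = 0; i < max_queues; i++)
            if (queues[i].queue_type == TOY_INT)
                queues[i].ci += jobs_in_queue_cnt[i];
    }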
74 struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr) in ext_and_hw_queue_submit_bd() argument
78 bd = q->kernel_address; in ext_and_hw_queue_submit_bd()
79 bd += hl_pi_2_offset(q->pi); in ext_and_hw_queue_submit_bd()
84 q->pi = hl_queue_inc_ptr(q->pi); in ext_and_hw_queue_submit_bd()
85 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); in ext_and_hw_queue_submit_bd()
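Lines 78-85 are the common submit primitive for external and H/W queues: locate the BD slot from pi, fill ctl/len/ptr, advance pi in the doubled index space, and ring the doorbell so the device picks up the new producer index. A compact model; the BD layout is simplified (the real struct hl_bd uses little-endian fields) and the doorbell, an ASIC callback in the driver, is a stub:

    #include <stdint.h>

    #define TOY_QUEUE_LENGTH 16

    struct toy_bd {                        /* simplified struct hl_bd */
        uint32_t ctl;
        uint32_t len;
        uint64_t ptr;
    };

    struct toy_queue {
        struct toy_bd *kernel_address;     /* ring of TOY_QUEUE_LENGTH BDs */
        uint32_t pi;
        uint32_t hw_queue_id;
    };

    static void toy_ring_doorbell(uint32_t id, uint32_t pi)
    {
        (void)id; (void)pi;                /* MMIO write on real hardware */
    }

    static void toy_submit_bd(struct toy_queue *q, uint32_t ctl, uint32_t len,
                              uint64_t ptr)
    {
        /* pi runs over twice the ring size, so mask before indexing
         * (this is what hl_pi_2_offset() does on line 79) */
        struct toy_bd *bd = q->kernel_address + (q->pi & (TOY_QUEUE_LENGTH - 1));

        bd->ctl = ctl;
        bd->len = len;
        bd->ptr = ptr;

        q->pi = (q->pi + 1) & ((TOY_QUEUE_LENGTH << 1) - 1); /* hl_queue_inc_ptr() */
        toy_ring_doorbell(q->hw_queue_id, q->pi);
    }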
107 struct hl_hw_queue *q, int num_of_entries, in ext_queue_sanity_checks() argument
111 &hdev->completion_queue[q->cq_id].free_slots_cnt; in ext_queue_sanity_checks()
115 free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH); in ext_queue_sanity_checks()
119 q->hw_queue_id, num_of_entries); in ext_queue_sanity_checks()
133 num_of_entries, q->hw_queue_id); in ext_queue_sanity_checks()
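ext_queue_sanity_checks() (lines 107-133) gates an external-queue submission on two resources: free BD slots in the queue itself (line 115) and, when reserve_cq_entry is set, free entries in the matching completion queue, reserved by decrementing free_slots_cnt up front. A single-threaded sketch of that flow; the driver performs the reservation with atomic_add_negative() and returns -EAGAIN in both failure cases:

    #include <errno.h>

    #define TOY_QUEUE_LENGTH 16

    struct toy_queue { unsigned int pi, ci; };
    struct toy_cq { int free_slots_cnt; };   /* atomic_t in the driver */

    static int toy_free_slots(struct toy_queue *q)
    {
        int delta = (int)q->pi - (int)q->ci;

        return delta >= 0 ? TOY_QUEUE_LENGTH - delta
                          : -delta - TOY_QUEUE_LENGTH;
    }

    static int toy_ext_sanity_checks(struct toy_queue *q, struct toy_cq *cq,
                                     int num_of_entries, int reserve_cq_entry)
    {
        if (toy_free_slots(q) < num_of_entries)
            return -EAGAIN;

        if (reserve_cq_entry) {
            /* reserve first; if the counter dipped below zero the CQ is
             * out of room, so the reservation is undone */
            cq->free_slots_cnt -= num_of_entries;
            if (cq->free_slots_cnt < 0) {
                cq->free_slots_cnt += num_of_entries;
                return -EAGAIN;
            }
        }
        return 0;
    }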
156 struct hl_hw_queue *q, in int_queue_sanity_checks() argument
161 if (num_of_entries > q->int_queue_len) { in int_queue_sanity_checks()
164 q->hw_queue_id, num_of_entries); in int_queue_sanity_checks()
169 free_slots_cnt = queue_free_slots(q, q->int_queue_len); in int_queue_sanity_checks()
173 q->hw_queue_id, num_of_entries); in int_queue_sanity_checks()
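The internal-queue variant (lines 156-173) has no completion queue to reserve, so it only checks capacity, but it does so twice: a request larger than int_queue_len itself can never succeed and fails permanently (line 161), while a transiently full ring (line 169) is retryable. Sketch below; the exact error codes are illustrative, not quoted from the driver:

    #include <errno.h>

    struct toy_int_queue { unsigned int pi, ci, int_queue_len; };

    static int toy_int_free_slots(struct toy_int_queue *q)
    {
        int delta = (int)q->pi - (int)q->ci;
        int len = (int)q->int_queue_len;

        return delta >= 0 ? len - delta : -delta - len;
    }

    static int toy_int_sanity_checks(struct toy_int_queue *q, int num_of_entries)
    {
        /* impossible request: bigger than the whole ring */
        if (num_of_entries > (int)q->int_queue_len)
            return -ENOMEM;

        /* transient condition: the ring is currently too full */
        if (toy_int_free_slots(q) < num_of_entries)
            return -EAGAIN;

        return 0;
    }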
190 static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q, in hw_queue_sanity_checks() argument
196 free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH); in hw_queue_sanity_checks()
200 q->hw_queue_id, num_of_entries); in hw_queue_sanity_checks()
221 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; in hl_hw_queue_send_cb_no_cmpl() local
232 if (q->queue_type != QUEUE_TYPE_CPU) in hl_hw_queue_send_cb_no_cmpl()
245 if (q->queue_type != QUEUE_TYPE_HW) { in hl_hw_queue_send_cb_no_cmpl()
246 rc = ext_queue_sanity_checks(hdev, q, 1, false); in hl_hw_queue_send_cb_no_cmpl()
251 ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr); in hl_hw_queue_send_cb_no_cmpl()
254 if (q->queue_type != QUEUE_TYPE_CPU) in hl_hw_queue_send_cb_no_cmpl()
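hl_hw_queue_send_cb_no_cmpl() (lines 221-254) is the no-completion submit path the driver uses internally: take the hardware-queues lock except for the CPU queue, which serializes itself; skip the sanity check for H/W queues, since this path only reaches them on empty queues during init and testing; submit one BD; unlock on the way out. A hedged outline of that flow with stubbed locking and submission:

    #include <errno.h>
    #include <stdint.h>

    enum toy_qtype { TOY_EXT, TOY_INT, TOY_CPU, TOY_HW };

    /* stubs standing in for the driver's ASIC callbacks and helpers */
    static void toy_hw_queues_lock(void)   { }
    static void toy_hw_queues_unlock(void) { }
    static int  toy_sanity_checks(int num) { (void)num; return 0; }
    static void toy_submit_bd(uint32_t ctl, uint32_t len, uint64_t ptr)
    {
        (void)ctl; (void)len; (void)ptr;
    }

    static int toy_send_cb_no_cmpl(enum toy_qtype type, int disabled,
                                   uint32_t cb_size, uint64_t cb_ptr)
    {
        int rc = 0;

        /* the CPU queue serializes itself elsewhere, so only non-CPU
         * queues take the hardware-queues lock (lines 232/254) */
        if (type != TOY_CPU)
            toy_hw_queues_lock();

        if (disabled) {                    /* device disabled: refuse */
            rc = -EPERM;
            goto out;
        }

        /* H/W queues hit this path only while empty during init/testing,
         * so their sanity check is skipped (line 245) */
        if (type != TOY_HW) {
            rc = toy_sanity_checks(1);     /* one entry, no CQ reservation */
            if (rc)
                goto out;
        }

        toy_submit_bd(0, cb_size, cb_ptr);
    out:
        if (type != TOY_CPU)
            toy_hw_queues_unlock();
        return rc;
    }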
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in ext_queue_schedule_job() local
284 ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK); in ext_queue_schedule_job()
291 ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT) in ext_queue_schedule_job()
303 cq = &hdev->completion_queue[q->cq_id]; in ext_queue_schedule_job()
309 q->msi_vec, in ext_queue_schedule_job()
312 q->shadow_queue[hl_pi_2_offset(q->pi)] = job; in ext_queue_schedule_job()
316 ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr); in ext_queue_schedule_job()
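In ext_queue_schedule_job() (lines 271-316) the current pi doubles as a shadow index: it is packed into the BD ctl word (line 284) and into the CQ packet (line 291), and the job pointer is parked in shadow_queue at the same offset (line 312), so the completion interrupt can map a CQ entry back to its job. A minimal model of that round trip, with toy_* names standing in for the driver's structures:

    #include <stdint.h>

    #define TOY_QUEUE_LENGTH 16

    struct toy_job { int id; };

    struct toy_queue {
        uint32_t pi;                                  /* wraps at 2 * len */
        struct toy_job *shadow_queue[TOY_QUEUE_LENGTH];
    };

    static uint32_t toy_pi_2_offset(uint32_t pi)
    {
        return pi & (TOY_QUEUE_LENGTH - 1);           /* hl_pi_2_offset() shape */
    }

    /* producer: park the job under the index the device will echo back */
    static void toy_remember_job(struct toy_queue *q, struct toy_job *job)
    {
        q->shadow_queue[toy_pi_2_offset(q->pi)] = job;
        /* ...the same pi then goes into the BD ctl word and the CQ packet
         * before ext_and_hw_queue_submit_bd() advances it */
    }

    /* completion interrupt: the echoed shadow index recovers the job */
    static struct toy_job *toy_lookup_job(struct toy_queue *q,
                                          uint32_t shadow_index)
    {
        return q->shadow_queue[toy_pi_2_offset(shadow_index)];
    }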
330 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in int_queue_schedule_job() local
338 pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd); in int_queue_schedule_job()
340 q->pi++; in int_queue_schedule_job()
341 q->pi &= ((q->int_queue_len << 1) - 1); in int_queue_schedule_job()
345 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); in int_queue_schedule_job()
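The internal-queue submit (lines 330-345) copies the whole BD by value into the queue memory (line 338; memcpy_toio() in the driver, since the ring lives in device memory) and advances pi modulo twice the ring length (lines 340-341), the same doubled index space queue_free_slots() relies on. A sketch of just that pointer arithmetic:

    #include <stdint.h>
    #include <string.h>

    struct toy_bd { uint32_t ctl, len; uint64_t ptr; };

    static void toy_int_submit(uint8_t *queue_base, uint32_t *pi,
                               uint32_t int_queue_len, const struct toy_bd *bd)
    {
        /* mask pi down to a ring offset; int_queue_len is a power of 2 */
        uint8_t *slot = queue_base + (*pi & (int_queue_len - 1)) * sizeof(*bd);

        memcpy(slot, bd, sizeof(*bd));     /* memcpy_toio() in the driver */

        *pi = (*pi + 1) & ((int_queue_len << 1) - 1);  /* lines 340-341 */
        /* followed by ring_doorbell(hw_queue_id, *pi), line 345 */
    }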
359 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in hw_queue_schedule_job() local
371 ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK); in hw_queue_schedule_job()
388 ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr); in hw_queue_schedule_job()
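hw_queue_schedule_job() (lines 359-388) has no shadow queue; instead pi is folded into the BD's completion-data field (line 371) so the completion mechanism can report which submission finished. The mask and shift values below are illustrative placeholders, since the real BD_CTL_* constants are ASIC-specific:

    #include <stdint.h>

    /* illustrative values only; see the ASIC headers for the real BD_CTL_* */
    #define TOY_BD_CTL_COMP_OFFSET_SHIFT 0
    #define TOY_BD_CTL_COMP_OFFSET_MASK  0x0000ffffu
    #define TOY_BD_CTL_COMP_DATA_SHIFT   16
    #define TOY_BD_CTL_COMP_DATA_MASK    0xffff0000u

    /* pack the completion offset and the producer index into one ctl word,
     * mirroring the shape of line 371 */
    static uint32_t toy_hw_queue_ctl(uint32_t compl_offset, uint32_t pi)
    {
        return ((compl_offset << TOY_BD_CTL_COMP_OFFSET_SHIFT) &
                TOY_BD_CTL_COMP_OFFSET_MASK) |
               ((pi << TOY_BD_CTL_COMP_DATA_SHIFT) & TOY_BD_CTL_COMP_DATA_MASK);
    }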
490 struct hl_hw_queue *q; in hl_hw_queue_schedule_cs() local
506 q = &hdev->kernel_queues[0]; in hl_hw_queue_schedule_cs()
507 for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) { in hl_hw_queue_schedule_cs()
509 switch (q->queue_type) { in hl_hw_queue_schedule_cs()
511 rc = ext_queue_sanity_checks(hdev, q, in hl_hw_queue_schedule_cs()
515 rc = int_queue_sanity_checks(hdev, q, in hl_hw_queue_schedule_cs()
519 rc = hw_queue_sanity_checks(hdev, q, in hl_hw_queue_schedule_cs()
524 q->queue_type); in hl_hw_queue_schedule_cs()
534 if (q->queue_type == QUEUE_TYPE_EXT) in hl_hw_queue_schedule_cs()
584 q = &hdev->kernel_queues[0]; in hl_hw_queue_schedule_cs()
585 for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) { in hl_hw_queue_schedule_cs()
586 if ((q->queue_type == QUEUE_TYPE_EXT) && in hl_hw_queue_schedule_cs()
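hl_hw_queue_schedule_cs() (lines 490-586) runs a reserve-then-unwind protocol: the first loop dispatches the per-type sanity check (lines 509-524) and, for external queues, reserves CQ entries while counting them in cq_cnt (line 534); if any queue fails, the unroll loop (lines 584-586) walks from the start and releases exactly cq_cnt reservations. A single-threaded model of that pattern:

    #include <errno.h>

    enum toy_qtype { TOY_EXT, TOY_INT, TOY_HW, TOY_NONE };

    struct toy_q {
        enum toy_qtype type;
        int jobs;                          /* cs->jobs_in_queue_cnt[i] */
        int cq_free;                       /* CQ free_slots_cnt, atomic_t in hw */
    };

    static int toy_schedule_cs(struct toy_q *qs, int max_queues)
    {
        int i, cq_cnt = 0, rc = 0;

        for (i = 0; i < max_queues; i++) {
            if (!qs[i].jobs)
                continue;

            if (qs[i].type == TOY_EXT) {
                if (qs[i].cq_free < qs[i].jobs) {
                    rc = -EAGAIN;
                    goto unroll_cq_resv;
                }
                qs[i].cq_free -= qs[i].jobs;   /* reserve CQ entries */
                cq_cnt++;
            }
            /* INT/HW queues: capacity checks only, nothing to unwind */
        }
        return 0;

    unroll_cq_resv:
        /* every reservation made so far sits at an index before the failure,
         * so releasing the first cq_cnt reserving queues undoes them all */
        for (i = 0; i < max_queues && cq_cnt > 0; i++) {
            if (qs[i].type == TOY_EXT && qs[i].jobs) {
                qs[i].cq_free += qs[i].jobs;
                cq_cnt--;
            }
        }
        return rc;
    }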
609 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; in hl_hw_queue_inc_ci_kernel() local
611 atomic_inc(&q->ci); in hl_hw_queue_inc_ci_kernel()
614 static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q, in ext_and_cpu_queue_init() argument
623 &q->bus_address); in ext_and_cpu_queue_init()
627 &q->bus_address, in ext_and_cpu_queue_init()
632 q->kernel_address = p; in ext_and_cpu_queue_init()
634 q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, in ext_and_cpu_queue_init()
635 sizeof(*q->shadow_queue), in ext_and_cpu_queue_init()
637 if (!q->shadow_queue) { in ext_and_cpu_queue_init()
640 q->hw_queue_id); in ext_and_cpu_queue_init()
646 atomic_set(&q->ci, 0); in ext_and_cpu_queue_init()
647 q->pi = 0; in ext_and_cpu_queue_init()
655 q->kernel_address); in ext_and_cpu_queue_init()
659 q->kernel_address, in ext_and_cpu_queue_init()
660 q->bus_address); in ext_and_cpu_queue_init()
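ext_and_cpu_queue_init() (lines 614-660) allocates the queue ring from a CPU-accessible pool for the CPU queue (line 623) or the generic DMA-coherent allocator otherwise (line 627), then the shadow_queue array for job tracking, and zeroes pi/ci; on shadow_queue failure the ring goes back through the matching allocator (lines 655-660). A condensed model of the allocate-and-unwind structure, with malloc standing in for both DMA allocators:

    #include <errno.h>
    #include <stdlib.h>

    #define TOY_QUEUE_LENGTH 16
    #define TOY_BD_SIZE      16
    #define TOY_QUEUE_SIZE   (TOY_QUEUE_LENGTH * TOY_BD_SIZE)

    struct toy_queue {
        void *kernel_address;
        void **shadow_queue;
        unsigned int pi, ci;
    };

    static int toy_ext_queue_init(struct toy_queue *q)
    {
        q->kernel_address = malloc(TOY_QUEUE_SIZE);
        if (!q->kernel_address)
            return -ENOMEM;

        q->shadow_queue = calloc(TOY_QUEUE_LENGTH, sizeof(*q->shadow_queue));
        if (!q->shadow_queue) {
            /* unwind in reverse allocation order, like lines 655-660 */
            free(q->kernel_address);
            q->kernel_address = NULL;
            return -ENOMEM;
        }

        q->ci = 0;                         /* atomic_set(&q->ci, 0), line 646 */
        q->pi = 0;
        return 0;
    }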
665 static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) in int_queue_init() argument
669 p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id, in int_queue_init()
670 &q->bus_address, &q->int_queue_len); in int_queue_init()
674 q->hw_queue_id); in int_queue_init()
678 q->kernel_address = p; in int_queue_init()
679 q->pi = 0; in int_queue_init()
680 atomic_set(&q->ci, 0); in int_queue_init()
685 static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) in cpu_queue_init() argument
687 return ext_and_cpu_queue_init(hdev, q, true); in cpu_queue_init()
690 static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) in ext_queue_init() argument
692 return ext_and_cpu_queue_init(hdev, q, false); in ext_queue_init()
695 static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) in hw_queue_init() argument
701 &q->bus_address, in hw_queue_init()
706 q->kernel_address = p; in hw_queue_init()
709 atomic_set(&q->ci, 0); in hw_queue_init()
710 q->pi = 0; in hw_queue_init()
761 static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q, in queue_init() argument
766 q->hw_queue_id = hw_queue_id; in queue_init()
768 switch (q->queue_type) { in queue_init()
770 rc = ext_queue_init(hdev, q); in queue_init()
773 rc = int_queue_init(hdev, q); in queue_init()
776 rc = cpu_queue_init(hdev, q); in queue_init()
779 rc = hw_queue_init(hdev, q); in queue_init()
782 q->valid = 0; in queue_init()
786 q->queue_type); in queue_init()
791 if (q->supports_sync_stream) in queue_init()
792 sync_stream_queue_init(hdev, q->hw_queue_id); in queue_init()
797 q->valid = 1; in queue_init()
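queue_init() (lines 761-797) is the dispatcher: record the queue ID, route to the per-type init, treat the unused-slot type as a valid no-op with q->valid left 0 (line 782), and set q->valid only after everything succeeded (line 797), which is what lets the teardown paths skip half-initialized queues. A sketch of that dispatch with stub per-type initializers:

    #include <errno.h>

    enum toy_qtype { TOY_EXT, TOY_INT, TOY_CPU, TOY_HW, TOY_NA };

    struct toy_queue {
        enum toy_qtype queue_type;
        unsigned int hw_queue_id;
        int valid;
    };

    static int toy_ext_init(struct toy_queue *q) { (void)q; return 0; }
    static int toy_int_init(struct toy_queue *q) { (void)q; return 0; }
    static int toy_cpu_init(struct toy_queue *q) { (void)q; return 0; }
    static int toy_hw_init(struct toy_queue *q)  { (void)q; return 0; }

    static int toy_queue_init(struct toy_queue *q, unsigned int hw_queue_id)
    {
        int rc;

        q->hw_queue_id = hw_queue_id;      /* line 766 */

        switch (q->queue_type) {
        case TOY_EXT: rc = toy_ext_init(q); break;
        case TOY_INT: rc = toy_int_init(q); break;
        case TOY_CPU: rc = toy_cpu_init(q); break;
        case TOY_HW:  rc = toy_hw_init(q);  break;
        case TOY_NA:
            q->valid = 0;                  /* unused slot, nothing to tear down */
            return 0;
        default:
            return -EINVAL;                /* unknown queue type */
        }

        if (rc)
            return rc;

        q->valid = 1;                      /* only after full success, line 797 */
        return 0;
    }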
810 static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q) in queue_fini() argument
812 if (!q->valid) in queue_fini()
833 if (q->queue_type == QUEUE_TYPE_INT) in queue_fini()
836 kfree(q->shadow_queue); in queue_fini()
838 if (q->queue_type == QUEUE_TYPE_CPU) in queue_fini()
841 q->kernel_address); in queue_fini()
845 q->kernel_address, in queue_fini()
846 q->bus_address); in queue_fini()
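queue_fini() (lines 810-846) undoes init in reverse and leans on the valid flag (line 812) to be safely callable on queues that never initialized; internal queues own no allocations (their ring belongs to the ASIC code, line 833), and the ring buffer goes back to whichever allocator produced it (lines 838-846). A reduced model, paired with the init sketch above:

    #include <stdlib.h>

    struct toy_queue {
        int valid;
        void **shadow_queue;
        void *kernel_address;
    };

    /* safe on never-initialized queues thanks to the valid flag; free() is
     * NULL-tolerant, just like the kfree() on line 836 */
    static void toy_queue_fini(struct toy_queue *q)
    {
        if (!q->valid)
            return;

        free(q->shadow_queue);
        free(q->kernel_address);
    }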
852 struct hl_hw_queue *q; in hl_hw_queues_create() local
864 for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues; in hl_hw_queues_create()
865 i < asic->max_queues ; i++, q_ready_cnt++, q++) { in hl_hw_queues_create()
867 q->queue_type = asic->hw_queues_props[i].type; in hl_hw_queues_create()
868 q->supports_sync_stream = in hl_hw_queues_create()
870 rc = queue_init(hdev, q, i); in hl_hw_queues_create()
881 for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++) in hl_hw_queues_create()
882 queue_fini(hdev, q); in hl_hw_queues_create()
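hl_hw_queues_create() (lines 852-882) uses the classic partial-failure rollback: q_ready_cnt only counts queues whose init fully succeeded, so the release loop (lines 881-882) tears down exactly those and leaves the failed queue, which cleaned up after itself, alone. The shape of that pattern:

    #include <errno.h>

    struct toy_queue { int valid; };

    static int toy_queue_init(struct toy_queue *q, int id)
    {
        (void)id; q->valid = 1; return 0;  /* stub per-queue init */
    }
    static void toy_queue_fini(struct toy_queue *q)
    {
        q->valid = 0;                      /* stub per-queue teardown */
    }

    /* init N items; on failure, tear down only the ready_cnt that succeeded */
    static int toy_queues_create(struct toy_queue *qs, int max_queues)
    {
        int i, ready_cnt, rc = 0;

        for (i = 0, ready_cnt = 0; i < max_queues; i++, ready_cnt++) {
            rc = toy_queue_init(&qs[i], i);
            if (rc)
                goto release;
        }
        return 0;

    release:
        for (i = 0; i < ready_cnt; i++)
            toy_queue_fini(&qs[i]);
        return rc;
    }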
891 struct hl_hw_queue *q; in hl_hw_queues_destroy() local
895 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) in hl_hw_queues_destroy()
896 queue_fini(hdev, q); in hl_hw_queues_destroy()
903 struct hl_hw_queue *q; in hl_hw_queue_reset() local
907 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) { in hl_hw_queue_reset()
908 if ((!q->valid) || in hl_hw_queue_reset()
909 ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU))) in hl_hw_queue_reset()
911 q->pi = 0; in hl_hw_queue_reset()
912 atomic_set(&q->ci, 0); in hl_hw_queue_reset()
914 if (q->supports_sync_stream) in hl_hw_queue_reset()
915 sync_stream_queue_reset(hdev, q->hw_queue_id); in hl_hw_queue_reset()
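hl_hw_queue_reset() (lines 903-915) rewinds pi/ci to zero on every valid queue, but on a soft reset it skips the CPU queue (lines 908-909), which keeps serving driver-to-firmware traffic since the embedded CPU only goes down on a hard reset. Modeled below:

    #include <stdbool.h>

    enum toy_qtype { TOY_EXT, TOY_INT, TOY_CPU, TOY_HW };

    struct toy_queue {
        int valid;
        enum toy_qtype queue_type;
        unsigned int pi, ci;
    };

    /* a soft reset preserves the CPU queue so driver<->firmware
     * communication survives the reset */
    static void toy_queues_reset(struct toy_queue *qs, int max_queues,
                                 bool hard_reset)
    {
        int i;

        for (i = 0; i < max_queues; i++) {
            if (!qs[i].valid ||
                (!hard_reset && qs[i].queue_type == TOY_CPU))
                continue;
            qs[i].pi = 0;
            qs[i].ci = 0;                  /* atomic_set in the driver */
        }
    }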