Lines Matching refs:dqm
Each entry below gives the source line number, the matching source text, and the enclosing function; "argument" or "local" marks the line where dqm is declared as a parameter or local variable of that function.
40 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
43 static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
47 static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
48 static int destroy_queues_cpsch(struct device_queue_manager *dqm,
51 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
55 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
66 unsigned int get_first_pipe(struct device_queue_manager *dqm) in get_first_pipe() argument
68 BUG_ON(!dqm || !dqm->dev); in get_first_pipe()
69 return dqm->dev->shared_resources.first_compute_pipe; in get_first_pipe()
72 unsigned int get_pipes_num(struct device_queue_manager *dqm) in get_pipes_num() argument
74 BUG_ON(!dqm || !dqm->dev); in get_pipes_num()
75 return dqm->dev->shared_resources.compute_pipe_count; in get_pipes_num()
83 void program_sh_mem_settings(struct device_queue_manager *dqm, in program_sh_mem_settings() argument
86 return dqm->dev->kfd2kgd->program_sh_mem_settings( in program_sh_mem_settings()
87 dqm->dev->kgd, qpd->vmid, in program_sh_mem_settings()
94 static int allocate_vmid(struct device_queue_manager *dqm, in allocate_vmid() argument
100 if (dqm->vmid_bitmap == 0) in allocate_vmid()
103 bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM); in allocate_vmid()
104 clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap); in allocate_vmid()
112 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); in allocate_vmid()
113 program_sh_mem_settings(dqm, qpd); in allocate_vmid()
118 static void deallocate_vmid(struct device_queue_manager *dqm, in deallocate_vmid() argument
125 set_pasid_vmid_mapping(dqm, 0, qpd->vmid); in deallocate_vmid()
127 set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); in deallocate_vmid()
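The allocate_vmid()/deallocate_vmid() matches above show VMIDs being handed out from a small bitmap: find the first set bit, clear it on allocation, set it again on release. Below is a minimal userspace model of that pattern; the CIK_VMID_NUM value and the FIRST_VMID offset are assumptions for the sketch, not values taken from the driver.

```c
/* Illustrative model of the VMID bitmap pattern; values are assumed. */
#include <stdio.h>
#include <strings.h>        /* ffs() */

#define CIK_VMID_NUM 8      /* assumed number of VMIDs available to KFD */
#define FIRST_VMID   8      /* assumed hardware offset of the first KFD VMID */

static unsigned int vmid_bitmap = (1u << CIK_VMID_NUM) - 1;

static int alloc_vmid(void)
{
	int bit;

	if (vmid_bitmap == 0)
		return -1;                   /* no free VMID */
	bit = ffs(vmid_bitmap) - 1;          /* first free slot */
	vmid_bitmap &= ~(1u << bit);         /* mark it allocated */
	return FIRST_VMID + bit;             /* hardware VMID */
}

static void free_vmid(int vmid)
{
	vmid_bitmap |= 1u << (vmid - FIRST_VMID);
}

int main(void)
{
	int v = alloc_vmid();

	printf("allocated vmid %d\n", v);
	free_vmid(v);
	return 0;
}
```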
132 static int create_queue_nocpsch(struct device_queue_manager *dqm, in create_queue_nocpsch() argument
139 BUG_ON(!dqm || !q || !qpd || !allocated_vmid); in create_queue_nocpsch()
144 mutex_lock(&dqm->lock); in create_queue_nocpsch()
146 if (dqm->total_queue_count >= max_num_of_queues_per_device) { in create_queue_nocpsch()
148 dqm->total_queue_count); in create_queue_nocpsch()
149 mutex_unlock(&dqm->lock); in create_queue_nocpsch()
154 retval = allocate_vmid(dqm, qpd, q); in create_queue_nocpsch()
156 mutex_unlock(&dqm->lock); in create_queue_nocpsch()
164 retval = create_compute_queue_nocpsch(dqm, q, qpd); in create_queue_nocpsch()
166 retval = create_sdma_queue_nocpsch(dqm, q, qpd); in create_queue_nocpsch()
170 deallocate_vmid(dqm, qpd, q); in create_queue_nocpsch()
173 mutex_unlock(&dqm->lock); in create_queue_nocpsch()
179 dqm->queue_count++; in create_queue_nocpsch()
182 dqm->sdma_queue_count++; in create_queue_nocpsch()
188 dqm->total_queue_count++; in create_queue_nocpsch()
190 dqm->total_queue_count); in create_queue_nocpsch()
192 mutex_unlock(&dqm->lock); in create_queue_nocpsch()
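The create_queue_nocpsch() matches above follow a lock / check-limit / allocate / create / unwind discipline: everything happens under dqm->lock, the device-wide queue limit is checked first, and a failed queue creation rolls back the VMID allocation before the lock is dropped. A compressed sketch of that control flow, with placeholder helpers and an assumed limit:

```c
/* Sketch of the locked create/unwind pattern; helpers are placeholders. */
#include <pthread.h>
#include <stdio.h>

#define MAX_QUEUES_PER_DEVICE 128        /* assumed device-wide limit */

static pthread_mutex_t dqm_lock = PTHREAD_MUTEX_INITIALIZER;
static int total_queue_count;

static int allocate_vmid(void)    { return 0; }   /* placeholder */
static void deallocate_vmid(void) { }             /* placeholder */
static int create_hw_queue(void)  { return 0; }   /* placeholder */

static int create_queue_sketch(void)
{
	int retval;

	pthread_mutex_lock(&dqm_lock);

	if (total_queue_count >= MAX_QUEUES_PER_DEVICE) {
		pthread_mutex_unlock(&dqm_lock);
		return -1;                    /* over the device limit */
	}

	retval = allocate_vmid();
	if (retval != 0) {
		pthread_mutex_unlock(&dqm_lock);
		return retval;
	}

	retval = create_hw_queue();
	if (retval != 0) {
		deallocate_vmid();            /* unwind on failure */
		pthread_mutex_unlock(&dqm_lock);
		return retval;
	}

	total_queue_count++;                  /* accounting under the lock */
	pthread_mutex_unlock(&dqm_lock);
	return 0;
}

int main(void)
{
	printf("create: %d\n", create_queue_sketch());
	return 0;
}
```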
196 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) in allocate_hqd() argument
203 for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm); in allocate_hqd()
204 pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) { in allocate_hqd()
205 if (dqm->allocated_queues[pipe] != 0) { in allocate_hqd()
207 (unsigned long *)&dqm->allocated_queues[pipe], in allocate_hqd()
211 (unsigned long *)&dqm->allocated_queues[pipe]); in allocate_hqd()
225 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm); in allocate_hqd()
230 static inline void deallocate_hqd(struct device_queue_manager *dqm, in deallocate_hqd() argument
233 set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]); in deallocate_hqd()
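The allocate_hqd() matches show a round-robin scan of the compute pipes starting at next_pipe_to_allocate: the first pipe with a free queue bit supplies the slot, next_pipe_to_allocate is advanced so consecutive allocations spread across pipes, and deallocate_hqd() just sets the bit back. A standalone model of that scan (the PIPE_COUNT and QUEUES_PER_PIPE values are placeholders):

```c
/* Standalone model of round-robin HQD allocation; sizes are assumed. */
#include <stdio.h>
#include <strings.h>

#define PIPE_COUNT      4   /* assumed compute pipe count */
#define QUEUES_PER_PIPE 8   /* assumed queues per pipe */

static unsigned int allocated_queues[PIPE_COUNT];
static unsigned int next_pipe_to_allocate;

static int allocate_hqd(unsigned int *out_pipe, unsigned int *out_queue)
{
	unsigned int pipe, i;

	for (pipe = next_pipe_to_allocate, i = 0; i < PIPE_COUNT;
	     pipe = (pipe + 1) % PIPE_COUNT, ++i) {
		if (allocated_queues[pipe] != 0) {
			int bit = ffs(allocated_queues[pipe]) - 1;

			allocated_queues[pipe] &= ~(1u << bit);   /* take slot */
			*out_pipe = pipe;
			*out_queue = bit;
			next_pipe_to_allocate = (pipe + 1) % PIPE_COUNT;
			return 0;
		}
	}
	return -1;      /* no free HQD slot on any pipe */
}

int main(void)
{
	unsigned int p, q, i;

	for (i = 0; i < PIPE_COUNT; i++)
		allocated_queues[i] = (1u << QUEUES_PER_PIPE) - 1;
	if (allocate_hqd(&p, &q) == 0)
		printf("got pipe %u queue %u\n", p, q);
	return 0;
}
```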
236 static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, in create_compute_queue_nocpsch() argument
243 BUG_ON(!dqm || !q || !qpd); in create_compute_queue_nocpsch()
245 mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); in create_compute_queue_nocpsch()
249 retval = allocate_hqd(dqm, q); in create_compute_queue_nocpsch()
256 deallocate_hqd(dqm, q); in create_compute_queue_nocpsch()
267 deallocate_hqd(dqm, q); in create_compute_queue_nocpsch()
275 static int destroy_queue_nocpsch(struct device_queue_manager *dqm, in destroy_queue_nocpsch() argument
282 BUG_ON(!dqm || !q || !q->mqd || !qpd); in destroy_queue_nocpsch()
288 mutex_lock(&dqm->lock); in destroy_queue_nocpsch()
291 mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); in destroy_queue_nocpsch()
296 deallocate_hqd(dqm, q); in destroy_queue_nocpsch()
298 mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); in destroy_queue_nocpsch()
303 dqm->sdma_queue_count--; in destroy_queue_nocpsch()
304 deallocate_sdma_queue(dqm, q->sdma_id); in destroy_queue_nocpsch()
324 deallocate_vmid(dqm, qpd, q); in destroy_queue_nocpsch()
326 dqm->queue_count--; in destroy_queue_nocpsch()
332 dqm->total_queue_count--; in destroy_queue_nocpsch()
334 dqm->total_queue_count); in destroy_queue_nocpsch()
337 mutex_unlock(&dqm->lock); in destroy_queue_nocpsch()
341 static int update_queue(struct device_queue_manager *dqm, struct queue *q) in update_queue() argument
347 BUG_ON(!dqm || !q || !q->mqd); in update_queue()
349 mutex_lock(&dqm->lock); in update_queue()
350 mqd = dqm->ops.get_mqd_manager(dqm, in update_queue()
353 mutex_unlock(&dqm->lock); in update_queue()
367 dqm->queue_count++; in update_queue()
369 dqm->queue_count--; in update_queue()
372 retval = execute_queues_cpsch(dqm, false); in update_queue()
374 mutex_unlock(&dqm->lock); in update_queue()
379 struct device_queue_manager *dqm, enum KFD_MQD_TYPE type) in get_mqd_manager_nocpsch() argument
383 BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX); in get_mqd_manager_nocpsch()
387 mqd = dqm->mqds[type]; in get_mqd_manager_nocpsch()
389 mqd = mqd_manager_init(type, dqm->dev); in get_mqd_manager_nocpsch()
392 dqm->mqds[type] = mqd; in get_mqd_manager_nocpsch()
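The get_mqd_manager_nocpsch() matches show a lazily initialized, per-type cache: dqm->mqds[type] is returned if it already exists, otherwise mqd_manager_init() creates it and the result is stored for later calls. A minimal sketch of that cache, with a stand-in manager type rather than the driver's struct:

```c
/* Lazy per-type manager cache; the mqd_manager layout is a stand-in. */
#include <stdio.h>
#include <stdlib.h>

enum mqd_type { MQD_TYPE_COMPUTE, MQD_TYPE_SDMA, MQD_TYPE_MAX };

struct mqd_manager { enum mqd_type type; };

static struct mqd_manager *mqds[MQD_TYPE_MAX];

static struct mqd_manager *get_mqd_manager(enum mqd_type type)
{
	if (type >= MQD_TYPE_MAX)
		return NULL;
	if (!mqds[type]) {                       /* first use: initialize */
		mqds[type] = calloc(1, sizeof(*mqds[type]));
		if (!mqds[type])
			return NULL;
		mqds[type]->type = type;
	}
	return mqds[type];                       /* cached thereafter */
}

int main(void)
{
	printf("compute manager at %p\n", (void *)get_mqd_manager(MQD_TYPE_COMPUTE));
	printf("same pointer:      %p\n", (void *)get_mqd_manager(MQD_TYPE_COMPUTE));
	return 0;
}
```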
398 static int register_process_nocpsch(struct device_queue_manager *dqm, in register_process_nocpsch() argument
404 BUG_ON(!dqm || !qpd); in register_process_nocpsch()
414 mutex_lock(&dqm->lock); in register_process_nocpsch()
415 list_add(&n->list, &dqm->queues); in register_process_nocpsch()
417 retval = dqm->ops_asic_specific.register_process(dqm, qpd); in register_process_nocpsch()
419 dqm->processes_count++; in register_process_nocpsch()
421 mutex_unlock(&dqm->lock); in register_process_nocpsch()
426 static int unregister_process_nocpsch(struct device_queue_manager *dqm, in unregister_process_nocpsch() argument
432 BUG_ON(!dqm || !qpd); in unregister_process_nocpsch()
440 mutex_lock(&dqm->lock); in unregister_process_nocpsch()
442 list_for_each_entry_safe(cur, next, &dqm->queues, list) { in unregister_process_nocpsch()
446 dqm->processes_count--; in unregister_process_nocpsch()
453 mutex_unlock(&dqm->lock); in unregister_process_nocpsch()
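The register/unregister_process_nocpsch() matches point at simple per-process bookkeeping: registering adds a node to the dqm's list and bumps processes_count, unregistering walks the list safely, removes the matching node and decrements the count. The node layout below is a stand-in for the driver's process node, keyed here by a plain integer for illustration:

```c
/* Stand-in for the per-process list bookkeeping. */
#include <stdio.h>
#include <stdlib.h>

struct process_node {
	int pasid;                       /* stands in for the qpd pointer */
	struct process_node *next;
};

static struct process_node *process_list;
static int processes_count;

static int register_process(int pasid)
{
	struct process_node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->pasid = pasid;
	n->next = process_list;          /* insert at list head */
	process_list = n;
	processes_count++;
	return 0;
}

static void unregister_process(int pasid)
{
	struct process_node **pp = &process_list;

	while (*pp) {                    /* safe walk: keep the link pointer */
		if ((*pp)->pasid == pasid) {
			struct process_node *victim = *pp;

			*pp = victim->next;
			free(victim);
			processes_count--;
			return;
		}
		pp = &(*pp)->next;
	}
}

int main(void)
{
	register_process(42);
	unregister_process(42);
	printf("processes_count=%d\n", processes_count);
	return 0;
}
```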
458 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid, in set_pasid_vmid_mapping() argument
467 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( in set_pasid_vmid_mapping()
468 dqm->dev->kgd, pasid_mapping, in set_pasid_vmid_mapping()
472 int init_pipelines(struct device_queue_manager *dqm, in init_pipelines() argument
480 BUG_ON(!dqm || !dqm->dev); in init_pipelines()
491 err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num, in init_pipelines()
492 &dqm->pipeline_mem); in init_pipelines()
500 hpdptr = dqm->pipeline_mem->cpu_ptr; in init_pipelines()
501 dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr; in init_pipelines()
505 mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); in init_pipelines()
507 kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem); in init_pipelines()
517 pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; in init_pipelines()
520 dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx, in init_pipelines()
527 static void init_interrupts(struct device_queue_manager *dqm) in init_interrupts() argument
531 BUG_ON(dqm == NULL); in init_interrupts()
533 for (i = 0 ; i < get_pipes_num(dqm) ; i++) in init_interrupts()
534 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, in init_interrupts()
535 i + get_first_pipe(dqm)); in init_interrupts()
538 static int init_scheduler(struct device_queue_manager *dqm) in init_scheduler() argument
542 BUG_ON(!dqm); in init_scheduler()
546 retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); in init_scheduler()
550 static int initialize_nocpsch(struct device_queue_manager *dqm) in initialize_nocpsch() argument
554 BUG_ON(!dqm); in initialize_nocpsch()
557 __func__, get_pipes_num(dqm)); in initialize_nocpsch()
559 mutex_init(&dqm->lock); in initialize_nocpsch()
560 INIT_LIST_HEAD(&dqm->queues); in initialize_nocpsch()
561 dqm->queue_count = dqm->next_pipe_to_allocate = 0; in initialize_nocpsch()
562 dqm->sdma_queue_count = 0; in initialize_nocpsch()
563 dqm->allocated_queues = kcalloc(get_pipes_num(dqm), in initialize_nocpsch()
565 if (!dqm->allocated_queues) { in initialize_nocpsch()
566 mutex_destroy(&dqm->lock); in initialize_nocpsch()
570 for (i = 0; i < get_pipes_num(dqm); i++) in initialize_nocpsch()
571 dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1; in initialize_nocpsch()
573 dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1; in initialize_nocpsch()
574 dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1; in initialize_nocpsch()
576 init_scheduler(dqm); in initialize_nocpsch()
580 static void uninitialize_nocpsch(struct device_queue_manager *dqm) in uninitialize_nocpsch() argument
584 BUG_ON(!dqm); in uninitialize_nocpsch()
586 BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0); in uninitialize_nocpsch()
588 kfree(dqm->allocated_queues); in uninitialize_nocpsch()
590 kfree(dqm->mqds[i]); in uninitialize_nocpsch()
591 mutex_destroy(&dqm->lock); in uninitialize_nocpsch()
592 kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem); in uninitialize_nocpsch()
595 static int start_nocpsch(struct device_queue_manager *dqm) in start_nocpsch() argument
597 init_interrupts(dqm); in start_nocpsch()
601 static int stop_nocpsch(struct device_queue_manager *dqm) in stop_nocpsch() argument
606 static int allocate_sdma_queue(struct device_queue_manager *dqm, in allocate_sdma_queue() argument
611 if (dqm->sdma_bitmap == 0) in allocate_sdma_queue()
614 bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap, in allocate_sdma_queue()
617 clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap); in allocate_sdma_queue()
623 static void deallocate_sdma_queue(struct device_queue_manager *dqm, in deallocate_sdma_queue() argument
628 set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap); in deallocate_sdma_queue()
631 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, in create_sdma_queue_nocpsch() argument
638 mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA); in create_sdma_queue_nocpsch()
642 retval = allocate_sdma_queue(dqm, &q->sdma_id); in create_sdma_queue_nocpsch()
653 dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd); in create_sdma_queue_nocpsch()
657 deallocate_sdma_queue(dqm, q->sdma_id); in create_sdma_queue_nocpsch()
664 deallocate_sdma_queue(dqm, q->sdma_id); in create_sdma_queue_nocpsch()
676 static int set_sched_resources(struct device_queue_manager *dqm) in set_sched_resources() argument
681 BUG_ON(!dqm); in set_sched_resources()
689 res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE); in set_sched_resources()
698 return pm_send_set_resources(&dqm->packets, &res); in set_sched_resources()
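The set_sched_resources() match shows the HQD queue mask being shifted by get_first_pipe(dqm) * QUEUES_PER_PIPE so it lines up with the first compute pipe handed to KFD. A tiny arithmetic demo of that shift; the pipe counts used here are made-up values, not the driver's:

```c
/* Demo of the queue-mask shift; pipe counts are assumed values. */
#include <stdio.h>
#include <stdint.h>

#define QUEUES_PER_PIPE 8

int main(void)
{
	unsigned int pipes_for_kfd = 3;      /* assumed compute_pipe_count */
	unsigned int first_pipe    = 1;      /* assumed first_compute_pipe */
	uint64_t queue_mask = (1ULL << (pipes_for_kfd * QUEUES_PER_PIPE)) - 1;

	/* Shift the per-queue mask so it starts at KFD's first pipe. */
	uint64_t hw_mask = queue_mask << (first_pipe * QUEUES_PER_PIPE);

	printf("queue_mask=0x%llx hw_mask=0x%llx\n",
	       (unsigned long long)queue_mask, (unsigned long long)hw_mask);
	return 0;
}
```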
701 static int initialize_cpsch(struct device_queue_manager *dqm) in initialize_cpsch() argument
705 BUG_ON(!dqm); in initialize_cpsch()
710 mutex_init(&dqm->lock); in initialize_cpsch()
711 INIT_LIST_HEAD(&dqm->queues); in initialize_cpsch()
712 dqm->queue_count = dqm->processes_count = 0; in initialize_cpsch()
713 dqm->sdma_queue_count = 0; in initialize_cpsch()
714 dqm->active_runlist = false; in initialize_cpsch()
715 retval = dqm->ops_asic_specific.initialize(dqm); in initialize_cpsch()
722 mutex_destroy(&dqm->lock); in initialize_cpsch()
726 static int start_cpsch(struct device_queue_manager *dqm) in start_cpsch() argument
731 BUG_ON(!dqm); in start_cpsch()
735 retval = pm_init(&dqm->packets, dqm); in start_cpsch()
739 retval = set_sched_resources(dqm); in start_cpsch()
746 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr), in start_cpsch()
747 &dqm->fence_mem); in start_cpsch()
752 dqm->fence_addr = dqm->fence_mem->cpu_ptr; in start_cpsch()
753 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr; in start_cpsch()
755 init_interrupts(dqm); in start_cpsch()
757 list_for_each_entry(node, &dqm->queues, list) in start_cpsch()
758 if (node->qpd->pqm->process && dqm->dev) in start_cpsch()
759 kfd_bind_process_to_device(dqm->dev, in start_cpsch()
762 execute_queues_cpsch(dqm, true); in start_cpsch()
767 pm_uninit(&dqm->packets); in start_cpsch()
772 static int stop_cpsch(struct device_queue_manager *dqm) in stop_cpsch() argument
777 BUG_ON(!dqm); in stop_cpsch()
779 destroy_queues_cpsch(dqm, true, true); in stop_cpsch()
781 list_for_each_entry(node, &dqm->queues, list) { in stop_cpsch()
785 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); in stop_cpsch()
786 pm_uninit(&dqm->packets); in stop_cpsch()
791 static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, in create_kernel_queue_cpsch() argument
795 BUG_ON(!dqm || !kq || !qpd); in create_kernel_queue_cpsch()
799 mutex_lock(&dqm->lock); in create_kernel_queue_cpsch()
800 if (dqm->total_queue_count >= max_num_of_queues_per_device) { in create_kernel_queue_cpsch()
802 dqm->total_queue_count); in create_kernel_queue_cpsch()
803 mutex_unlock(&dqm->lock); in create_kernel_queue_cpsch()
811 dqm->total_queue_count++; in create_kernel_queue_cpsch()
813 dqm->total_queue_count); in create_kernel_queue_cpsch()
816 dqm->queue_count++; in create_kernel_queue_cpsch()
818 execute_queues_cpsch(dqm, false); in create_kernel_queue_cpsch()
819 mutex_unlock(&dqm->lock); in create_kernel_queue_cpsch()
824 static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, in destroy_kernel_queue_cpsch() argument
828 BUG_ON(!dqm || !kq); in destroy_kernel_queue_cpsch()
832 mutex_lock(&dqm->lock); in destroy_kernel_queue_cpsch()
834 destroy_queues_cpsch(dqm, true, false); in destroy_kernel_queue_cpsch()
836 dqm->queue_count--; in destroy_kernel_queue_cpsch()
838 execute_queues_cpsch(dqm, false); in destroy_kernel_queue_cpsch()
843 dqm->total_queue_count--; in destroy_kernel_queue_cpsch()
845 dqm->total_queue_count); in destroy_kernel_queue_cpsch()
846 mutex_unlock(&dqm->lock); in destroy_kernel_queue_cpsch()
857 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, in create_queue_cpsch() argument
863 BUG_ON(!dqm || !q || !qpd); in create_queue_cpsch()
870 mutex_lock(&dqm->lock); in create_queue_cpsch()
872 if (dqm->total_queue_count >= max_num_of_queues_per_device) { in create_queue_cpsch()
874 dqm->total_queue_count); in create_queue_cpsch()
882 mqd = dqm->ops.get_mqd_manager(dqm, in create_queue_cpsch()
886 mutex_unlock(&dqm->lock); in create_queue_cpsch()
890 dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd); in create_queue_cpsch()
898 dqm->queue_count++; in create_queue_cpsch()
899 retval = execute_queues_cpsch(dqm, false); in create_queue_cpsch()
903 dqm->sdma_queue_count++; in create_queue_cpsch()
908 dqm->total_queue_count++; in create_queue_cpsch()
911 dqm->total_queue_count); in create_queue_cpsch()
914 mutex_unlock(&dqm->lock); in create_queue_cpsch()
936 static int destroy_sdma_queues(struct device_queue_manager *dqm, in destroy_sdma_queues() argument
939 return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA, in destroy_sdma_queues()
944 static int destroy_queues_cpsch(struct device_queue_manager *dqm, in destroy_queues_cpsch() argument
951 BUG_ON(!dqm); in destroy_queues_cpsch()
956 mutex_lock(&dqm->lock); in destroy_queues_cpsch()
957 if (dqm->active_runlist == false) in destroy_queues_cpsch()
961 dqm->sdma_queue_count); in destroy_queues_cpsch()
963 if (dqm->sdma_queue_count > 0) { in destroy_queues_cpsch()
964 destroy_sdma_queues(dqm, 0); in destroy_queues_cpsch()
965 destroy_sdma_queues(dqm, 1); in destroy_queues_cpsch()
972 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, in destroy_queues_cpsch()
977 *dqm->fence_addr = KFD_FENCE_INIT; in destroy_queues_cpsch()
978 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr, in destroy_queues_cpsch()
981 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED, in destroy_queues_cpsch()
984 pdd = kfd_get_process_device_data(dqm->dev, in destroy_queues_cpsch()
989 pm_release_ib(&dqm->packets); in destroy_queues_cpsch()
990 dqm->active_runlist = false; in destroy_queues_cpsch()
994 mutex_unlock(&dqm->lock); in destroy_queues_cpsch()
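The destroy_queues_cpsch() matches outline a fence handshake for preemption: the driver writes KFD_FENCE_INIT to a fence word, asks the packet manager to have the scheduler write a completion value once the queues are unmapped, then waits on the word with a timeout. A userspace model of that handshake; the fence values and the timeout are assumptions, and the "scheduler" write is done inline here:

```c
/* Userspace model of the fence wait; values and timeout are assumed. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define FENCE_INIT      1u
#define FENCE_COMPLETED 2u

static int fence_wait_timeout(volatile uint32_t *fence, uint32_t value,
			      long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (*fence != value) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > timeout_ms)
			return -1;   /* preemption did not complete in time */
	}
	return 0;
}

int main(void)
{
	volatile uint32_t fence = FENCE_INIT;

	/* In the driver the HW scheduler would write the completion value;
	 * the sketch just does it directly before waiting. */
	fence = FENCE_COMPLETED;
	printf("wait result: %d\n",
	       fence_wait_timeout(&fence, FENCE_COMPLETED, 100));
	return 0;
}
```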
998 static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock) in execute_queues_cpsch() argument
1002 BUG_ON(!dqm); in execute_queues_cpsch()
1005 mutex_lock(&dqm->lock); in execute_queues_cpsch()
1007 retval = destroy_queues_cpsch(dqm, false, false); in execute_queues_cpsch()
1013 if (dqm->queue_count <= 0 || dqm->processes_count <= 0) { in execute_queues_cpsch()
1018 if (dqm->active_runlist) { in execute_queues_cpsch()
1023 retval = pm_send_runlist(&dqm->packets, &dqm->queues); in execute_queues_cpsch()
1028 dqm->active_runlist = true; in execute_queues_cpsch()
1032 mutex_unlock(&dqm->lock); in execute_queues_cpsch()
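The execute_queues_cpsch() matches sketch out the resubmission flow: preempt whatever runlist is active, bail out early when there are no queues or processes to schedule (or a runlist is already active), otherwise submit a fresh runlist and mark it active. A control-flow sketch with placeholder helpers rather than the driver's packet-manager API:

```c
/* Control-flow sketch of runlist resubmission; helpers are placeholders. */
#include <stdbool.h>
#include <stdio.h>

static int queue_count = 1, processes_count = 1;
static bool active_runlist;

static int unmap_all_queues(void) { return 0; }   /* placeholder */
static int send_runlist(void)     { return 0; }   /* placeholder */

static int execute_queues_sketch(void)
{
	int retval = unmap_all_queues();             /* preempt current runlist */

	if (retval != 0)
		return retval;

	if (queue_count <= 0 || processes_count <= 0)
		return 0;                            /* nothing to schedule */

	if (active_runlist)
		return 0;                            /* already running */

	retval = send_runlist();                     /* build + submit new runlist */
	if (retval == 0)
		active_runlist = true;
	return retval;
}

int main(void)
{
	printf("execute: %d (active=%d)\n", execute_queues_sketch(), active_runlist);
	return 0;
}
```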
1036 static int destroy_queue_cpsch(struct device_queue_manager *dqm, in destroy_queue_cpsch() argument
1044 BUG_ON(!dqm || !qpd || !q); in destroy_queue_cpsch()
1051 mutex_lock(&dqm->lock); in destroy_queue_cpsch()
1063 mqd = dqm->ops.get_mqd_manager(dqm, in destroy_queue_cpsch()
1071 dqm->sdma_queue_count--; in destroy_queue_cpsch()
1075 dqm->queue_count--; in destroy_queue_cpsch()
1077 execute_queues_cpsch(dqm, false); in destroy_queue_cpsch()
1085 dqm->total_queue_count--; in destroy_queue_cpsch()
1087 dqm->total_queue_count); in destroy_queue_cpsch()
1089 mutex_unlock(&dqm->lock); in destroy_queue_cpsch()
1096 mutex_unlock(&dqm->lock); in destroy_queue_cpsch()
1108 static bool set_cache_memory_policy(struct device_queue_manager *dqm, in set_cache_memory_policy() argument
1119 mutex_lock(&dqm->lock); in set_cache_memory_policy()
1152 retval = dqm->ops_asic_specific.set_cache_memory_policy( in set_cache_memory_policy()
1153 dqm, in set_cache_memory_policy()
1161 program_sh_mem_settings(dqm, qpd); in set_cache_memory_policy()
1167 mutex_unlock(&dqm->lock); in set_cache_memory_policy()
1171 mutex_unlock(&dqm->lock); in set_cache_memory_policy()
1177 struct device_queue_manager *dqm; in device_queue_manager_init() local
1183 dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL); in device_queue_manager_init()
1184 if (!dqm) in device_queue_manager_init()
1187 dqm->dev = dev; in device_queue_manager_init()
1192 dqm->ops.create_queue = create_queue_cpsch; in device_queue_manager_init()
1193 dqm->ops.initialize = initialize_cpsch; in device_queue_manager_init()
1194 dqm->ops.start = start_cpsch; in device_queue_manager_init()
1195 dqm->ops.stop = stop_cpsch; in device_queue_manager_init()
1196 dqm->ops.destroy_queue = destroy_queue_cpsch; in device_queue_manager_init()
1197 dqm->ops.update_queue = update_queue; in device_queue_manager_init()
1198 dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch; in device_queue_manager_init()
1199 dqm->ops.register_process = register_process_nocpsch; in device_queue_manager_init()
1200 dqm->ops.unregister_process = unregister_process_nocpsch; in device_queue_manager_init()
1201 dqm->ops.uninitialize = uninitialize_nocpsch; in device_queue_manager_init()
1202 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; in device_queue_manager_init()
1203 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; in device_queue_manager_init()
1204 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; in device_queue_manager_init()
1208 dqm->ops.start = start_nocpsch; in device_queue_manager_init()
1209 dqm->ops.stop = stop_nocpsch; in device_queue_manager_init()
1210 dqm->ops.create_queue = create_queue_nocpsch; in device_queue_manager_init()
1211 dqm->ops.destroy_queue = destroy_queue_nocpsch; in device_queue_manager_init()
1212 dqm->ops.update_queue = update_queue; in device_queue_manager_init()
1213 dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch; in device_queue_manager_init()
1214 dqm->ops.register_process = register_process_nocpsch; in device_queue_manager_init()
1215 dqm->ops.unregister_process = unregister_process_nocpsch; in device_queue_manager_init()
1216 dqm->ops.initialize = initialize_nocpsch; in device_queue_manager_init()
1217 dqm->ops.uninitialize = uninitialize_nocpsch; in device_queue_manager_init()
1218 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; in device_queue_manager_init()
1227 device_queue_manager_init_vi(&dqm->ops_asic_specific); in device_queue_manager_init()
1231 device_queue_manager_init_cik(&dqm->ops_asic_specific); in device_queue_manager_init()
1235 if (dqm->ops.initialize(dqm) != 0) { in device_queue_manager_init()
1236 kfree(dqm); in device_queue_manager_init()
1240 return dqm; in device_queue_manager_init()
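The device_queue_manager_init() matches show the dqm ops table being filled with either the cpsch (HW scheduler) or nocpsch variants, and the ASIC-specific ops chosen per chip family (init_vi vs init_cik); the selecting condition itself is not part of the dqm matches. A minimal sketch of that function-pointer dispatch, with an illustrative policy enum and only two ops rather than the full set:

```c
/* Minimal sketch of the ops-table dispatch; enum and ops set are illustrative. */
#include <stdio.h>

enum sched_policy { SCHED_POLICY_HWS, SCHED_POLICY_NO_HWS };

struct dqm_ops {
	int (*start)(void);
	int (*stop)(void);
};

static int start_cpsch(void)   { puts("start: HW scheduler");    return 0; }
static int stop_cpsch(void)    { puts("stop: HW scheduler");     return 0; }
static int start_nocpsch(void) { puts("start: no HW scheduler"); return 0; }
static int stop_nocpsch(void)  { puts("stop: no HW scheduler");  return 0; }

static void dqm_init_ops(struct dqm_ops *ops, enum sched_policy policy)
{
	if (policy == SCHED_POLICY_HWS) {
		ops->start = start_cpsch;
		ops->stop  = stop_cpsch;
	} else {
		ops->start = start_nocpsch;
		ops->stop  = stop_nocpsch;
	}
}

int main(void)
{
	struct dqm_ops ops;

	dqm_init_ops(&ops, SCHED_POLICY_HWS);
	ops.start();
	ops.stop();
	return 0;
}
```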
1243 void device_queue_manager_uninit(struct device_queue_manager *dqm) in device_queue_manager_uninit() argument
1245 BUG_ON(!dqm); in device_queue_manager_uninit()
1247 dqm->ops.uninitialize(dqm); in device_queue_manager_uninit()
1248 kfree(dqm); in device_queue_manager_uninit()