Searched refs:kctx (Results 1 – 25 of 315) sorted by relevance


/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_context.c
37 struct kbase_context *kctx; in kbase_create_context() local
43 kctx = vzalloc(sizeof(*kctx)); in kbase_create_context()
44 if (!kctx) { in kbase_create_context()
51 kctx->kbdev = kbdev; in kbase_create_context()
52 kctx->as_nr = KBASEP_AS_NR_INVALID; in kbase_create_context()
53 atomic_set(&kctx->refcount, 0); in kbase_create_context()
55 kbase_ctx_flag_set(kctx, KCTX_COMPAT); in kbase_create_context()
58 kctx->timeline.owner_tgid = task_tgid_nr(current); in kbase_create_context()
60 atomic_set(&kctx->setup_complete, 0); in kbase_create_context()
61 atomic_set(&kctx->setup_in_progress, 0); in kbase_create_context()
[all …]
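
Note: the hits above trace the context-creation path: the context is zero-allocated with vzalloc(), the device back-pointer and an invalid address-space number are set, and the refcount starts at zero. A minimal kernel-style C sketch of that pattern, with a reduced structure (the *_sketch names are hypothetical; fields follow the hits):

    #include <linux/vmalloc.h>  /* vzalloc */
    #include <linux/atomic.h>

    struct kbase_device;  /* opaque for this sketch */

    struct kbase_context_sketch {
        struct kbase_device *kbdev;
        int as_nr;                 /* hardware address space, -1 = unassigned */
        atomic_t refcount;
        atomic_t setup_complete;
        atomic_t setup_in_progress;
    };

    static struct kbase_context_sketch *create_context(struct kbase_device *kbdev)
    {
        struct kbase_context_sketch *kctx = vzalloc(sizeof(*kctx));

        if (!kctx)
            return NULL;                /* allocation failure */
        kctx->kbdev = kbdev;            /* back-pointer to the owning device */
        kctx->as_nr = -1;               /* stands in for KBASEP_AS_NR_INVALID */
        atomic_set(&kctx->refcount, 0); /* nothing holds the context yet */
        atomic_set(&kctx->setup_complete, 0);
        atomic_set(&kctx->setup_in_progress, 0);
        return kctx;
    }
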
mali_kbase_js.c
68 struct kbase_device *kbdev, struct kbase_context *kctx,
73 static void kbase_js_foreach_ctx_job(struct kbase_context *kctx, kbasep_js_ctx_job_cb callback);
77 static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx) in kbasep_js_trace_get_refcnt() argument
79 return atomic_read(&kctx->refcount); in kbasep_js_trace_get_refcnt()
82 static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx) in kbasep_js_trace_get_refcnt() argument
85 CSTD_UNUSED(kctx); in kbasep_js_trace_get_refcnt()
137 bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx) in kbasep_js_runpool_retain_ctx_nolock() argument
144 KBASE_DEBUG_ASSERT(kctx != NULL); in kbasep_js_runpool_retain_ctx_nolock()
149 as_nr = kctx->as_nr; in kbasep_js_runpool_retain_ctx_nolock()
150 if (atomic_read(&kctx->refcount) > 0) { in kbasep_js_runpool_retain_ctx_nolock()
[all …]
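
Note: kbasep_js_runpool_retain_ctx_nolock() above illustrates the "retain only if already resident" rule: the refcount is bumped only when the context holds an address space and the count is already non-zero. A reduced sketch of that check (names hypothetical; the real function runs under the scheduler's lock, which is what makes the read-then-increment safe):

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define AS_NR_INVALID (-1)

    struct js_ctx_sketch { int as_nr; atomic_t refcount; };

    /* Retain succeeds only while the context is scheduled in and already
     * referenced; the caller's lock keeps refcount from dropping to zero
     * between the read and the increment. */
    static bool retain_ctx_nolock(struct js_ctx_sketch *kctx)
    {
        if (kctx->as_nr == AS_NR_INVALID)
            return false;               /* not bound to any address space */
        if (atomic_read(&kctx->refcount) > 0) {
            atomic_inc(&kctx->refcount);
            return true;
        }
        return false;
    }
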
mali_kbase_mmu.c
32 #define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) argument
71 static void kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync);
101 static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as, const …
122 struct kbase_context *kctx; in page_fault_worker() local
137 kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no); in page_fault_worker()
138 if (WARN_ON(!kctx)) { in page_fault_worker()
143 KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev); in page_fault_worker()
146 kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Protected mode fault"); in page_fault_worker()
147 kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, KBASE_MMU_FAULT_TYPE_PAGE); in page_fault_worker()
159 kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Permission failure"); in page_fault_worker()
[all …]
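
Note: page_fault_worker() above resolves the faulting address space back to a context without taking a reference, and treats a failed lookup as unexpected (WARN_ON). A skeletal version of that flow; the lookup is stubbed out and all *_sketch names are hypothetical:

    #include <linux/bug.h>  /* WARN_ON */

    struct kbase_device;

    struct fault_ctx_sketch { struct kbase_device *kbdev; };

    /* Stub standing in for kbasep_js_runpool_lookup_ctx_noretain();
     * the real driver indexes a per-AS context table. */
    static struct fault_ctx_sketch *lookup_ctx_noretain(struct kbase_device *kbdev,
                                                        int as_no)
    {
        return NULL;  /* stub */
    }

    static void fault_worker_sketch(struct kbase_device *kbdev, int as_no)
    {
        struct fault_ctx_sketch *kctx = lookup_ctx_noretain(kbdev, as_no);

        /* The AS can be unbound while the worker is queued, but the
         * driver still flags a missing context here as a bug. */
        if (WARN_ON(!kctx))
            return;
        WARN_ON(kctx->kbdev != kbdev);  /* stands in for KBASE_DEBUG_ASSERT */
    }
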
mali_kbase_ctx_sched.c
67 static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx) in kbasep_ctx_sched_find_as_for_ctx() argument
69 struct kbase_device *const kbdev = kctx->kbdev; in kbasep_ctx_sched_find_as_for_ctx()
75 if ((kctx->as_nr != KBASEP_AS_NR_INVALID) && (kbdev->as_free & (1u << kctx->as_nr))) { in kbasep_ctx_sched_find_as_for_ctx()
76 return kctx->as_nr; in kbasep_ctx_sched_find_as_for_ctx()
90 int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx) in kbase_ctx_sched_retain_ctx() argument
92 struct kbase_device *const kbdev = kctx->kbdev; in kbase_ctx_sched_retain_ctx()
99 if (atomic_inc_return(&kctx->refcount) == 1) { in kbase_ctx_sched_retain_ctx()
100 int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx); in kbase_ctx_sched_retain_ctx()
106 if (free_as != kctx->as_nr) { in kbase_ctx_sched_retain_ctx()
115 kctx->as_nr = free_as; in kbase_ctx_sched_retain_ctx()
[all …]
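
Note: kbase_ctx_sched_retain_ctx() above shows lazy address-space assignment: only the first retain (refcount 0 -> 1) pays for finding an AS, and the search prefers the AS the context used last time if it is still free. A condensed sketch (names hypothetical; the real code also reprograms the MMU and handles the no-free-AS case):

    #include <linux/atomic.h>

    #define AS_NR_INVALID (-1)

    struct dev_sketch { unsigned int as_free; /* bitmask of free AS slots */ };
    struct sched_ctx_sketch {
        struct dev_sketch *kbdev;
        int as_nr;
        atomic_t refcount;
    };

    /* Prefer the previously used AS so cached MMU state stays warm. */
    static int find_as_for_ctx(struct sched_ctx_sketch *kctx)
    {
        struct dev_sketch *const kbdev = kctx->kbdev;

        if (kctx->as_nr != AS_NR_INVALID &&
            (kbdev->as_free & (1u << kctx->as_nr)))
            return kctx->as_nr;
        return AS_NR_INVALID;
    }

    static void retain_ctx(struct sched_ctx_sketch *kctx)
    {
        if (atomic_inc_return(&kctx->refcount) == 1) {
            int const free_as = find_as_for_ctx(kctx);

            if (free_as != kctx->as_nr)
                kctx->as_nr = free_as;  /* rebind (real code programs the MMU) */
        }
    }
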
mali_kbase_mem_profile_debugfs.c
32 struct kbase_context *kctx = sfile->private; in kbasep_mem_profile_seq_show() local
34 mutex_lock(&kctx->mem_profile_lock); in kbasep_mem_profile_seq_show()
36 seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size); in kbasep_mem_profile_seq_show()
40 mutex_unlock(&kctx->mem_profile_lock); in kbasep_mem_profile_seq_show()
60 int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, size_t size) in kbasep_mem_profile_debugfs_insert() argument
64 mutex_lock(&kctx->mem_profile_lock); in kbasep_mem_profile_debugfs_insert()
66 … dev_dbg(kctx->kbdev->dev, "initialised: %d", kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)); in kbasep_mem_profile_debugfs_insert()
68 if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) { in kbasep_mem_profile_debugfs_insert()
69 …if (!debugfs_create_file("mem_profile", S_IRUGO, kctx->kctx_dentry, kctx, &kbasep_mem_profile_debu… in kbasep_mem_profile_debugfs_insert()
72 kbase_ctx_flag_set(kctx, KCTX_MEM_PROFILE_INITIALIZED); in kbasep_mem_profile_debugfs_insert()
[all …]
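
Note: the mem_profile hits show the usual debugfs seq_file pairing: the profile buffer is replaced and freed under mem_profile_lock, so the show() callback takes the same mutex before seq_write(). A minimal reader sketch (structure reduced; names hypothetical):

    #include <linux/seq_file.h>
    #include <linux/mutex.h>

    struct mem_profile_sketch {
        struct mutex lock;   /* stands in for kctx->mem_profile_lock */
        char *data;
        size_t size;
    };

    static int mem_profile_seq_show(struct seq_file *sfile, void *unused)
    {
        struct mem_profile_sketch *p = sfile->private;

        mutex_lock(&p->lock);
        seq_write(sfile, p->data, p->size);  /* dump the raw profile buffer */
        mutex_unlock(&p->lock);
        return 0;
    }
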
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_context.c
42 struct kbase_context *kctx; in kbase_create_context() local
48 kctx = vzalloc(sizeof(*kctx)); in kbase_create_context()
50 if (!kctx) in kbase_create_context()
56 kctx->kbdev = kbdev; in kbase_create_context()
57 kctx->as_nr = KBASEP_AS_NR_INVALID; in kbase_create_context()
58 atomic_set(&kctx->refcount, 0); in kbase_create_context()
60 kbase_ctx_flag_set(kctx, KCTX_COMPAT); in kbase_create_context()
62 kctx->timeline.owner_tgid = task_tgid_nr(current); in kbase_create_context()
64 atomic_set(&kctx->setup_complete, 0); in kbase_create_context()
65 atomic_set(&kctx->setup_in_progress, 0); in kbase_create_context()
[all …]
mali_kbase_js.c
71 struct kbase_device *kbdev, struct kbase_context *kctx,
77 static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
83 struct kbase_context *kctx) in kbasep_js_trace_get_refcnt() argument
85 return atomic_read(&kctx->refcount); in kbasep_js_trace_get_refcnt()
89 struct kbase_context *kctx) in kbasep_js_trace_get_refcnt() argument
92 CSTD_UNUSED(kctx); in kbasep_js_trace_get_refcnt()
140 struct kbase_context *kctx) in kbasep_js_runpool_retain_ctx_nolock() argument
147 KBASE_DEBUG_ASSERT(kctx != NULL); in kbasep_js_runpool_retain_ctx_nolock()
152 as_nr = kctx->as_nr; in kbasep_js_runpool_retain_ctx_nolock()
153 if (atomic_read(&kctx->refcount) > 0) { in kbasep_js_runpool_retain_ctx_nolock()
[all …]
mali_kbase_mmu.c
37 #define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) argument
66 static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
98 static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
120 struct kbase_context *kctx; in page_fault_worker() local
135 kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no); in page_fault_worker()
136 if (WARN_ON(!kctx)) { in page_fault_worker()
141 KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev); in page_fault_worker()
145 kbase_mmu_report_fault_and_kill(kctx, faulting_as, in page_fault_worker()
147 kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, in page_fault_worker()
161 kbase_mmu_report_fault_and_kill(kctx, faulting_as, in page_fault_worker()
[all …]
mali_kbase_ctx_sched.c
68 static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx) in kbasep_ctx_sched_find_as_for_ctx() argument
70 struct kbase_device *const kbdev = kctx->kbdev; in kbasep_ctx_sched_find_as_for_ctx()
76 if ((kctx->as_nr != KBASEP_AS_NR_INVALID) && in kbasep_ctx_sched_find_as_for_ctx()
77 (kbdev->as_free & (1u << kctx->as_nr))) in kbasep_ctx_sched_find_as_for_ctx()
78 return kctx->as_nr; in kbasep_ctx_sched_find_as_for_ctx()
90 int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx) in kbase_ctx_sched_retain_ctx() argument
92 struct kbase_device *const kbdev = kctx->kbdev; in kbase_ctx_sched_retain_ctx()
99 if (atomic_inc_return(&kctx->refcount) == 1) { in kbase_ctx_sched_retain_ctx()
100 int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx); in kbase_ctx_sched_retain_ctx()
107 if (free_as != kctx->as_nr) { in kbase_ctx_sched_retain_ctx()
[all …]
mali_kbase_mem_profile_debugfs.c
34 struct kbase_context *kctx = sfile->private; in kbasep_mem_profile_seq_show() local
36 mutex_lock(&kctx->mem_profile_lock); in kbasep_mem_profile_seq_show()
38 seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size); in kbasep_mem_profile_seq_show()
42 mutex_unlock(&kctx->mem_profile_lock); in kbasep_mem_profile_seq_show()
62 int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, in kbasep_mem_profile_debugfs_insert() argument
67 mutex_lock(&kctx->mem_profile_lock); in kbasep_mem_profile_debugfs_insert()
69 dev_dbg(kctx->kbdev->dev, "initialised: %d", in kbasep_mem_profile_debugfs_insert()
70 kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)); in kbasep_mem_profile_debugfs_insert()
72 if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) { in kbasep_mem_profile_debugfs_insert()
74 kctx->kctx_dentry, kctx, in kbasep_mem_profile_debugfs_insert()
[all …]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/context/
mali_kbase_context.c
79 static int kbase_insert_kctx_to_process(struct kbase_context *kctx) in kbase_insert_kctx_to_process() argument
81 struct rb_root *const prcs_root = &kctx->kbdev->process_root; in kbase_insert_kctx_to_process()
82 const pid_t tgid = kctx->tgid; in kbase_insert_kctx_to_process()
85 lockdep_assert_held(&kctx->kbdev->kctx_list_lock); in kbase_insert_kctx_to_process()
118 kctx->kprcs = kprcs; in kbase_insert_kctx_to_process()
119 list_add(&kctx->kprcs_link, &kprcs->kctx_list); in kbase_insert_kctx_to_process()
124 int kbase_context_common_init(struct kbase_context *kctx) in kbase_context_common_init() argument
130 kbase_disjoint_event(kctx->kbdev); in kbase_context_common_init()
132 kctx->as_nr = KBASEP_AS_NR_INVALID; in kbase_context_common_init()
134 atomic_set(&kctx->refcount, 0); in kbase_context_common_init()
[all …]
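
Note: kbase_insert_kctx_to_process() above keys per-process bookkeeping by tgid in an rb-tree under kctx_list_lock, then links the context onto the process's kctx_list. The sketch below shows just the link step and the lockdep contract (reduced, hypothetical structures; the rb-tree lookup/insert is elided):

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/lockdep.h>
    #include <linux/types.h>

    struct kprcs_sketch { pid_t tgid; struct list_head kctx_list; };
    struct kdev_sketch { struct mutex kctx_list_lock; };
    struct kctx_link_sketch {
        struct kdev_sketch *kbdev;
        pid_t tgid;
        struct kprcs_sketch *kprcs;
        struct list_head kprcs_link;
    };

    /* Caller must hold kbdev->kctx_list_lock; lockdep verifies this in
     * debug builds, matching the lockdep_assert_held() in the hit above. */
    static void link_ctx_to_process(struct kctx_link_sketch *kctx,
                                    struct kprcs_sketch *kprcs)
    {
        lockdep_assert_held(&kctx->kbdev->kctx_list_lock);
        kctx->kprcs = kprcs;
        list_add(&kctx->kprcs_link, &kprcs->kctx_list);
    }
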
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/context/
mali_kbase_context.c
83 static int kbase_insert_kctx_to_process(struct kbase_context *kctx) in kbase_insert_kctx_to_process() argument
85 struct rb_root *const prcs_root = &kctx->kbdev->process_root; in kbase_insert_kctx_to_process()
86 const pid_t tgid = kctx->tgid; in kbase_insert_kctx_to_process()
89 lockdep_assert_held(&kctx->kbdev->kctx_list_lock); in kbase_insert_kctx_to_process()
122 kctx->kprcs = kprcs; in kbase_insert_kctx_to_process()
123 list_add(&kctx->kprcs_link, &kprcs->kctx_list); in kbase_insert_kctx_to_process()
128 int kbase_context_common_init(struct kbase_context *kctx) in kbase_context_common_init() argument
134 kbase_disjoint_event(kctx->kbdev); in kbase_context_common_init()
136 kctx->as_nr = KBASEP_AS_NR_INVALID; in kbase_context_common_init()
138 atomic_set(&kctx->refcount, 0); in kbase_context_common_init()
[all …]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
mali_kbase_ctx_sched.c
32 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
34 return atomic_read(&kctx->refcount); in kbase_ktrace_get_ctx_refcnt()
37 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
39 CSTD_UNUSED(kctx); in kbase_ktrace_get_ctx_refcnt()
80 static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx) in kbasep_ctx_sched_find_as_for_ctx() argument
82 struct kbase_device *const kbdev = kctx->kbdev; in kbasep_ctx_sched_find_as_for_ctx()
88 if ((kctx->as_nr != KBASEP_AS_NR_INVALID) && in kbasep_ctx_sched_find_as_for_ctx()
89 (kbdev->as_free & (1u << kctx->as_nr))) in kbasep_ctx_sched_find_as_for_ctx()
90 return kctx->as_nr; in kbasep_ctx_sched_find_as_for_ctx()
102 int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx) in kbase_ctx_sched_retain_ctx() argument
[all …]
mali_kbase_js.c
77 struct kbase_device *kbdev, struct kbase_context *kctx,
83 static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
88 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
90 return atomic_read(&kctx->refcount); in kbase_ktrace_get_ctx_refcnt()
93 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
95 CSTD_UNUSED(kctx); in kbase_ktrace_get_ctx_refcnt()
155 jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio) in jsctx_rb_none_to_pull_prio() argument
158 struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js]; in jsctx_rb_none_to_pull_prio()
160 lockdep_assert_held(&kctx->kbdev->hwaccess_lock); in jsctx_rb_none_to_pull_prio()
164 dev_dbg(kctx->kbdev->dev, in jsctx_rb_none_to_pull_prio()
[all …]
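
Note: jsctx_rb_none_to_pull_prio() above checks one (priority, slot) queue for pullable atoms and documents via lockdep that the queue is only stable under hwaccess_lock. A reduced sketch (structures hypothetical; the real queue also tracks cross-slot dependencies):

    #include <linux/rbtree.h>
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>
    #include <linux/types.h>

    #define NUM_PRIOS 3
    #define NUM_SLOTS 3

    struct jsq_sketch { struct rb_root runnable_tree; };
    struct jsdev_sketch { spinlock_t hwaccess_lock; };
    struct jsctx_sketch {
        struct jsdev_sketch *kbdev;
        struct jsq_sketch jsctx_queue[NUM_PRIOS][NUM_SLOTS];
    };

    /* True when the (prio, slot) queue has nothing left to pull; callers
     * must hold hwaccess_lock or the tree can change under them. */
    static bool rb_none_to_pull_prio(struct jsctx_sketch *kctx, int js, int prio)
    {
        struct jsq_sketch *rb = &kctx->jsctx_queue[prio][js];

        lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
        return RB_EMPTY_ROOT(&rb->runnable_tree);
    }
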
mali_kbase_gwt.c
26 struct kbase_context *kctx, in kbase_gpu_gwt_setup_page_permission() argument
39 err = kbase_mmu_update_pages(kctx, reg->start_pfn, in kbase_gpu_gwt_setup_page_permission()
45 dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages failure\n"); in kbase_gpu_gwt_setup_page_permission()
52 static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx, in kbase_gpu_gwt_setup_pages() argument
55 kbase_gpu_gwt_setup_page_permission(kctx, flag, in kbase_gpu_gwt_setup_pages()
56 rb_first(&(kctx->reg_rbtree_same))); in kbase_gpu_gwt_setup_pages()
57 kbase_gpu_gwt_setup_page_permission(kctx, flag, in kbase_gpu_gwt_setup_pages()
58 rb_first(&(kctx->reg_rbtree_custom))); in kbase_gpu_gwt_setup_pages()
62 int kbase_gpu_gwt_start(struct kbase_context *kctx) in kbase_gpu_gwt_start() argument
64 kbase_gpu_vm_lock(kctx); in kbase_gpu_gwt_start()
[all …]
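
Note: kbase_gpu_gwt_start() above takes the VM lock first, refuses a second start while tracking is active, then walks both region rb-trees downgrading write permission so future GPU writes fault and can be recorded. A sketch of the lock-and-guard shape only (names and the -EBUSY return are assumptions based on the visible hits):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct gwt_ctx_sketch { struct mutex vm_lock; bool gwt_enabled; };

    static int gwt_start(struct gwt_ctx_sketch *kctx)
    {
        mutex_lock(&kctx->vm_lock);  /* stands in for kbase_gpu_vm_lock() */
        if (kctx->gwt_enabled) {
            mutex_unlock(&kctx->vm_lock);
            return -EBUSY;           /* tracking already active */
        }
        kctx->gwt_enabled = true;
        /* real driver: kbase_gpu_gwt_setup_pages() clears GPU_WR here */
        mutex_unlock(&kctx->vm_lock);
        return 0;
    }
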
mali_kbase_mem.c
71 static void free_partial_locked(struct kbase_context *kctx,
74 static size_t kbase_get_num_cpu_va_bits(struct kbase_context *kctx) in kbase_get_num_cpu_va_bits() argument
93 if (kbase_ctx_flag(kctx, KCTX_COMPAT)) in kbase_get_num_cpu_va_bits()
103 static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx, in kbase_gpu_va_to_rbtree() argument
108 kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA); in kbase_gpu_va_to_rbtree()
114 rbtree = &kctx->reg_rbtree_exec; in kbase_gpu_va_to_rbtree()
119 if (kbase_ctx_flag(kctx, KCTX_COMPAT)) { in kbase_gpu_va_to_rbtree()
125 kbase_ctx_reg_zone_get(kctx, in kbase_gpu_va_to_rbtree()
132 rbtree = &kctx->reg_rbtree_custom; in kbase_gpu_va_to_rbtree()
134 rbtree = &kctx->reg_rbtree_same; in kbase_gpu_va_to_rbtree()
[all …]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/
mali_kbase_ctx_sched.c
32 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
34 return atomic_read(&kctx->refcount); in kbase_ktrace_get_ctx_refcnt()
37 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
39 CSTD_UNUSED(kctx); in kbase_ktrace_get_ctx_refcnt()
79 static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx) in kbasep_ctx_sched_find_as_for_ctx() argument
81 struct kbase_device *const kbdev = kctx->kbdev; in kbasep_ctx_sched_find_as_for_ctx()
87 if ((kctx->as_nr != KBASEP_AS_NR_INVALID) && (kbdev->as_free & (1u << kctx->as_nr))) { in kbasep_ctx_sched_find_as_for_ctx()
88 return kctx->as_nr; in kbasep_ctx_sched_find_as_for_ctx()
102 int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx) in kbase_ctx_sched_retain_ctx() argument
104 struct kbase_device *const kbdev = kctx->kbdev; in kbase_ctx_sched_retain_ctx()
[all …]
mali_kbase_js.c
71 (struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *kato…
75 static void kbase_js_foreach_ctx_job(struct kbase_context *kctx, kbasep_js_ctx_job_cb callback);
79 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
81 return atomic_read(&kctx->refcount); in kbase_ktrace_get_ctx_refcnt()
84 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx) in kbase_ktrace_get_ctx_refcnt() argument
86 CSTD_UNUSED(kctx); in kbase_ktrace_get_ctx_refcnt()
150 static inline bool jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio) in jsctx_rb_none_to_pull_prio() argument
153 struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js]; in jsctx_rb_none_to_pull_prio()
155 lockdep_assert_held(&kctx->kbdev->hwaccess_lock); in jsctx_rb_none_to_pull_prio()
159 …dev_dbg(kctx->kbdev->dev, "Slot %d (prio %d) is %spullable in kctx %p\n", js, prio, none_to_pull ?… in jsctx_rb_none_to_pull_prio()
[all …]
mali_kbase_gwt.c
26 static inline void kbase_gpu_gwt_setup_page_permission(struct kbase_context *kctx, unsigned long fl… in kbase_gpu_gwt_setup_page_permission() argument
37 …err = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_gpu_phy_pages(reg), reg->gpu_alloc->n… in kbase_gpu_gwt_setup_page_permission()
40 dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages failure\n"); in kbase_gpu_gwt_setup_page_permission()
48 static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx, unsigned long flag) in kbase_gpu_gwt_setup_pages() argument
50 kbase_gpu_gwt_setup_page_permission(kctx, flag, rb_first(&(kctx->reg_rbtree_same))); in kbase_gpu_gwt_setup_pages()
51 kbase_gpu_gwt_setup_page_permission(kctx, flag, rb_first(&(kctx->reg_rbtree_custom))); in kbase_gpu_gwt_setup_pages()
54 int kbase_gpu_gwt_start(struct kbase_context *kctx) in kbase_gpu_gwt_start() argument
56 kbase_gpu_vm_lock(kctx); in kbase_gpu_gwt_start()
57 if (kctx->gwt_enabled) { in kbase_gpu_gwt_start()
58 kbase_gpu_vm_unlock(kctx); in kbase_gpu_gwt_start()
[all …]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/context/backend/
mali_kbase_context_jm.c
44 void kbase_context_debugfs_init(struct kbase_context *const kctx) in kbase_context_debugfs_init() argument
46 kbase_debug_mem_view_init(kctx); in kbase_context_debugfs_init()
47 kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx); in kbase_context_debugfs_init()
48 kbase_jit_debugfs_init(kctx); in kbase_context_debugfs_init()
49 kbasep_jd_debugfs_ctx_init(kctx); in kbase_context_debugfs_init()
50 kbase_debug_job_fault_context_init(kctx); in kbase_context_debugfs_init()
54 void kbase_context_debugfs_term(struct kbase_context *const kctx) in kbase_context_debugfs_term() argument
56 debugfs_remove_recursive(kctx->kctx_dentry); in kbase_context_debugfs_term()
57 kbase_debug_job_fault_context_term(kctx); in kbase_context_debugfs_term()
61 void kbase_context_debugfs_init(struct kbase_context *const kctx) in kbase_context_debugfs_init() argument
[all …]
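
Note: the paired kbase_context_debugfs_init()/_term() definitions above are the CONFIG_DEBUG_FS on/off variants: with debugfs enabled, teardown is a single recursive remove of the per-context directory; with it disabled, both compile to stubs that only mark kctx unused. A sketch of that convention (structure hypothetical):

    #include <linux/debugfs.h>

    struct dbg_ctx_sketch { struct dentry *kctx_dentry; };

    #ifdef CONFIG_DEBUG_FS
    static void ctx_debugfs_term(struct dbg_ctx_sketch *kctx)
    {
        /* removes every file registered under the context directory */
        debugfs_remove_recursive(kctx->kctx_dentry);
    }
    #else
    static void ctx_debugfs_term(struct dbg_ctx_sketch *kctx)
    {
        (void)kctx;  /* stands in for CSTD_UNUSED(kctx) */
    }
    #endif
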
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/csf/
mali_kbase_csf_event.c
38 struct kbase_context *kctx; member
43 int kbase_csf_event_wait_add(struct kbase_context *kctx, in kbase_csf_event_wait_add() argument
53 event_cb->kctx = kctx; in kbase_csf_event_wait_add()
57 spin_lock_irqsave(&kctx->csf.event.lock, flags); in kbase_csf_event_wait_add()
58 list_add_tail(&event_cb->link, &kctx->csf.event.callback_list); in kbase_csf_event_wait_add()
59 dev_dbg(kctx->kbdev->dev, in kbase_csf_event_wait_add()
62 spin_unlock_irqrestore(&kctx->csf.event.lock, flags); in kbase_csf_event_wait_add()
70 void kbase_csf_event_wait_remove(struct kbase_context *kctx, in kbase_csf_event_wait_remove() argument
76 spin_lock_irqsave(&kctx->csf.event.lock, flags); in kbase_csf_event_wait_remove()
78 list_for_each_entry(event_cb, &kctx->csf.event.callback_list, link) { in kbase_csf_event_wait_remove()
[all …]
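
Note: kbase_csf_event_wait_add() above appends a callback under a spinlock with IRQs saved, because the same callback_list is walked from event-delivery paths. A reduced registration sketch (structures hypothetical):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct csf_event_sketch { spinlock_t lock; struct list_head callback_list; };
    struct event_cb_sketch { struct list_head link; void *param; };

    static int event_wait_add(struct csf_event_sketch *ev, void *param)
    {
        unsigned long flags;
        struct event_cb_sketch *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

        if (!cb)
            return -ENOMEM;
        cb->param = param;
        spin_lock_irqsave(&ev->lock, flags);
        list_add_tail(&cb->link, &ev->callback_list);  /* FIFO delivery order */
        spin_unlock_irqrestore(&ev->lock, flags);
        return 0;
    }
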
mali_kbase_csf_tiler_heap.c
95 struct kbase_context *const kctx = heap->kctx; in link_chunk() local
97 u64 *const prev_hdr = kbase_vmap_prot(kctx, prev->gpu_va, in link_chunk()
101 dev_err(kctx->kbdev->dev, in link_chunk()
108 kbase_vunmap(kctx, &map); in link_chunk()
110 dev_dbg(kctx->kbdev->dev, in link_chunk()
137 struct kbase_context *const kctx = heap->kctx; in init_chunk() local
140 dev_err(kctx->kbdev->dev, in init_chunk()
145 chunk_hdr = kbase_vmap_prot(kctx, in init_chunk()
149 dev_err(kctx->kbdev->dev, in init_chunk()
155 kbase_vunmap(kctx, &map); in init_chunk()
[all …]
mali_kbase_csf_cpu_queue_debugfs.c
28 bool kbase_csf_cpu_queue_read_dump_req(struct kbase_context *kctx, in kbase_csf_cpu_queue_read_dump_req() argument
31 if (atomic_cmpxchg(&kctx->csf.cpu_queue.dump_req_status, in kbase_csf_cpu_queue_read_dump_req()
52 struct kbase_context *kctx = file->private; in kbasep_csf_cpu_queue_debugfs_show() local
54 mutex_lock(&kctx->csf.lock); in kbasep_csf_cpu_queue_debugfs_show()
55 if (atomic_read(&kctx->csf.cpu_queue.dump_req_status) != in kbasep_csf_cpu_queue_debugfs_show()
58 mutex_unlock(&kctx->csf.lock); in kbasep_csf_cpu_queue_debugfs_show()
62 atomic_set(&kctx->csf.cpu_queue.dump_req_status, BASE_CSF_CPU_QUEUE_DUMP_ISSUED); in kbasep_csf_cpu_queue_debugfs_show()
63 init_completion(&kctx->csf.cpu_queue.dump_cmp); in kbasep_csf_cpu_queue_debugfs_show()
64 kbase_event_wakeup(kctx); in kbasep_csf_cpu_queue_debugfs_show()
65 mutex_unlock(&kctx->csf.lock); in kbasep_csf_cpu_queue_debugfs_show()
[all …]
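
Note: the cpu_queue hits show a dump-request handshake driven by one atomic word: the debugfs reader advances dump_req_status with atomic_cmpxchg() so exactly one requester wins, then wakes the context and waits for userspace to complete the dump. A sketch of the compare-and-swap gate only (state names hypothetical, loosely mirroring BASE_CSF_CPU_QUEUE_DUMP_*; the exact transition order is not visible in the truncated hits):

    #include <linux/atomic.h>
    #include <linux/types.h>

    enum { DUMP_COMPLETE = 0, DUMP_PENDING = 1, DUMP_ISSUED = 2 };

    /* Returns true for the single caller that observes the expected state;
     * concurrent callers lose the cmpxchg and back off.
     * e.g. advance_dump_state(&st, DUMP_PENDING, DUMP_ISSUED) */
    static bool advance_dump_state(atomic_t *status, int from, int to)
    {
        return atomic_cmpxchg(status, from, to) == from;
    }
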
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/context/backend/
mali_kbase_context_jm.c
41 void kbase_context_debugfs_init(struct kbase_context *const kctx) in kbase_context_debugfs_init() argument
43 kbase_debug_mem_view_init(kctx); in kbase_context_debugfs_init()
44 kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx); in kbase_context_debugfs_init()
45 kbase_jit_debugfs_init(kctx); in kbase_context_debugfs_init()
46 kbasep_jd_debugfs_ctx_init(kctx); in kbase_context_debugfs_init()
50 void kbase_context_debugfs_term(struct kbase_context *const kctx) in kbase_context_debugfs_term() argument
52 debugfs_remove_recursive(kctx->kctx_dentry); in kbase_context_debugfs_term()
56 void kbase_context_debugfs_init(struct kbase_context *const kctx) in kbase_context_debugfs_init() argument
58 CSTD_UNUSED(kctx); in kbase_context_debugfs_init()
62 void kbase_context_debugfs_term(struct kbase_context *const kctx) in kbase_context_debugfs_term() argument
[all …]
mali_kbase_context_csf.c
43 void kbase_context_debugfs_init(struct kbase_context *const kctx) in kbase_context_debugfs_init() argument
45 kbase_debug_mem_view_init(kctx); in kbase_context_debugfs_init()
46 kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx); in kbase_context_debugfs_init()
47 kbase_jit_debugfs_init(kctx); in kbase_context_debugfs_init()
48 kbase_csf_queue_group_debugfs_init(kctx); in kbase_context_debugfs_init()
49 kbase_csf_kcpu_debugfs_init(kctx); in kbase_context_debugfs_init()
50 kbase_csf_tiler_heap_debugfs_init(kctx); in kbase_context_debugfs_init()
51 kbase_csf_tiler_heap_total_debugfs_init(kctx); in kbase_context_debugfs_init()
52 kbase_csf_cpu_queue_debugfs_init(kctx); in kbase_context_debugfs_init()
56 void kbase_context_debugfs_term(struct kbase_context *const kctx) in kbase_context_debugfs_term() argument
[all …]
