// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Base kernel MMU management specific for CSF GPU.
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_fault.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_reset_gpu.h>
#include <mali_kbase_as_fault_debugfs.h>
#include <mmu/mali_kbase_mmu_internal.h>

void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
		struct kbase_mmu_setup * const setup)
{
	/* Set up the required caching policies at the correct indices
	 * in the memattr register.
	 */
	setup->memattr =
		(AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
			(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
		(AS_MEMATTR_FORCE_TO_CACHE_ALL <<
			(AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
		(AS_MEMATTR_WRITE_ALLOC <<
			(AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
		(AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
			(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
		(AS_MEMATTR_AARCH64_OUTER_WA <<
			(AS_MEMATTR_INDEX_OUTER_WA * 8)) |
		(AS_MEMATTR_AARCH64_NON_CACHEABLE <<
			(AS_MEMATTR_INDEX_NON_CACHEABLE * 8)) |
		(AS_MEMATTR_AARCH64_SHARED <<
			(AS_MEMATTR_INDEX_SHARED * 8));

	setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
	setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
}
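
/* Worked example of the packing above (illustrative, not driver code):
 * each AS_MEMATTR_* encoding is one byte wide, and attribute index n
 * occupies bits [8n+7:8n] of the 64-bit memattr register. Assuming, say,
 * AS_MEMATTR_INDEX_NON_CACHEABLE == 5, the NON_CACHEABLE encoding lands
 * in bits [47:40]:
 *
 *	memattr |= (u64)AS_MEMATTR_AARCH64_NON_CACHEABLE << (5 * 8);
 */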

/**
 * submit_work_pagefault() - Submit a work for MMU page fault.
 *
 * @kbdev:    Kbase device pointer
 * @as_nr:    Faulty address space
 * @fault:    Data relating to the fault
 *
 * This function submits a work item for reporting the details of an MMU
 * page fault.
 */
static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr,
		struct kbase_fault *fault)
{
	unsigned long flags;
	struct kbase_as *const as = &kbdev->as[as_nr];
	struct kbase_context *kctx;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kctx = kbase_ctx_sched_as_to_ctx_nolock(kbdev, as_nr);

	if (kctx) {
		kbase_ctx_sched_retain_ctx_refcount(kctx);

		as->pf_data = (struct kbase_fault) {
			.status = fault->status,
			.addr = fault->addr,
		};

		/*
		 * A page fault work item could already be pending for the
		 * context's address space, when the page fault occurs for
		 * MCU's address space. queue_work() returns false in that
		 * case, so drop the refcount taken above.
		 */
		if (!queue_work(as->pf_wq, &as->work_pagefault)) {
			dev_dbg(kbdev->dev,
				"Page fault is already pending for as %u\n",
				as_nr);
			kbase_ctx_sched_release_ctx(kctx);
		} else {
			atomic_inc(&kbdev->faults_pending);
		}
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

void kbase_mmu_report_mcu_as_fault_and_reset(struct kbase_device *kbdev,
		struct kbase_fault *fault)
{
	/* decode the fault status */
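	/* Assumed layout of the raw fault status word, matching the masks
	 * and shifts below: bits [7:0] exception type, bits [9:8] access
	 * type, bits [31:16] source id.
	 */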
	u32 exception_type = fault->status & 0xFF;
	u32 access_type = (fault->status >> 8) & 0x3;
	u32 source_id = (fault->status >> 16);
	int as_no;

	/* terminal fault, print info about the fault */
	dev_err(kbdev->dev,
		"Unexpected Page fault in firmware address space at VA 0x%016llX\n"
		"raw fault status: 0x%X\n"
		"exception type 0x%X: %s\n"
		"access type 0x%X: %s\n"
		"source id 0x%X\n",
		fault->addr,
		fault->status,
		exception_type, kbase_gpu_exception_name(exception_type),
		access_type, kbase_gpu_access_type_name(fault->status),
		source_id);

	/* Report the MMU fault for all address spaces except MCU_AS_NR,
	 * which is AS 0 on CSF GPUs - hence the loop starts at 1.
	 */
	for (as_no = 1; as_no < kbdev->nr_hw_address_spaces; as_no++)
		submit_work_pagefault(kbdev, as_no, fault);

	/* GPU reset is required to recover */
	if (kbase_prepare_to_reset_gpu(kbdev,
				       RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
		kbase_reset_gpu(kbdev);
}
KBASE_EXPORT_TEST_API(kbase_mmu_report_mcu_as_fault_and_reset);

void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
		struct kbase_as *as, struct kbase_fault *fault)
{
	struct kbase_device *kbdev = kctx->kbdev;
	u32 const status = fault->status;
	int exception_type = (status & GPU_FAULTSTATUS_EXCEPTION_TYPE_MASK) >>
				GPU_FAULTSTATUS_EXCEPTION_TYPE_SHIFT;
	int access_type = (status & GPU_FAULTSTATUS_ACCESS_TYPE_MASK) >>
				GPU_FAULTSTATUS_ACCESS_TYPE_SHIFT;
	int source_id = (status & GPU_FAULTSTATUS_SOURCE_ID_MASK) >>
				GPU_FAULTSTATUS_SOURCE_ID_SHIFT;
	const char *addr_valid = (status & GPU_FAULTSTATUS_ADDR_VALID_FLAG) ?
					"true" : "false";
	int as_no = as->number;
	unsigned long flags;

	/* terminal fault, print info about the fault */
	dev_err(kbdev->dev,
		"GPU bus fault in AS%d at VA 0x%016llX\n"
		"VA_VALID: %s\n"
		"raw fault status: 0x%X\n"
		"exception type 0x%X: %s\n"
		"access type 0x%X: %s\n"
		"source id 0x%X\n"
		"pid: %d\n",
		as_no, fault->addr,
		addr_valid,
		status,
		exception_type, kbase_gpu_exception_name(exception_type),
		access_type, kbase_gpu_access_type_name(access_type),
		source_id,
		kctx->pid);

	/* AS transaction begin */
	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_mmu_disable(kctx);
	kbase_ctx_flag_set(kctx, KCTX_AS_DISABLED_ON_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	mutex_unlock(&kbdev->mmu_hw_mutex);
	/* AS transaction end */

	/* Switching to UNMAPPED mode above would have enabled the firmware to
	 * recover from the fault (if the memory access was made by firmware)
	 * and it can then respond to CSG termination requests to be sent now.
	 * All GPU command queue groups associated with the context would be
	 * affected as they use the same GPU address space.
	 */
	kbase_csf_ctx_handle_fault(kctx, fault);

	/* Now clear the GPU fault */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
			GPU_COMMAND_CLEAR_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

/*
 * The caller must ensure it has retained the context to prevent it from
 * being scheduled out whilst it's being worked on.
 */
void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
		struct kbase_as *as, const char *reason_str,
		struct kbase_fault *fault)
{
	unsigned long flags;
	unsigned int exception_type;
	unsigned int access_type;
	unsigned int source_id;
	int as_no;
	struct kbase_device *kbdev;
	const u32 status = fault->status;

	as_no = as->number;
	kbdev = kctx->kbdev;

	/* Make sure the context was active */
	if (WARN_ON(atomic_read(&kctx->refcount) <= 0))
		return;

	/* decode the fault status */
	exception_type = AS_FAULTSTATUS_EXCEPTION_TYPE_GET(status);
	access_type = AS_FAULTSTATUS_ACCESS_TYPE_GET(status);
	source_id = AS_FAULTSTATUS_SOURCE_ID_GET(status);

	/* terminal fault, print info about the fault */
	dev_err(kbdev->dev,
		"Unhandled Page fault in AS%d at VA 0x%016llX\n"
		"Reason: %s\n"
		"raw fault status: 0x%X\n"
		"exception type 0x%X: %s\n"
		"access type 0x%X: %s\n"
		"source id 0x%X\n"
		"pid: %d\n",
		as_no, fault->addr,
		reason_str,
		status,
		exception_type, kbase_gpu_exception_name(exception_type),
		access_type, kbase_gpu_access_type_name(status),
		source_id,
		kctx->pid);

	/* AS transaction begin */
	mutex_lock(&kbdev->mmu_hw_mutex);

	/* switch to UNMAPPED mode,
	 * will abort all jobs and stop any hw counter dumping
	 */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_mmu_disable(kctx);
	kbase_ctx_flag_set(kctx, KCTX_AS_DISABLED_ON_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	mutex_unlock(&kbdev->mmu_hw_mutex);
	/* AS transaction end */

	/* Switching to UNMAPPED mode above would have enabled the firmware to
	 * recover from the fault (if the memory access was made by firmware)
	 * and it can then respond to CSG termination requests to be sent now.
	 * All GPU command queue groups associated with the context would be
	 * affected as they use the same GPU address space.
	 */
	kbase_csf_ctx_handle_fault(kctx, fault);

	/* Clear down the fault */
	kbase_mmu_hw_clear_fault(kbdev, as,
			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
	kbase_mmu_hw_enable_fault(kbdev, as,
			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}

/**
 * kbase_mmu_interrupt_process() - Process a bus or page fault.
 * @kbdev:	The kbase_device the fault happened on
 * @kctx:	The kbase_context for the faulting address space if one was
 *		found.
 * @as:		The address space that has the fault
 * @fault:	Data relating to the fault
 *
 * This function will process a fault on a specific address space.
 */
static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_as *as,
		struct kbase_fault *fault)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (!kctx) {
		dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
				kbase_as_has_bus_fault(as, fault) ?
						"Bus error" : "Page fault",
				as->number, fault->addr);

		/* Since no ctx was found, the MMU must be disabled. */
		WARN_ON(as->current_setup.transtab);

		if (kbase_as_has_bus_fault(as, fault))
			kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
				GPU_COMMAND_CLEAR_FAULT);
		else if (kbase_as_has_page_fault(as, fault)) {
			kbase_mmu_hw_clear_fault(kbdev, as,
					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
			kbase_mmu_hw_enable_fault(kbdev, as,
					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
		}

		return;
	}

	if (kbase_as_has_bus_fault(as, fault)) {
		/*
		 * We need to switch to UNMAPPED mode - but we do this in a
		 * worker so that we can sleep
		 */
		WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
		atomic_inc(&kbdev->faults_pending);
	} else {
		WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
		atomic_inc(&kbdev->faults_pending);
	}
}

int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev,
		u32 status, u32 as_nr)
{
	struct kbase_context *kctx;
	unsigned long flags;
	struct kbase_as *as;
	struct kbase_fault *fault;

	if (WARN_ON(as_nr == MCU_AS_NR))
		return -EINVAL;

	if (WARN_ON(as_nr >= BASE_MAX_NR_AS))
		return -EINVAL;

	as = &kbdev->as[as_nr];
	fault = &as->bf_data;
	fault->status = status;
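	/* The 64-bit fault address is split across a HI/LO register pair. */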
	fault->addr = (u64) kbase_reg_read(kbdev,
		GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
	fault->addr |= kbase_reg_read(kbdev,
		GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
	fault->protected_mode = false;

	/* report the fault to debugfs */
	kbase_as_fault_debugfs_new(kbdev, as_nr);

	kctx = kbase_ctx_sched_as_to_ctx_refcount(kbdev, as_nr);

	/* Process the bus fault interrupt for this address space */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return 0;
}

void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
{
	const int num_as = 16;
	const int pf_shift = 0;
	const unsigned long as_bit_mask = (1UL << num_as) - 1;
	unsigned long flags;
	u32 new_mask;
	u32 tmp;
	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask);
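
	/* Each set bit n in pf_bits flags a pending page fault on address
	 * space n; on CSF GPUs AS 0 belongs to the MCU firmware.
	 */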

	/* remember current mask */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
	/* mask interrupts for now */
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);

	while (pf_bits) {
		struct kbase_context *kctx;
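		/* ffs() is one-based, so subtract one to get the index of
		 * the lowest address space with a pending fault.
		 */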
		int as_no = ffs(pf_bits) - 1;
		struct kbase_as *as = &kbdev->as[as_no];
		struct kbase_fault *fault = &as->pf_data;

		/* find faulting address */
		fault->addr = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
				AS_FAULTADDRESS_HI));
		fault->addr <<= 32;
		fault->addr |= kbase_reg_read(kbdev, MMU_AS_REG(as_no,
				AS_FAULTADDRESS_LO));

		/* Mark the fault protected or not */
		fault->protected_mode = false;

		/* report the fault to debugfs */
		kbase_as_fault_debugfs_new(kbdev, as_no);

		/* record the fault status */
		fault->status = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
				AS_FAULTSTATUS));

		fault->extra_addr = kbase_reg_read(kbdev,
					MMU_AS_REG(as_no, AS_FAULTEXTRA_HI));
		fault->extra_addr <<= 32;
		fault->extra_addr |= kbase_reg_read(kbdev,
					MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));

		/* Mark page fault as handled */
		pf_bits &= ~(1UL << as_no);

		/* remove the queued PF from the mask */
		new_mask &= ~MMU_PAGE_FAULT(as_no);

		if (as_no == MCU_AS_NR) {
			kbase_mmu_report_mcu_as_fault_and_reset(kbdev, fault);
			/* Pointless to handle remaining faults */
			break;
		}

		/*
		 * Refcount the kctx - it shouldn't disappear anyway, since
		 * Page faults _should_ only occur whilst GPU commands are
		 * executing, and a command causing the Page fault shouldn't
		 * complete until the MMU is updated.
		 * Reference is released at the end of bottom half of page
		 * fault handling.
		 */
		kctx = kbase_ctx_sched_as_to_ctx_refcount(kbdev, as_no);

		/* Process the interrupt for this address space */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}

	/* reenable interrupts */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
	new_mask |= tmp;
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
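
/* Incremental rendering requires soft-stopping the job that provoked the
 * fault, which cannot be done on CSF GPUs, hence the unconditional failure.
 */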
int kbase_mmu_switch_to_ir(struct kbase_context *const kctx,
	struct kbase_va_region *const reg)
{
	/* Can't soft-stop the provoking job */
	return -EPERM;
}

/**
 * kbase_mmu_gpu_fault_worker() - Process a GPU fault for the device.
 *
 * @data:  work_struct passed by queue_work()
 *
 * Report a GPU fatal error for all GPU command queue groups that are
 * using the address space and terminate them.
 */
static void kbase_mmu_gpu_fault_worker(struct work_struct *data)
{
	struct kbase_as *const faulting_as = container_of(data, struct kbase_as,
			work_gpufault);
	const u32 as_nr = faulting_as->number;
	struct kbase_device *const kbdev = container_of(faulting_as, struct
			kbase_device, as[as_nr]);
	struct kbase_fault *fault;
	struct kbase_context *kctx;
	u32 status;
	u64 address;
	u32 as_valid;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	fault = &faulting_as->gf_data;
	status = fault->status;
	as_valid = status & GPU_FAULTSTATUS_JASID_VALID_FLAG;
	address = fault->addr;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	dev_warn(kbdev->dev,
		 "GPU Fault 0x%08x (%s) in AS%u at 0x%016llx\n"
		 "ASID_VALID: %s,  ADDRESS_VALID: %s\n",
		 status,
		 kbase_gpu_exception_name(
			GPU_FAULTSTATUS_EXCEPTION_TYPE_GET(status)),
		 as_nr, address,
		 as_valid ? "true" : "false",
		 status & GPU_FAULTSTATUS_ADDR_VALID_FLAG ? "true" : "false");

	kctx = kbase_ctx_sched_as_to_ctx(kbdev, as_nr);
	kbase_csf_ctx_handle_fault(kctx, fault);
	kbase_ctx_sched_release_ctx_lock(kctx);

	/* The work for this GPU fault is now complete; until this point no
	 * further GPU fault could be reported. Clear the fault so that the
	 * next GPU fault interrupt can be raised.
	 */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
			GPU_COMMAND_CLEAR_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	atomic_dec(&kbdev->faults_pending);
}

/**
 * submit_work_gpufault() - Submit a work for GPU fault.
 *
 * @kbdev:    Kbase device pointer
 * @status:   GPU fault status
 * @as_nr:    Faulty address space
 * @address:  GPU fault address
 *
 * This function submits a work item for reporting the details of a GPU
 * fault.
 */
static void submit_work_gpufault(struct kbase_device *kbdev, u32 status,
		u32 as_nr, u64 address)
{
	unsigned long flags;
	struct kbase_as *const as = &kbdev->as[as_nr];
	struct kbase_context *kctx;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kctx = kbase_ctx_sched_as_to_ctx_nolock(kbdev, as_nr);

	if (kctx) {
		kbase_ctx_sched_retain_ctx_refcount(kctx);

		as->gf_data = (struct kbase_fault) {
			.status = status,
			.addr = address,
		};

		if (WARN_ON(!queue_work(as->pf_wq, &as->work_gpufault)))
			kbase_ctx_sched_release_ctx(kctx);
		else
			atomic_inc(&kbdev->faults_pending);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
		u32 as_nr, u64 address, bool as_valid)
{
	if (!as_valid || (as_nr == MCU_AS_NR)) {
		int as;

		/* Report the GPU fault for all contexts (except MCU_AS_NR)
		 * when either the address space is invalid or it is the MCU
		 * address space.
		 */
		for (as = 1; as < kbdev->nr_hw_address_spaces; as++)
			submit_work_gpufault(kbdev, status, as, address);
	} else {
		submit_work_gpufault(kbdev, status, as_nr, address);
	}
}
KBASE_EXPORT_TEST_API(kbase_mmu_gpu_fault_interrupt);

int kbase_mmu_as_init(struct kbase_device *kbdev, int i)
{
	kbdev->as[i].number = i;
	kbdev->as[i].bf_data.addr = 0ULL;
	kbdev->as[i].pf_data.addr = 0ULL;
	kbdev->as[i].gf_data.addr = 0ULL;
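
	/* One workqueue per address space; the same queue services the
	 * page-fault, bus-fault and GPU-fault work items initialized below.
	 */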
	kbdev->as[i].pf_wq = alloc_workqueue("mali_mmu%d", 0, 1, i);
	if (!kbdev->as[i].pf_wq)
		return -ENOMEM;

	INIT_WORK(&kbdev->as[i].work_pagefault, kbase_mmu_page_fault_worker);
	INIT_WORK(&kbdev->as[i].work_busfault, kbase_mmu_bus_fault_worker);
	INIT_WORK(&kbdev->as[i].work_gpufault, kbase_mmu_gpu_fault_worker);

	return 0;
}
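
/* Illustrative caller pattern (a sketch, not code from this driver): each
 * hardware address space would be initialized once at probe time, e.g.:
 *
 *	int i, err = 0;
 *
 *	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
 *		err = kbase_mmu_as_init(kbdev, i);
 *		if (err)
 *			break;
 *	}
 */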