• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2018-2021 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #include <mali_kbase.h>
23 #include <linux/random.h>
24 #include "backend/gpu/mali_kbase_model_dummy.h"
25 
26 /* all the error conditions supported by the model */
27 #define TOTAL_FAULTS 27
28 /* maximum number of levels in the MMU translation table tree */
29 #define MAX_MMU_TABLE_LEVEL 4
30 /* worst case scenario is <1 MMU fault + 1 job fault + 2 GPU faults> */
31 #define MAX_CONCURRENT_FAULTS 3
32 
33 static struct kbase_error_atom *error_track_list;
34 
35 unsigned int rand_seed;
36 
/* The following error probabilities are set quite high in order to stress the driver */
38 unsigned int error_probability = 50;	/* to be set between 0 and 100 */
/* probability to have multiple errors given that there is an error */
40 unsigned int multiple_error_probability = 50;
41 
gpu_generate_error(void)42 void gpu_generate_error(void)
43 {
44 	unsigned int errors_num = 0;
45 
46 	/*is there at least one error? */
47 	if ((prandom_u32() % 100) < error_probability) {
48 		/* pick up a faulty mmu address space */
49 		hw_error_status.faulty_mmu_as = prandom_u32() % NUM_MMU_AS;
50 		/* pick up an mmu table level */
51 		hw_error_status.mmu_table_level =
52 			1 + (prandom_u32() % MAX_MMU_TABLE_LEVEL);
53 		hw_error_status.errors_mask =
54 			(u32)(1 << (prandom_u32() % TOTAL_FAULTS));
55 
56 		/*is there also one or more errors? */
57 		if ((prandom_u32() % 100) < multiple_error_probability) {
58 			errors_num = 1 + (prandom_u32() %
59 					  (MAX_CONCURRENT_FAULTS - 1));
60 			while (errors_num-- > 0) {
61 				u32 temp_mask;
62 
63 				temp_mask = (u32)(
64 					1 << (prandom_u32() % TOTAL_FAULTS));
65 				/* below we check that no bit of the same error
66 				 * type is set again in the error mask
67 				 */
68 				if ((temp_mask & IS_A_JOB_ERROR) &&
69 						(hw_error_status.errors_mask &
70 							IS_A_JOB_ERROR)) {
71 					errors_num++;
72 					continue;
73 				}
74 				if ((temp_mask & IS_A_MMU_ERROR) &&
75 						(hw_error_status.errors_mask &
76 							IS_A_MMU_ERROR)) {
77 					errors_num++;
78 					continue;
79 				}
80 				if ((temp_mask & IS_A_GPU_ERROR) &&
81 						(hw_error_status.errors_mask &
82 							IS_A_GPU_ERROR)) {
83 					errors_num++;
84 					continue;
85 				}
86 				/* this error mask is already set */
87 				if ((hw_error_status.errors_mask | temp_mask) ==
88 						hw_error_status.errors_mask) {
89 					errors_num++;
90 					continue;
91 				}
92 				hw_error_status.errors_mask |= temp_mask;
93 			}
94 		}
95 	}
96 }
97 
job_atom_inject_error(struct kbase_error_params * params)98 int job_atom_inject_error(struct kbase_error_params *params)
99 {
100 	struct kbase_error_atom *new_elem;
101 
102 	KBASE_DEBUG_ASSERT(params);
103 
104 	new_elem = kzalloc(sizeof(*new_elem), GFP_KERNEL);
105 
106 	if (!new_elem) {
107 		model_error_log(KBASE_CORE,
108 			"\njob_atom_inject_error: kzalloc failed for new_elem\n"
109 									);
110 		return -ENOMEM;
111 	}
112 	new_elem->params.jc = params->jc;
113 	new_elem->params.errors_mask = params->errors_mask;
114 	new_elem->params.mmu_table_level = params->mmu_table_level;
115 	new_elem->params.faulty_mmu_as = params->faulty_mmu_as;
116 
117 	/*circular list below */
118 	if (error_track_list == NULL) {	/*no elements */
119 		error_track_list = new_elem;
120 		new_elem->next = error_track_list;
121 	} else {
122 		struct kbase_error_atom *walker = error_track_list;
123 
124 		while (walker->next != error_track_list)
125 			walker = walker->next;
126 
127 		new_elem->next = error_track_list;
128 		walker->next = new_elem;
129 	}
130 	return 0;
131 }
132 
/* Arm the model's fault state for the job currently being submitted.
 *
 * In random-injection builds the fault state is generated from the PRNG.
 * Otherwise the circular error_track_list is searched for a node whose
 * job chain address matches hw_error_status.current_jc; on a match the
 * node's parameters are copied into hw_error_status (tagged with
 * @job_slot) and the node is unlinked and freed.
 */
void midgard_set_error(int job_slot)
{
#ifdef CONFIG_MALI_ERROR_INJECT_RANDOM
	gpu_generate_error();
#else
	/* walker scans the list; auxiliar trails one node behind so the
	 * matched node can be unlinked
	 */
	struct kbase_error_atom *walker, *auxiliar;

	if (error_track_list != NULL) {
		walker = error_track_list->next;
		auxiliar = error_track_list;
		do {
			if (walker->params.jc == hw_error_status.current_jc) {
				/* found a faulty atom matching with the
				 * current one
				 */
				hw_error_status.errors_mask =
						walker->params.errors_mask;
				hw_error_status.mmu_table_level =
						walker->params.mmu_table_level;
				hw_error_status.faulty_mmu_as =
						walker->params.faulty_mmu_as;
				hw_error_status.current_job_slot = job_slot;

				if (walker->next == walker) {
					/* only one element */
					kfree(error_track_list);
					error_track_list = NULL;
				} else {
					/* unlink walker; move the head
					 * forward if walker was the head
					 */
					auxiliar->next = walker->next;
					if (walker == error_track_list)
						error_track_list = walker->next;

					kfree(walker);
				}
				break;
			}
			auxiliar = walker;
			walker = walker->next;
		} while (auxiliar->next != error_track_list);
	}
#endif				/* CONFIG_MALI_ERROR_INJECT_RANDOM */
}
175