/*
 * OMAP L3 Interconnect error handling driver
 *
 * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Sricharan <r.sricharan@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "omap_l3_noc.h"

/**
 * l3_handle_target() - Handle target-specific parsing and reporting
 * @l3:		pointer to l3 struct
 * @base:	base address of clkdm
 * @flag_mux:	flagmux corresponding to the event
 * @err_src:	error source index of the slave (target)
 *
 * This does the second part of the error interrupt handling:
 *	3) Parse the slave information
 *	4) Print the logged information.
 *	5) Add dump stack to provide kernel trace.
 *	6) Clear the source if known.
 *
 * This handles two types of errors:
 *	1) Custom errors in L3:
 *		Targets like DMM/FW/EMIF generate an SRESP=ERR error
 *	2) Standard L3 error:
 *		- Unsupported CMD.
 *			L3 tries to access the target while it is idle
 *		- OCP disconnect.
 *		- Address hole error:
 *			If DSS/ISS/FDIF/USBHOSTFS access a target where they
 *			do not have connectivity, the error is logged in
 *			their default target, which is DMM2.
 *
 *	On High Secure devices, firewall errors are possible and those
 *	can be trapped as well. But the trapping is implemented as part
 *	of the secure software and hence need not be implemented here.
 */
static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
			    struct l3_flagmux_data *flag_mux, int err_src)
{
	int k;
	u32 std_err_main, clear, masterid;
	u8 op_code, m_req_info;
	void __iomem *l3_targ_base;
	void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
	void __iomem *l3_targ_hdr, *l3_targ_info;
	struct l3_target_data *l3_targ_inst;
	struct l3_masters_data *master;
	char *target_name, *master_name = "UN IDENTIFIED";
	char *err_description;
	char err_string[30] = { 0 };
	char info_string[60] = { 0 };

	/* We DO NOT expect err_src to go out of bounds */
	BUG_ON(err_src > MAX_CLKDM_TARGETS);

	if (err_src < flag_mux->num_targ_data) {
		l3_targ_inst = &flag_mux->l3_targ[err_src];
		target_name = l3_targ_inst->name;
		l3_targ_base = base + l3_targ_inst->offset;
	} else {
		target_name = L3_TARGET_NOT_SUPPORTED;
	}

	if (target_name == L3_TARGET_NOT_SUPPORTED)
		return -ENODEV;

	/* Read the stderrlog_main_source from clk domain */
	l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
	l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;

	std_err_main = readl_relaxed(l3_targ_stderr);

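	/*
	 * The CUSTOM_ERROR bit in STDERRLOG_MAIN tells a custom target
	 * error apart from a standard interconnect error; the two use
	 * different follow-up register offsets, picked below.
	 */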
	switch (std_err_main & CUSTOM_ERROR) {
	case STANDARD_ERROR:
		err_description = "Standard";
		snprintf(err_string, sizeof(err_string),
			 ": At Address: 0x%08X ",
			 readl_relaxed(l3_targ_slvofslsb));

		l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
		break;

	case CUSTOM_ERROR:
		err_description = "Custom";

		l3_targ_mstaddr = l3_targ_base +
				  L3_TARG_STDERRLOG_CINFO_MSTADDR;
		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
		break;

	default:
		/* Nothing to be handled here as of now */
		return 0;
	}

	/* STDERRLOG_MSTADDR Stores the NTTP master address. */
	masterid = (readl_relaxed(l3_targ_mstaddr) &
		    l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);

	for (k = 0, master = l3->l3_masters; k < l3->num_masters;
	     k++, master++) {
		if (masterid == master->id) {
			master_name = master->name;
			break;
		}
	}

	op_code = readl_relaxed(l3_targ_hdr) & 0x7;

	m_req_info = readl_relaxed(l3_targ_info) & 0xF;
	snprintf(info_string, sizeof(info_string),
		 ": %s in %s mode during %s access",
		 (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
		 (m_req_info & BIT(1)) ? "Supervisor" : "User",
		 (m_req_info & BIT(3)) ? "Debug" : "Functional");

	WARN(true,
	     "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
	     dev_name(l3->dev),
	     err_description,
	     master_name, target_name,
	     l3_transaction_type[op_code],
	     err_string, info_string);

	/* clear the std error log */
	clear = std_err_main | CLEAR_STDERR_LOG;
	writel_relaxed(clear, l3_targ_stderr);

	return 0;
}

/**
 * l3_interrupt_handler() - interrupt handler for l3 events
 * @irq:	irq number
 * @_l3:	pointer to l3 structure
 *
 * Interrupt Handler for L3 error detection.
 *	1) Identify the L3 clockdomain partition to which the error belongs.
 *	2) Identify the slave where the error information is logged
 *	... handle the slave event ...
 *	7) If the slave is unknown, mask out the slave.
 */
static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
{
	struct omap_l3 *l3 = _l3;
	int inttype, i, ret;
	int err_src = 0;
	u32 err_reg, mask_val;
	void __iomem *base, *mask_reg;
	struct l3_flagmux_data *flag_mux;

	/* Get the Type of interrupt */
	inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;

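	/*
	 * The application and debug error/mask registers sit 8 bytes
	 * apart, so "inttype << 3" below selects the register bank
	 * matching this interrupt type.
	 */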
	for (i = 0; i < l3->num_modules; i++) {
		/*
		 * Read the regerr register of the clock domain
		 * to determine the source
		 */
		base = l3->l3_base[i];
		flag_mux = l3->l3_flagmux[i];
		err_reg = readl_relaxed(base + flag_mux->offset +
					L3_FLAGMUX_REGERR0 + (inttype << 3));

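		/*
		 * Filter out sources that were previously flagged as
		 * unclearable and masked off at runtime; those are
		 * tracked in mask_app_bits/mask_dbg_bits.
		 */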
		err_reg &= ~(inttype ? flag_mux->mask_app_bits :
				flag_mux->mask_dbg_bits);

		/* Get the corresponding error and analyse */
		if (err_reg) {
			/* Identify the source from control status register */
			err_src = __ffs(err_reg);

			ret = l3_handle_target(l3, base, flag_mux, err_src);

			/*
			 * Certain platforms may have "undocumented" status
			 * pending on boot, so don't generate a severe warning
			 * here. Just mask it off to prevent the error from
			 * recurring and locking up the system.
			 */
			if (ret) {
				dev_err(l3->dev,
					"L3 %s error: target %d mod:%d %s\n",
					inttype ? "debug" : "application",
					err_src, i, "(unclearable)");

				mask_reg = base + flag_mux->offset +
					   L3_FLAGMUX_MASK0 + (inttype << 3);
				mask_val = readl_relaxed(mask_reg);
				mask_val &= ~(1 << err_src);
				writel_relaxed(mask_val, mask_reg);

				/* Mark these bits as to be ignored */
				if (inttype)
					flag_mux->mask_app_bits |= 1 << err_src;
				else
					flag_mux->mask_dbg_bits |= 1 << err_src;
			}

			/* Error found; stop scanning further modules */
			return IRQ_HANDLED;
		}
	}

	dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
		inttype ? "debug" : "application");

	return IRQ_NONE;
}

static const struct of_device_id l3_noc_match[] = {
	{.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
	{.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
	{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
	{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
	{},
};
MODULE_DEVICE_TABLE(of, l3_noc_match);

static int omap_l3_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	static struct omap_l3 *l3;
	int ret, i, res_idx;

	of_id = of_match_device(l3_noc_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "OF data missing\n");
		return -EINVAL;
	}

	l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
	if (!l3)
		return -ENOMEM;

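	/*
	 * Start from the SoC-specific template selected via the
	 * compatible string; the l3_base[] slots are filled in from
	 * the memory resources below.
	 */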
	memcpy(l3, of_id->data, sizeof(*l3));
	l3->dev = &pdev->dev;
	platform_set_drvdata(pdev, l3);

	/* Get mem resources */
	for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
		struct resource	*res;

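		/*
		 * A target marked as a submodule shares the preceding
		 * module's register space and consumes no memory
		 * resource of its own.
		 */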
		if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
			/* First entry cannot be submodule */
			BUG_ON(i == 0);
			l3->l3_base[i] = l3->l3_base[i - 1];
			continue;
		}
		res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
		l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(l3->l3_base[i])) {
			dev_err(l3->dev, "ioremap %d failed\n", i);
			return PTR_ERR(l3->l3_base[i]);
		}
		res_idx++;
	}

	/*
	 * Set up interrupt handlers
	 */
	l3->debug_irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
			       0x0, "l3-dbg-irq", l3);
	if (ret) {
		dev_err(l3->dev, "request_irq failed for %d\n",
			l3->debug_irq);
		return ret;
	}

	l3->app_irq = platform_get_irq(pdev, 1);
	ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
			       0x0, "l3-app-irq", l3);
	if (ret)
		dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);

	return ret;
}

#ifdef	CONFIG_PM_SLEEP

/**
 * l3_resume_noirq() - resume function for l3_noc
 * @dev:	pointer to l3_noc device structure
 *
 * We only have a resume handler, since we have already
 * maintained the delta register configuration as part of
 * configuring the system.
 */
static int l3_resume_noirq(struct device *dev)
{
	struct omap_l3 *l3 = dev_get_drvdata(dev);
	int i;
	struct l3_flagmux_data *flag_mux;
	void __iomem *base, *mask_regx = NULL;
	u32 mask_val;

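	/*
	 * Re-apply the error sources that were masked off at runtime;
	 * the flagmux MASK registers may have lost this state across
	 * suspend.
	 */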
	for (i = 0; i < l3->num_modules; i++) {
		base = l3->l3_base[i];
		flag_mux = l3->l3_flagmux[i];
		if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
			continue;

		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
			   (L3_APPLICATION_ERROR << 3);
		mask_val = readl_relaxed(mask_regx);
		mask_val &= ~(flag_mux->mask_app_bits);

		writel_relaxed(mask_val, mask_regx);
		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
			   (L3_DEBUG_ERROR << 3);
		mask_val = readl_relaxed(mask_regx);
		mask_val &= ~(flag_mux->mask_dbg_bits);

		writel_relaxed(mask_val, mask_regx);
	}

	/* Dummy read to force OCP barrier */
	if (mask_regx)
		(void)readl(mask_regx);

	return 0;
}

static const struct dev_pm_ops l3_dev_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
};

#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
#else
#define L3_DEV_PM_OPS NULL
#endif

static struct platform_driver omap_l3_driver = {
	.probe		= omap_l3_probe,
	.driver		= {
		.name		= "omap_l3_noc",
		.pm		= L3_DEV_PM_OPS,
		.of_match_table = of_match_ptr(l3_noc_match),
	},
};

static int __init omap_l3_init(void)
{
	return platform_driver_register(&omap_l3_driver);
}
postcore_initcall_sync(omap_l3_init);

static void __exit omap_l3_exit(void)
{
	platform_driver_unregister(&omap_l3_driver);
}
module_exit(omap_l3_exit);

MODULE_AUTHOR("Santosh Shilimkar");
MODULE_AUTHOR("Sricharan R");
MODULE_DESCRIPTION("OMAP L3 Interconnect error handling driver");
MODULE_LICENSE("GPL v2");