1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2014 - 2018, NVIDIA CORPORATION.  All rights reserved.
4  *
5  * Author:
6  *	Mikko Perttunen <mperttunen@nvidia.com>
7  *
8  * This software is licensed under the terms of the GNU General Public
9  * License version 2, as published by the Free Software Foundation, and
10  * may be copied, distributed, and modified under those terms.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  */
18 
19 #include <linux/debugfs.h>
20 #include <linux/bitops.h>
21 #include <linux/clk.h>
22 #include <linux/delay.h>
23 #include <linux/err.h>
24 #include <linux/interrupt.h>
25 #include <linux/io.h>
26 #include <linux/irq.h>
27 #include <linux/irqdomain.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/reset.h>
32 #include <linux/thermal.h>
33 
34 #include <dt-bindings/thermal/tegra124-soctherm.h>
35 
36 #include "../thermal_core.h"
37 #include "soctherm.h"
38 
39 #define SENSOR_CONFIG0				0
40 #define SENSOR_CONFIG0_STOP			BIT(0)
41 #define SENSOR_CONFIG0_CPTR_OVER		BIT(2)
42 #define SENSOR_CONFIG0_OVER			BIT(3)
43 #define SENSOR_CONFIG0_TCALC_OVER		BIT(4)
44 #define SENSOR_CONFIG0_TALL_MASK		(0xfffff << 8)
45 #define SENSOR_CONFIG0_TALL_SHIFT		8
46 
47 #define SENSOR_CONFIG1				4
48 #define SENSOR_CONFIG1_TSAMPLE_MASK		0x3ff
49 #define SENSOR_CONFIG1_TSAMPLE_SHIFT		0
50 #define SENSOR_CONFIG1_TIDDQ_EN_MASK		(0x3f << 15)
51 #define SENSOR_CONFIG1_TIDDQ_EN_SHIFT		15
52 #define SENSOR_CONFIG1_TEN_COUNT_MASK		(0x3f << 24)
53 #define SENSOR_CONFIG1_TEN_COUNT_SHIFT		24
54 #define SENSOR_CONFIG1_TEMP_ENABLE		BIT(31)
55 
56 /*
57  * SENSOR_CONFIG2 is defined in soctherm.h
58  * because it will be used by tegra_soctherm_fuse.c
59  */
60 
61 #define SENSOR_STATUS0				0xc
62 #define SENSOR_STATUS0_VALID_MASK		BIT(31)
63 #define SENSOR_STATUS0_CAPTURE_MASK		0xffff
64 
65 #define SENSOR_STATUS1				0x10
66 #define SENSOR_STATUS1_TEMP_VALID_MASK		BIT(31)
67 #define SENSOR_STATUS1_TEMP_MASK		0xffff
68 
69 #define READBACK_VALUE_MASK			0xff00
70 #define READBACK_VALUE_SHIFT			8
71 #define READBACK_ADD_HALF			BIT(7)
72 #define READBACK_NEGATE				BIT(0)
73 
74 /*
75  * THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h
76  * because it will be used by tegraxxx_soctherm.c
77  */
78 #define THERMCTL_LVL0_CPU0_EN_MASK		BIT(8)
79 #define THERMCTL_LVL0_CPU0_CPU_THROT_MASK	(0x3 << 5)
80 #define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT	0x1
81 #define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY	0x2
82 #define THERMCTL_LVL0_CPU0_GPU_THROT_MASK	(0x3 << 3)
83 #define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT	0x1
84 #define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY	0x2
85 #define THERMCTL_LVL0_CPU0_MEM_THROT_MASK	BIT(2)
86 #define THERMCTL_LVL0_CPU0_STATUS_MASK		0x3
87 
88 #define THERMCTL_LVL0_UP_STATS			0x10
89 #define THERMCTL_LVL0_DN_STATS			0x14
90 
91 #define THERMCTL_INTR_STATUS			0x84
92 
93 #define TH_INTR_MD0_MASK			BIT(25)
94 #define TH_INTR_MU0_MASK			BIT(24)
95 #define TH_INTR_GD0_MASK			BIT(17)
96 #define TH_INTR_GU0_MASK			BIT(16)
97 #define TH_INTR_CD0_MASK			BIT(9)
98 #define TH_INTR_CU0_MASK			BIT(8)
99 #define TH_INTR_PD0_MASK			BIT(1)
100 #define TH_INTR_PU0_MASK			BIT(0)
101 #define TH_INTR_IGNORE_MASK			0xFCFCFCFC
102 
103 #define THERMCTL_STATS_CTL			0x94
104 #define STATS_CTL_CLR_DN			0x8
105 #define STATS_CTL_EN_DN				0x4
106 #define STATS_CTL_CLR_UP			0x2
107 #define STATS_CTL_EN_UP				0x1
108 
109 #define OC1_CFG					0x310
110 #define OC1_CFG_LONG_LATENCY_MASK		BIT(6)
111 #define OC1_CFG_HW_RESTORE_MASK			BIT(5)
112 #define OC1_CFG_PWR_GOOD_MASK_MASK		BIT(4)
113 #define OC1_CFG_THROTTLE_MODE_MASK		(0x3 << 2)
114 #define OC1_CFG_ALARM_POLARITY_MASK		BIT(1)
115 #define OC1_CFG_EN_THROTTLE_MASK		BIT(0)
116 
117 #define OC1_CNT_THRESHOLD			0x314
118 #define OC1_THROTTLE_PERIOD			0x318
119 #define OC1_ALARM_COUNT				0x31c
120 #define OC1_FILTER				0x320
121 #define OC1_STATS				0x3a8
122 
123 #define OC_INTR_STATUS				0x39c
124 #define OC_INTR_ENABLE				0x3a0
125 #define OC_INTR_DISABLE				0x3a4
126 #define OC_STATS_CTL				0x3c4
127 #define OC_STATS_CTL_CLR_ALL			0x2
128 #define OC_STATS_CTL_EN_ALL			0x1
129 
130 #define OC_INTR_OC1_MASK			BIT(0)
131 #define OC_INTR_OC2_MASK			BIT(1)
132 #define OC_INTR_OC3_MASK			BIT(2)
133 #define OC_INTR_OC4_MASK			BIT(3)
134 #define OC_INTR_OC5_MASK			BIT(4)
135 
136 #define THROT_GLOBAL_CFG			0x400
137 #define THROT_GLOBAL_ENB_MASK			BIT(0)
138 
139 #define CPU_PSKIP_STATUS			0x418
140 #define XPU_PSKIP_STATUS_M_MASK			(0xff << 12)
141 #define XPU_PSKIP_STATUS_N_MASK			(0xff << 4)
142 #define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK	BIT(1)
143 #define XPU_PSKIP_STATUS_ENABLED_MASK		BIT(0)
144 
145 #define THROT_PRIORITY_LOCK			0x424
146 #define THROT_PRIORITY_LOCK_PRIORITY_MASK	0xff
147 
148 #define THROT_STATUS				0x428
149 #define THROT_STATUS_BREACH_MASK		BIT(12)
150 #define THROT_STATUS_STATE_MASK			(0xff << 4)
151 #define THROT_STATUS_ENABLED_MASK		BIT(0)
152 
153 #define THROT_PSKIP_CTRL_LITE_CPU		0x430
154 #define THROT_PSKIP_CTRL_ENABLE_MASK            BIT(31)
155 #define THROT_PSKIP_CTRL_DIVIDEND_MASK          (0xff << 8)
156 #define THROT_PSKIP_CTRL_DIVISOR_MASK           0xff
157 #define THROT_PSKIP_CTRL_VECT_GPU_MASK          (0x7 << 16)
158 #define THROT_PSKIP_CTRL_VECT_CPU_MASK          (0x7 << 8)
159 #define THROT_PSKIP_CTRL_VECT2_CPU_MASK         0x7
160 
161 #define THROT_VECT_NONE				0x0 /* 3'b000 */
162 #define THROT_VECT_LOW				0x1 /* 3'b001 */
163 #define THROT_VECT_MED				0x3 /* 3'b011 */
164 #define THROT_VECT_HIGH				0x7 /* 3'b111 */
165 
166 #define THROT_PSKIP_RAMP_LITE_CPU		0x434
167 #define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK	BIT(31)
168 #define THROT_PSKIP_RAMP_DURATION_MASK		(0xffff << 8)
169 #define THROT_PSKIP_RAMP_STEP_MASK		0xff
170 
171 #define THROT_PRIORITY_LITE			0x444
172 #define THROT_PRIORITY_LITE_PRIO_MASK		0xff
173 
174 #define THROT_DELAY_LITE			0x448
175 #define THROT_DELAY_LITE_DELAY_MASK		0xff
176 
177 /* car register offsets needed for enabling HW throttling */
178 #define CAR_SUPER_CCLKG_DIVIDER			0x36c
179 #define CDIVG_USE_THERM_CONTROLS_MASK		BIT(30)
180 
181 /* ccroc register offsets needed for enabling HW throttling for Tegra132 */
182 #define CCROC_SUPER_CCLKG_DIVIDER		0x024
183 
184 #define CCROC_GLOBAL_CFG			0x148
185 
186 #define CCROC_THROT_PSKIP_RAMP_CPU		0x150
187 #define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK	BIT(31)
188 #define CCROC_THROT_PSKIP_RAMP_DURATION_MASK	(0xffff << 8)
189 #define CCROC_THROT_PSKIP_RAMP_STEP_MASK	0xff
190 
191 #define CCROC_THROT_PSKIP_CTRL_CPU		0x154
192 #define CCROC_THROT_PSKIP_CTRL_ENB_MASK		BIT(31)
193 #define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK	(0xff << 8)
194 #define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK	0xff
195 
196 /* get val from register(r) mask bits(m) */
197 #define REG_GET_MASK(r, m)	(((r) & (m)) >> (ffs(m) - 1))
198 /* set val(v) to mask bits(m) of register(r) */
199 #define REG_SET_MASK(r, m, v)	(((r) & ~(m)) | \
200 				 (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
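/*
 * Worked example (illustrative): with m = SENSOR_CONFIG1_TIDDQ_EN_MASK,
 * i.e. (0x3f << 15), ffs(m) - 1 is 15, so REG_GET_MASK(r, m) expands to
 * ((r & 0x1f8000) >> 15) and REG_SET_MASK(r, m, 2) expands to
 * ((r & ~0x1f8000) | ((2 & 0x3f) << 15)), reading/writing bits 20:15.
 */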
201 
202 /* get dividend from the depth */
203 #define THROT_DEPTH_DIVIDEND(depth)	((256 * (100 - (depth)) / 100) - 1)
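/*
 * Worked example (illustrative): a throttle depth of 80% gives
 * THROT_DEPTH_DIVIDEND(80) = (256 * 20 / 100) - 1 = 50, the kind of value
 * that ends up in a THROT_PSKIP_CTRL dividend field.
 */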
204 
205 /* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h
206  * level	vector
207  * NONE		3'b000
208  * LOW		3'b001
209  * MED		3'b011
210  * HIGH		3'b111
211  */
212 #define THROT_LEVEL_TO_DEPTH(level)	((0x1 << (level)) - 1)
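/*
 * Illustrative mapping, assuming the TEGRA_SOCTHERM_THROT_LEVEL_* values are
 * encoded 0..3: THROT_LEVEL_TO_DEPTH() then yields 0x0, 0x1, 0x3 and 0x7,
 * matching the NONE/LOW/MED/HIGH vectors in the table above.
 */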
213 
214 /* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
215 #define THROT_OFFSET			0x30
216 #define THROT_PSKIP_CTRL(throt, dev)	(THROT_PSKIP_CTRL_LITE_CPU + \
217 					(THROT_OFFSET * throt) + (8 * dev))
218 #define THROT_PSKIP_RAMP(throt, dev)	(THROT_PSKIP_RAMP_LITE_CPU + \
219 					(THROT_OFFSET * throt) + (8 * dev))
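/*
 * Worked example (illustrative): THROT_PSKIP_CTRL(THROTTLE_HEAVY,
 * THROTTLE_DEV_GPU) = 0x430 + (0x30 * 1) + (8 * 1) = 0x468, the pulse-skip
 * control register for the GPU device in the HEAVY throttle bank.
 */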
220 
221 /* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */
222 #define THROT_PRIORITY_CTRL(throt)	(THROT_PRIORITY_LITE + \
223 					(THROT_OFFSET * throt))
224 #define THROT_DELAY_CTRL(throt)		(THROT_DELAY_LITE + \
225 					(THROT_OFFSET * throt))
226 
227 #define ALARM_OFFSET			0x14
228 #define ALARM_CFG(throt)		(OC1_CFG + \
229 					(ALARM_OFFSET * (throt - THROTTLE_OC1)))
230 
231 #define ALARM_CNT_THRESHOLD(throt)	(OC1_CNT_THRESHOLD + \
232 					(ALARM_OFFSET * (throt - THROTTLE_OC1)))
233 
234 #define ALARM_THROTTLE_PERIOD(throt)	(OC1_THROTTLE_PERIOD + \
235 					(ALARM_OFFSET * (throt - THROTTLE_OC1)))
236 
237 #define ALARM_ALARM_COUNT(throt)	(OC1_ALARM_COUNT + \
238 					(ALARM_OFFSET * (throt - THROTTLE_OC1)))
239 
240 #define ALARM_FILTER(throt)		(OC1_FILTER + \
241 					(ALARM_OFFSET * (throt - THROTTLE_OC1)))
242 
243 #define ALARM_STATS(throt)		(OC1_STATS + \
244 					(4 * (throt - THROTTLE_OC1)))
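/*
 * Worked example (illustrative): ALARM_CFG(THROTTLE_OC3) = 0x310 +
 * (0x14 * (THROTTLE_OC3 - THROTTLE_OC1)) = 0x310 + 0x28 = 0x338.
 */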
245 
246 /* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
247 #define CCROC_THROT_OFFSET			0x0c
248 #define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect)    (CCROC_THROT_PSKIP_CTRL_CPU + \
249 						(CCROC_THROT_OFFSET * vect))
250 #define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect)    (CCROC_THROT_PSKIP_RAMP_CPU + \
251 						(CCROC_THROT_OFFSET * vect))
252 
253 /* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */
254 #define THERMCTL_LVL_REGS_SIZE		0x20
255 #define THERMCTL_LVL_REG(rg, lv)	((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
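/*
 * Worked example (illustrative): THERMCTL_LVL_REG(off, 2) = off + 0x40, i.e.
 * the LEVEL2 register of the sensor group whose LEVEL0 register is at off.
 */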
256 
257 #define OC_THROTTLE_MODE_DISABLED	0
258 #define OC_THROTTLE_MODE_BRIEF		2
259 
260 static const int min_low_temp = -127000;
261 static const int max_high_temp = 127000;
262 
263 enum soctherm_throttle_id {
264 	THROTTLE_LIGHT = 0,
265 	THROTTLE_HEAVY,
266 	THROTTLE_OC1,
267 	THROTTLE_OC2,
268 	THROTTLE_OC3,
269 	THROTTLE_OC4,
270 	THROTTLE_OC5, /* OC5 is reserved */
271 	THROTTLE_SIZE,
272 };
273 
274 enum soctherm_oc_irq_id {
275 	TEGRA_SOC_OC_IRQ_1,
276 	TEGRA_SOC_OC_IRQ_2,
277 	TEGRA_SOC_OC_IRQ_3,
278 	TEGRA_SOC_OC_IRQ_4,
279 	TEGRA_SOC_OC_IRQ_5,
280 	TEGRA_SOC_OC_IRQ_MAX,
281 };
282 
283 enum soctherm_throttle_dev_id {
284 	THROTTLE_DEV_CPU = 0,
285 	THROTTLE_DEV_GPU,
286 	THROTTLE_DEV_SIZE,
287 };
288 
289 static const char *const throt_names[] = {
290 	[THROTTLE_LIGHT] = "light",
291 	[THROTTLE_HEAVY] = "heavy",
292 	[THROTTLE_OC1]   = "oc1",
293 	[THROTTLE_OC2]   = "oc2",
294 	[THROTTLE_OC3]   = "oc3",
295 	[THROTTLE_OC4]   = "oc4",
296 	[THROTTLE_OC5]   = "oc5",
297 };
298 
299 struct tegra_soctherm;
300 struct tegra_thermctl_zone {
301 	void __iomem *reg;
302 	struct device *dev;
303 	struct tegra_soctherm *ts;
304 	struct thermal_zone_device *tz;
305 	const struct tegra_tsensor_group *sg;
306 };
307 
308 struct soctherm_oc_cfg {
309 	u32 active_low;
310 	u32 throt_period;
311 	u32 alarm_cnt_thresh;
312 	u32 alarm_filter;
313 	u32 mode;
314 	bool intr_en;
315 };
316 
317 struct soctherm_throt_cfg {
318 	const char *name;
319 	unsigned int id;
320 	u8 priority;
321 	u8 cpu_throt_level;
322 	u32 cpu_throt_depth;
323 	u32 gpu_throt_level;
324 	struct soctherm_oc_cfg oc_cfg;
325 	struct thermal_cooling_device *cdev;
326 	bool init;
327 };
328 
329 struct tegra_soctherm {
330 	struct reset_control *reset;
331 	struct clk *clock_tsensor;
332 	struct clk *clock_soctherm;
333 	void __iomem *regs;
334 	void __iomem *clk_regs;
335 	void __iomem *ccroc_regs;
336 
337 	int thermal_irq;
338 	int edp_irq;
339 
340 	u32 *calib;
341 	struct thermal_zone_device **thermctl_tzs;
342 	struct tegra_soctherm_soc *soc;
343 
344 	struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
345 
346 	struct dentry *debugfs_dir;
347 
348 	struct mutex thermctl_lock;
349 };
350 
351 struct soctherm_oc_irq_chip_data {
352 	struct mutex		irq_lock; /* serialize OC IRQs */
353 	struct irq_chip		irq_chip;
354 	struct irq_domain	*domain;
355 	int			irq_enable;
356 };
357 
358 static struct soctherm_oc_irq_chip_data soc_irq_cdata;
359 
360 /**
361  * ccroc_writel() - writes a value to a CCROC register
362  * @ts: pointer to a struct tegra_soctherm
363  * @value: the value to write
364  * @reg: the register offset
365  *
366  * Writes @value to @reg.  No return value.
367  */
368 static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
369 {
370 	writel(value, (ts->ccroc_regs + reg));
371 }
372 
373 /**
374  * ccroc_readl() - reads specified register from CCROC IP block
375  * @ts: pointer to a struct tegra_soctherm
376  * @reg: register address to be read
377  *
378  * Return: the value of the register
379  */
380 static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg)
381 {
382 	return readl(ts->ccroc_regs + reg);
383 }
384 
385 static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
386 {
387 	const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
388 	void __iomem *base = tegra->regs + sensor->base;
389 	unsigned int val;
390 
391 	val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
392 	writel(val, base + SENSOR_CONFIG0);
393 
394 	val  = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
395 	val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
396 	val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
397 	val |= SENSOR_CONFIG1_TEMP_ENABLE;
398 	writel(val, base + SENSOR_CONFIG1);
399 
400 	writel(tegra->calib[i], base + SENSOR_CONFIG2);
401 }
402 
403 /*
404  * Translate from soctherm readback format to millicelsius.
405  * The soctherm readback format in bits is as follows:
406  *   TTTTTTTT H______N
407  * where T's contain the temperature in Celsius,
408  * H denotes an addition of 0.5 Celsius and N denotes negation
409  * of the final value.
410  */
411 static int translate_temp(u16 val)
412 {
413 	int t;
414 
415 	t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
416 	if (val & READBACK_ADD_HALF)
417 		t += 500;
418 	if (val & READBACK_NEGATE)
419 		t *= -1;
420 
421 	return t;
422 }
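/*
 * Worked example (illustrative): a readback value of 0x2380 decodes as
 * (0x23 = 35) * 1000 = 35000, plus 500 because READBACK_ADD_HALF (BIT(7))
 * is set, with READBACK_NEGATE (BIT(0)) clear, giving 35500 millicelsius.
 */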
423 
424 static int tegra_thermctl_get_temp(void *data, int *out_temp)
425 {
426 	struct tegra_thermctl_zone *zone = data;
427 	u32 val;
428 
429 	val = readl(zone->reg);
430 	val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
431 	*out_temp = translate_temp(val);
432 
433 	return 0;
434 }
435 
436 /**
437  * enforce_temp_range() - check and enforce temperature range [min, max]
438  * @dev: struct device * of the SOC_THERM instance
439  * @trip_temp: the trip temperature to check
440  *
441  * Checks and enforces the permitted temperature range that SOC_THERM
442  * HW can support. This is
443  * done while taking care of precision.
444  *
445  * Return: The precision adjusted capped temperature in millicelsius.
446  */
447 static int enforce_temp_range(struct device *dev, int trip_temp)
448 {
449 	int temp;
450 
451 	temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
452 	if (temp != trip_temp)
453 		dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
454 			 trip_temp, temp);
455 	return temp;
456 }
457 
458 /**
459  * thermtrip_program() - Configures the hardware to shut down the
460  * system if a given sensor group reaches a given temperature
461  * @dev: ptr to the struct device for the SOC_THERM IP block
462  * @sg: pointer to the sensor group to set the thermtrip temperature for
463  * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
464  *
465  * Sets the thermal trip threshold of the given sensor group to be the
466  * @trip_temp.  If this threshold is crossed, the hardware will shut
467  * down.
468  *
469  * Note that, although @trip_temp is specified in millicelsius, the
470  * hardware is programmed in degrees Celsius.
471  *
472  * Return: 0 upon success, or %-EINVAL upon failure.
473  */
474 static int thermtrip_program(struct device *dev,
475 			     const struct tegra_tsensor_group *sg,
476 			     int trip_temp)
477 {
478 	struct tegra_soctherm *ts = dev_get_drvdata(dev);
479 	int temp;
480 	u32 r;
481 
482 	if (!sg || !sg->thermtrip_threshold_mask)
483 		return -EINVAL;
484 
485 	temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
486 
487 	r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
488 	r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp);
489 	r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1);
490 	r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0);
491 	writel(r, ts->regs + THERMCTL_THERMTRIP_CTL);
492 
493 	return 0;
494 }
495 
496 /**
497  * throttrip_program() - Configures the hardware to start throttling
498  * (pulse skipping) if a given sensor group reaches a given temperature
499  * @dev: ptr to the struct device for the SOC_THERM IP block
500  * @sg: pointer to the sensor group to set the throttle temperature for
501  * @stc: pointer to the throttle configuration to be triggered
502  * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
503  *
504  * Sets the thermal trip threshold and throttle event of the given sensor
505  * group. If this threshold is crossed, the hardware will trigger the
506  * throttle.
507  *
508  * Note that, although @trip_temp is specified in millicelsius, the
509  * hardware is programmed in degrees Celsius.
510  *
511  * Return: 0 upon success, or %-EINVAL upon failure.
512  */
513 static int throttrip_program(struct device *dev,
514 			     const struct tegra_tsensor_group *sg,
515 			     struct soctherm_throt_cfg *stc,
516 			     int trip_temp)
517 {
518 	struct tegra_soctherm *ts = dev_get_drvdata(dev);
519 	int temp, cpu_throt, gpu_throt;
520 	unsigned int throt;
521 	u32 r, reg_off;
522 
523 	if (!sg || !stc || !stc->init)
524 		return -EINVAL;
525 
526 	temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
527 
528 	/* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */
529 	throt = stc->id;
530 	reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1);
531 
532 	if (throt == THROTTLE_LIGHT) {
533 		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT;
534 		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT;
535 	} else {
536 		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY;
537 		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY;
538 		if (throt != THROTTLE_HEAVY)
539 			dev_warn(dev,
540 				 "invalid throt id %d - assuming HEAVY",
541 				 throt);
542 	}
543 
544 	r = readl(ts->regs + reg_off);
545 	r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp);
546 	r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp);
547 	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt);
548 	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt);
549 	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
550 	writel(r, ts->regs + reg_off);
551 
552 	return 0;
553 }
554 
555 static struct soctherm_throt_cfg *
556 find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
557 {
558 	unsigned int i;
559 
560 	for (i = 0; ts->throt_cfgs[i].name; i++)
561 		if (!strcmp(ts->throt_cfgs[i].name, name))
562 			return &ts->throt_cfgs[i];
563 
564 	return NULL;
565 }
566 
567 static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
568 {
569 	int i, temp = min_low_temp;
570 	struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
571 
572 	if (id >= TEGRA124_SOCTHERM_SENSOR_NUM)
573 		return temp;
574 
575 	if (tt) {
576 		for (i = 0; i < ts->soc->num_ttgs; i++) {
577 			if (tt[i].id == id)
578 				return tt[i].temp;
579 		}
580 	}
581 
582 	return temp;
583 }
584 
585 static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
586 {
587 	struct tegra_thermctl_zone *zone = data;
588 	struct thermal_zone_device *tz = zone->tz;
589 	struct tegra_soctherm *ts = zone->ts;
590 	const struct tegra_tsensor_group *sg = zone->sg;
591 	struct device *dev = zone->dev;
592 	enum thermal_trip_type type;
593 	int ret;
594 
595 	if (!tz)
596 		return -EINVAL;
597 
598 	ret = tz->ops->get_trip_type(tz, trip, &type);
599 	if (ret)
600 		return ret;
601 
602 	if (type == THERMAL_TRIP_CRITICAL) {
603 		/*
604 		 * If thermtrips property is set in DT,
605 		 * there is no need to program the critical type trip to HW;
606 		 * if not, program the critical trip to HW.
607 		 */
608 		if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id))
609 			return thermtrip_program(dev, sg, temp);
610 		else
611 			return 0;
612 
613 	} else if (type == THERMAL_TRIP_HOT) {
614 		int i;
615 
616 		for (i = 0; i < THROTTLE_SIZE; i++) {
617 			struct thermal_cooling_device *cdev;
618 			struct soctherm_throt_cfg *stc;
619 
620 			if (!ts->throt_cfgs[i].init)
621 				continue;
622 
623 			cdev = ts->throt_cfgs[i].cdev;
624 			if (get_thermal_instance(tz, cdev, trip))
625 				stc = find_throttle_cfg_by_name(ts, cdev->type);
626 			else
627 				continue;
628 
629 			return throttrip_program(dev, sg, stc, temp);
630 		}
631 	}
632 
633 	return 0;
634 }
635 
636 static int tegra_thermctl_get_trend(void *data, int trip,
637 				    enum thermal_trend *trend)
638 {
639 	struct tegra_thermctl_zone *zone = data;
640 	struct thermal_zone_device *tz = zone->tz;
641 	int trip_temp, temp, last_temp, ret;
642 
643 	if (!tz)
644 		return -EINVAL;
645 
646 	ret = tz->ops->get_trip_temp(zone->tz, trip, &trip_temp);
647 	if (ret)
648 		return ret;
649 
650 	temp = READ_ONCE(tz->temperature);
651 	last_temp = READ_ONCE(tz->last_temperature);
652 
653 	if (temp > trip_temp) {
654 		if (temp >= last_temp)
655 			*trend = THERMAL_TREND_RAISING;
656 		else
657 			*trend = THERMAL_TREND_STABLE;
658 	} else if (temp < trip_temp) {
659 		*trend = THERMAL_TREND_DROPPING;
660 	} else {
661 		*trend = THERMAL_TREND_STABLE;
662 	}
663 
664 	return 0;
665 }
666 
667 static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
668 {
669 	u32 r;
670 
671 	/* multiple zones could be handling and setting trips at once */
672 	mutex_lock(&zn->ts->thermctl_lock);
673 	r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE);
674 	r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN);
675 	writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE);
676 	mutex_unlock(&zn->ts->thermctl_lock);
677 }
678 
679 static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
680 {
681 	u32 r;
682 
683 	/* multiple zones could be handling and setting trips at once */
684 	mutex_lock(&zn->ts->thermctl_lock);
685 	r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE);
686 	r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0);
687 	writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE);
688 	mutex_unlock(&zn->ts->thermctl_lock);
689 }
690 
691 static int tegra_thermctl_set_trips(void *data, int lo, int hi)
692 {
693 	struct tegra_thermctl_zone *zone = data;
694 	u32 r;
695 
696 	thermal_irq_disable(zone);
697 
698 	r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
699 	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0);
700 	writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
701 
702 	lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
703 	hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
704 	dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
705 
706 	r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
707 	r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
708 	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
709 	writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
710 
711 	thermal_irq_enable(zone);
712 
713 	return 0;
714 }
715 
716 static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
717 	.get_temp = tegra_thermctl_get_temp,
718 	.set_trip_temp = tegra_thermctl_set_trip_temp,
719 	.get_trend = tegra_thermctl_get_trend,
720 	.set_trips = tegra_thermctl_set_trips,
721 };
722 
723 static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
724 {
725 	int ntrips, i, ret;
726 	enum thermal_trip_type type;
727 
728 	ntrips = of_thermal_get_ntrips(tz);
729 	if (ntrips <= 0)
730 		return -EINVAL;
731 
732 	for (i = 0; i < ntrips; i++) {
733 		ret = tz->ops->get_trip_type(tz, i, &type);
734 		if (ret)
735 			return -EINVAL;
736 		if (type == THERMAL_TRIP_HOT) {
737 			ret = tz->ops->get_trip_temp(tz, i, temp);
738 			if (!ret)
739 				*trip = i;
740 
741 			return ret;
742 		}
743 	}
744 
745 	return -EINVAL;
746 }
747 
748 /**
749  * tegra_soctherm_set_hwtrips() - set HW trip point from DT data
750  * @dev: struct device * of the SOC_THERM instance
751  * @sg: pointer to the sensor group to set the thermtrip temperature for
752  * @tz: struct thermal_zone_device *
753  *
754  * Configure the SOC_THERM HW trip points, setting "THERMTRIP" and
755  * "THROTTLE" trip points, using the "thermtrips", "critical" or "hot"
756  * type trip temperatures
757  * from the thermal zone.
758  * After they have been configured, THERMTRIP or THROTTLE will take
759  * action when the configured SoC thermal sensor group reaches a
760  * certain temperature.
761  *
762  * Return: 0 upon success, or a negative error code on failure.
763  * "Success" does not mean that trips was enabled; it could also
764  * mean that no node was found in DT.
765  * THERMTRIP has been enabled successfully when a message similar to
766  * this one appears on the serial console:
767  * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
768  * THROTTLE has been enabled successfully when a message similar to
769  * this one appears on the serial console:
770  * ""throttrip: will throttle when sensor group XXX reaches YYYYYY mC"
771  */
772 static int tegra_soctherm_set_hwtrips(struct device *dev,
773 				      const struct tegra_tsensor_group *sg,
774 				      struct thermal_zone_device *tz)
775 {
776 	struct tegra_soctherm *ts = dev_get_drvdata(dev);
777 	struct soctherm_throt_cfg *stc;
778 	int i, trip, temperature, ret;
779 
780 	/* Get thermtrips. If missing, try to get critical trips. */
781 	temperature = tsensor_group_thermtrip_get(ts, sg->id);
782 	if (min_low_temp == temperature)
783 		if (tz->ops->get_crit_temp(tz, &temperature))
784 			temperature = max_high_temp;
785 
786 	ret = thermtrip_program(dev, sg, temperature);
787 	if (ret) {
788 		dev_err(dev, "thermtrip: %s: error during enable\n", sg->name);
789 		return ret;
790 	}
791 
792 	dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n",
793 		 sg->name, temperature);
794 
795 	ret = get_hot_temp(tz, &trip, &temperature);
796 	if (ret) {
797 		dev_info(dev, "throttrip: %s: missing hot temperature\n",
798 			 sg->name);
799 		return 0;
800 	}
801 
802 	for (i = 0; i < THROTTLE_OC1; i++) {
803 		struct thermal_cooling_device *cdev;
804 
805 		if (!ts->throt_cfgs[i].init)
806 			continue;
807 
808 		cdev = ts->throt_cfgs[i].cdev;
809 		if (get_thermal_instance(tz, cdev, trip))
810 			stc = find_throttle_cfg_by_name(ts, cdev->type);
811 		else
812 			continue;
813 
814 		ret = throttrip_program(dev, sg, stc, temperature);
815 		if (ret) {
816 			dev_err(dev, "throttrip: %s: error during enable\n",
817 				sg->name);
818 			return ret;
819 		}
820 
821 		dev_info(dev,
822 			 "throttrip: will throttle when %s reaches %d mC\n",
823 			 sg->name, temperature);
824 		break;
825 	}
826 
827 	if (i == THROTTLE_SIZE)
828 		dev_info(dev, "throttrip: %s: missing throttle cdev\n",
829 			 sg->name);
830 
831 	return 0;
832 }
833 
834 static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id)
835 {
836 	struct tegra_soctherm *ts = dev_id;
837 	u32 r;
838 
839 	/* Case for no lock:
840 	 * Although interrupts are enabled in set_trips, there is still no need
841 	 * to lock here because the interrupts are disabled before programming
842 	 * new trip points. Hence there can't be an interrupt on the same sensor.
843 	 * An interrupt can however occur on a sensor while trips are being
844 	 * programmed on a different one. This being a LEVEL interrupt won't
845 	 * cause a new interrupt but this is taken care of by the re-reading of
846 	 * the STATUS register in the thread function.
847 	 */
848 	r = readl(ts->regs + THERMCTL_INTR_STATUS);
849 	writel(r, ts->regs + THERMCTL_INTR_DISABLE);
850 
851 	return IRQ_WAKE_THREAD;
852 }
853 
854 /**
855  * soctherm_thermal_isr_thread() - Handles a thermal interrupt request
856  * @irq:       The interrupt number being requested; not used
857  * @dev_id:    Opaque pointer to tegra_soctherm;
858  *
859  * Clears the interrupt status register if there are expected
860  * interrupt bits set.
861  * The interrupt(s) are then handled by updating the corresponding
862  * thermal zones.
863  *
864  * An error is logged if any unexpected interrupt bits are set.
865  *
866  * Disabled interrupts are re-enabled.
867  *
868  * Return: %IRQ_HANDLED. Interrupt was handled and no further processing
869  * is needed.
870  */
871 static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
872 {
873 	struct tegra_soctherm *ts = dev_id;
874 	struct thermal_zone_device *tz;
875 	u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0;
876 
877 	st = readl(ts->regs + THERMCTL_INTR_STATUS);
878 
879 	/* deliberately clear expected interrupts handled in SW */
880 	cp |= st & TH_INTR_CD0_MASK;
881 	cp |= st & TH_INTR_CU0_MASK;
882 
883 	gp |= st & TH_INTR_GD0_MASK;
884 	gp |= st & TH_INTR_GU0_MASK;
885 
886 	pl |= st & TH_INTR_PD0_MASK;
887 	pl |= st & TH_INTR_PU0_MASK;
888 
889 	me |= st & TH_INTR_MD0_MASK;
890 	me |= st & TH_INTR_MU0_MASK;
891 
892 	ex |= cp | gp | pl | me;
893 	if (ex) {
894 		writel(ex, ts->regs + THERMCTL_INTR_STATUS);
895 		st &= ~ex;
896 
897 		if (cp) {
898 			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU];
899 			thermal_zone_device_update(tz,
900 						   THERMAL_EVENT_UNSPECIFIED);
901 		}
902 
903 		if (gp) {
904 			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU];
905 			thermal_zone_device_update(tz,
906 						   THERMAL_EVENT_UNSPECIFIED);
907 		}
908 
909 		if (pl) {
910 			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX];
911 			thermal_zone_device_update(tz,
912 						   THERMAL_EVENT_UNSPECIFIED);
913 		}
914 
915 		if (me) {
916 			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM];
917 			thermal_zone_device_update(tz,
918 						   THERMAL_EVENT_UNSPECIFIED);
919 		}
920 	}
921 
922 	/* deliberately ignore expected interrupts NOT handled in SW */
923 	ex |= TH_INTR_IGNORE_MASK;
924 	st &= ~ex;
925 
926 	if (st) {
927 		/* Whine about any other unexpected INTR bits still set */
928 		pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st);
929 		writel(st, ts->regs + THERMCTL_INTR_STATUS);
930 	}
931 
932 	return IRQ_HANDLED;
933 }
934 
935 /**
936  * soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
937  * @ts:		pointer to a struct tegra_soctherm
938  * @alarm:		The soctherm throttle id
939  * @enable:		Flag indicating whether to enable or disable the
940  *			soctherm over-current interrupt
941  *
942  * Enables a specific over-current alarm @alarm to raise an interrupt if the flag
943  * is set and the alarm corresponds to OC1, OC2, OC3, or OC4.
944  */
945 static void soctherm_oc_intr_enable(struct tegra_soctherm *ts,
946 				    enum soctherm_throttle_id alarm,
947 				    bool enable)
948 {
949 	u32 r;
950 
951 	if (!enable)
952 		return;
953 
954 	r = readl(ts->regs + OC_INTR_ENABLE);
955 	switch (alarm) {
956 	case THROTTLE_OC1:
957 		r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1);
958 		break;
959 	case THROTTLE_OC2:
960 		r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1);
961 		break;
962 	case THROTTLE_OC3:
963 		r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1);
964 		break;
965 	case THROTTLE_OC4:
966 		r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1);
967 		break;
968 	default:
969 		r = 0;
970 		break;
971 	}
972 	writel(r, ts->regs + OC_INTR_ENABLE);
973 }
974 
975 /**
976  * soctherm_handle_alarm() - Handles soctherm alarms
977  * @alarm:		The soctherm throttle id
978  *
979  * "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing
980  * a warning or informative message.
981  *
982  * Return: 0 if @alarm is a supported OC alarm, otherwise -EINVAL.
983  */
984 static int soctherm_handle_alarm(enum soctherm_throttle_id alarm)
985 {
986 	int rv = -EINVAL;
987 
988 	switch (alarm) {
989 	case THROTTLE_OC1:
990 		pr_debug("soctherm: Successfully handled OC1 alarm\n");
991 		rv = 0;
992 		break;
993 
994 	case THROTTLE_OC2:
995 		pr_debug("soctherm: Successfully handled OC2 alarm\n");
996 		rv = 0;
997 		break;
998 
999 	case THROTTLE_OC3:
1000 		pr_debug("soctherm: Successfully handled OC3 alarm\n");
1001 		rv = 0;
1002 		break;
1003 
1004 	case THROTTLE_OC4:
1005 		pr_debug("soctherm: Successfully handled OC4 alarm\n");
1006 		rv = 0;
1007 		break;
1008 
1009 	default:
1010 		break;
1011 	}
1012 
1013 	if (rv)
1014 		pr_err("soctherm: ERROR in handling %s alarm\n",
1015 		       throt_names[alarm]);
1016 
1017 	return rv;
1018 }
1019 
1020 /**
1021  * soctherm_edp_isr_thread() - log an over-current interrupt request
1022  * @irq:	OC irq number. Currently not being used. See description
1023  * @arg:	opaque pointer to the struct tegra_soctherm
1024  *
1025  * Over-current events are handled in hardware. This function is called to log
1026  * and handle any OC events that happened. Additionally, it checks the
1027  * over-current interrupt status register for any bits that are set but
1028  * were not expected (i.e. any discrepancy in interrupt status); any such
1029  * discrepancy is logged.
1030  *
1031  * Return: %IRQ_HANDLED
1032  */
1033 static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg)
1034 {
1035 	struct tegra_soctherm *ts = arg;
1036 	u32 st, ex, oc1, oc2, oc3, oc4;
1037 
1038 	st = readl(ts->regs + OC_INTR_STATUS);
1039 
1040 	/* deliberately clear expected interrupts handled in SW */
1041 	oc1 = st & OC_INTR_OC1_MASK;
1042 	oc2 = st & OC_INTR_OC2_MASK;
1043 	oc3 = st & OC_INTR_OC3_MASK;
1044 	oc4 = st & OC_INTR_OC4_MASK;
1045 	ex = oc1 | oc2 | oc3 | oc4;
1046 
1047 	pr_err("soctherm: OC ALARM 0x%08x\n", ex);
1048 	if (ex) {
1049 		writel(st, ts->regs + OC_INTR_STATUS);
1050 		st &= ~ex;
1051 
1052 		if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1))
1053 			soctherm_oc_intr_enable(ts, THROTTLE_OC1, true);
1054 
1055 		if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2))
1056 			soctherm_oc_intr_enable(ts, THROTTLE_OC2, true);
1057 
1058 		if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3))
1059 			soctherm_oc_intr_enable(ts, THROTTLE_OC3, true);
1060 
1061 		if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4))
1062 			soctherm_oc_intr_enable(ts, THROTTLE_OC4, true);
1063 
1064 		if (oc1 && soc_irq_cdata.irq_enable & BIT(0))
1065 			handle_nested_irq(
1066 				irq_find_mapping(soc_irq_cdata.domain, 0));
1067 
1068 		if (oc2 && soc_irq_cdata.irq_enable & BIT(1))
1069 			handle_nested_irq(
1070 				irq_find_mapping(soc_irq_cdata.domain, 1));
1071 
1072 		if (oc3 && soc_irq_cdata.irq_enable & BIT(2))
1073 			handle_nested_irq(
1074 				irq_find_mapping(soc_irq_cdata.domain, 2));
1075 
1076 		if (oc4 && soc_irq_cdata.irq_enable & BIT(3))
1077 			handle_nested_irq(
1078 				irq_find_mapping(soc_irq_cdata.domain, 3));
1079 	}
1080 
1081 	if (st) {
1082 		pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st);
1083 		writel(st, ts->regs + OC_INTR_STATUS);
1084 	}
1085 
1086 	return IRQ_HANDLED;
1087 }
1088 
1089 /**
1090  * soctherm_edp_isr() - Disables any active interrupts
1091  * @irq:	The interrupt request number
1092  * @arg:	Opaque pointer to an argument
1093  *
1094  * Writes to the OC_INTR_DISABLE register the over current interrupt status,
1095  * masking any asserted interrupts. Doing this prevents the same interrupts
1096  * from triggering this isr repeatedly. The thread woken by this isr will
1097  * handle asserted interrupts and subsequently unmask/re-enable them.
1098  *
1099  * The OC_INTR_DISABLE register indicates which OC interrupts
1100  * have been disabled.
1101  *
1102  * Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread
1103  */
1104 static irqreturn_t soctherm_edp_isr(int irq, void *arg)
1105 {
1106 	struct tegra_soctherm *ts = arg;
1107 	u32 r;
1108 
1109 	if (!ts)
1110 		return IRQ_NONE;
1111 
1112 	r = readl(ts->regs + OC_INTR_STATUS);
1113 	writel(r, ts->regs + OC_INTR_DISABLE);
1114 
1115 	return IRQ_WAKE_THREAD;
1116 }
1117 
1118 /**
1119  * soctherm_oc_irq_lock() - locks the over-current interrupt request
1120  * @data:	Interrupt request data
1121  *
1122  * Looks up the chip data from @data and locks the mutex associated with
1123  * a particular over-current interrupt request.
1124  */
1125 static void soctherm_oc_irq_lock(struct irq_data *data)
1126 {
1127 	struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1128 
1129 	mutex_lock(&d->irq_lock);
1130 }
1131 
1132 /**
1133  * soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request
1134  * @data:		Interrupt request data
1135  *
1136  * Looks up the interrupt request data @data and unlocks the mutex associated
1137  * with a particular over-current interrupt request.
1138  */
1139 static void soctherm_oc_irq_sync_unlock(struct irq_data *data)
1140 {
1141 	struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1142 
1143 	mutex_unlock(&d->irq_lock);
1144 }
1145 
1146 /**
1147  * soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt queue
1148  * @data:       irq_data structure of the chip
1149  *
1150  * Sets the irq_enable bit of SOC_THERM allowing SOC_THERM
1151  * to respond to over-current interrupts.
1152  *
1153  */
1154 static void soctherm_oc_irq_enable(struct irq_data *data)
1155 {
1156 	struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1157 
1158 	d->irq_enable |= BIT(data->hwirq);
1159 }
1160 
1161 /**
1162  * soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
1163  * @data:	The interrupt request information
1164  *
1165  * Clears the interrupt request enable bit of the overcurrent
1166  * interrupt request chip data.
1167  *
1168  * Return: Nothing is returned (void)
1169  */
1170 static void soctherm_oc_irq_disable(struct irq_data *data)
1171 {
1172 	struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
1173 
1174 	d->irq_enable &= ~BIT(data->hwirq);
1175 }
1176 
1177 static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type)
1178 {
1179 	return 0;
1180 }
1181 
1182 /**
1183  * soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper
1184  * @h:		Interrupt request domain
1185  * @virq:	Virtual interrupt request number
1186  * @hw:		Hardware interrupt request number
1187  *
1188  * Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM
1189  * interrupt request is called, the irq_domain takes the request's virtual
1190  * request number (much like a virtual memory address) and maps it to a
1191  * physical hardware request number.
1192  *
1193  * When a mapping doesn't already exist for a virtual request number, the
1194  * irq_domain calls this function to associate the virtual request number with
1195  * a hardware request number.
1196  *
1197  * Return: 0
1198  */
1199 static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
1200 		irq_hw_number_t hw)
1201 {
1202 	struct soctherm_oc_irq_chip_data *data = h->host_data;
1203 
1204 	irq_set_chip_data(virq, data);
1205 	irq_set_chip(virq, &data->irq_chip);
1206 	irq_set_nested_thread(virq, 1);
1207 	return 0;
1208 }
1209 
1210 /**
1211  * soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
1212  * @d:      Interrupt request domain
1213  * @ctrlr:      Controller device tree node
1214  * @intspec:    Array of u32s from DTs "interrupt" property
1215  * @intsize:    Number of values inside the intspec array
1216  * @out_hwirq:  HW IRQ value associated with this interrupt
1217  * @out_type:   The IRQ SENSE type for this interrupt.
1218  *
1219  * This Device Tree IRQ specifier translation function will translate a
1220  * specific "interrupt" as defined by 2 DT values where the cell values map
1221  * the hwirq number + 1 and linux irq flags. Since the output is the hwirq
1222  * number, this function will subtract 1 from the value listed in DT.
1223  *
1224  * Return: 0
1225  */
1226 static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d,
1227 	struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
1228 	irq_hw_number_t *out_hwirq, unsigned int *out_type)
1229 {
1230 	if (WARN_ON(intsize < 2))
1231 		return -EINVAL;
1232 
1233 	/*
1234 	 * The HW value is 1 index less than the DT IRQ values.
1235 	 * i.e. OC4 goes to HW index 3.
1236 	 */
1237 	*out_hwirq = intspec[0] - 1;
1238 	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
1239 	return 0;
1240 }
1241 
1242 static const struct irq_domain_ops soctherm_oc_domain_ops = {
1243 	.map	= soctherm_oc_irq_map,
1244 	.xlate	= soctherm_irq_domain_xlate_twocell,
1245 };
1246 
1247 /**
1248  * soctherm_oc_int_init() - Initial enabling of the over
1249  * current interrupts
1250  * @np:	The devicetree node for soctherm
1251  * @num_irqs:	The number of new interrupt requests
1252  *
1253  * Sets the over current interrupt request chip data
1254  *
1255  * Return: 0 on success or if overcurrent interrupts are not enabled,
1256  * -ENOMEM (out of memory), or irq_base if the function failed to
1257  * allocate the irqs
1258  */
1259 static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
1260 {
1261 	if (!num_irqs) {
1262 		pr_info("%s(): OC interrupts are not enabled\n", __func__);
1263 		return 0;
1264 	}
1265 
1266 	mutex_init(&soc_irq_cdata.irq_lock);
1267 	soc_irq_cdata.irq_enable = 0;
1268 
1269 	soc_irq_cdata.irq_chip.name = "soc_therm_oc";
1270 	soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock;
1271 	soc_irq_cdata.irq_chip.irq_bus_sync_unlock =
1272 		soctherm_oc_irq_sync_unlock;
1273 	soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable;
1274 	soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable;
1275 	soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
1276 	soc_irq_cdata.irq_chip.irq_set_wake = NULL;
1277 
1278 	soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
1279 						     &soctherm_oc_domain_ops,
1280 						     &soc_irq_cdata);
1281 
1282 	if (!soc_irq_cdata.domain) {
1283 		pr_err("%s: Failed to create IRQ domain\n", __func__);
1284 		return -ENOMEM;
1285 	}
1286 
1287 	pr_debug("%s(): OC interrupts enabled successfully\n", __func__);
1288 	return 0;
1289 }
1290 
1291 #ifdef CONFIG_DEBUG_FS
1292 static int regs_show(struct seq_file *s, void *data)
1293 {
1294 	struct platform_device *pdev = s->private;
1295 	struct tegra_soctherm *ts = platform_get_drvdata(pdev);
1296 	const struct tegra_tsensor *tsensors = ts->soc->tsensors;
1297 	const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
1298 	u32 r, state;
1299 	int i, level;
1300 
1301 	seq_puts(s, "-----TSENSE (convert HW)-----\n");
1302 
1303 	for (i = 0; i < ts->soc->num_tsensors; i++) {
1304 		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1);
1305 		state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE);
1306 
1307 		seq_printf(s, "%s: ", tsensors[i].name);
1308 		seq_printf(s, "En(%d) ", state);
1309 
1310 		if (!state) {
1311 			seq_puts(s, "\n");
1312 			continue;
1313 		}
1314 
1315 		state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK);
1316 		seq_printf(s, "tiddq(%d) ", state);
1317 		state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK);
1318 		seq_printf(s, "ten_count(%d) ", state);
1319 		state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK);
1320 		seq_printf(s, "tsample(%d) ", state + 1);
1321 
1322 		r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1);
1323 		state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK);
1324 		seq_printf(s, "Temp(%d/", state);
1325 		state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK);
1326 		seq_printf(s, "%d) ", translate_temp(state));
1327 
1328 		r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0);
1329 		state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK);
1330 		seq_printf(s, "Capture(%d/", state);
1331 		state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK);
1332 		seq_printf(s, "%d) ", state);
1333 
1334 		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0);
1335 		state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP);
1336 		seq_printf(s, "Stop(%d) ", state);
1337 		state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK);
1338 		seq_printf(s, "Tall(%d) ", state);
1339 		state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER);
1340 		seq_printf(s, "Over(%d/", state);
1341 		state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER);
1342 		seq_printf(s, "%d/", state);
1343 		state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER);
1344 		seq_printf(s, "%d) ", state);
1345 
1346 		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2);
1347 		state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK);
1348 		seq_printf(s, "Therm_A/B(%d/", state);
1349 		state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK);
1350 		seq_printf(s, "%d)\n", (s16)state);
1351 	}
1352 
1353 	r = readl(ts->regs + SENSOR_PDIV);
1354 	seq_printf(s, "PDIV: 0x%x\n", r);
1355 
1356 	r = readl(ts->regs + SENSOR_HOTSPOT_OFF);
1357 	seq_printf(s, "HOTSPOT: 0x%x\n", r);
1358 
1359 	seq_puts(s, "\n");
1360 	seq_puts(s, "-----SOC_THERM-----\n");
1361 
1362 	r = readl(ts->regs + SENSOR_TEMP1);
1363 	state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK);
1364 	seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state));
1365 	state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK);
1366 	seq_printf(s, " GPU(%d) ", translate_temp(state));
1367 	r = readl(ts->regs + SENSOR_TEMP2);
1368 	state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK);
1369 	seq_printf(s, " PLLX(%d) ", translate_temp(state));
1370 	state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
1371 	seq_printf(s, " MEM(%d)\n", translate_temp(state));
1372 
1373 	for (i = 0; i < ts->soc->num_ttgs; i++) {
1374 		seq_printf(s, "%s:\n", ttgs[i]->name);
1375 		for (level = 0; level < 4; level++) {
1376 			s32 v;
1377 			u32 mask;
1378 			u16 off = ttgs[i]->thermctl_lvl0_offset;
1379 
1380 			r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
1381 
1382 			mask = ttgs[i]->thermctl_lvl0_up_thresh_mask;
1383 			state = REG_GET_MASK(r, mask);
1384 			v = sign_extend32(state, ts->soc->bptt - 1);
1385 			v *= ts->soc->thresh_grain;
1386 			seq_printf(s, "   %d: Up/Dn(%d /", level, v);
1387 
1388 			mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask;
1389 			state = REG_GET_MASK(r, mask);
1390 			v = sign_extend32(state, ts->soc->bptt - 1);
1391 			v *= ts->soc->thresh_grain;
1392 			seq_printf(s, "%d ) ", v);
1393 
1394 			mask = THERMCTL_LVL0_CPU0_EN_MASK;
1395 			state = REG_GET_MASK(r, mask);
1396 			seq_printf(s, "En(%d) ", state);
1397 
1398 			mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK;
1399 			state = REG_GET_MASK(r, mask);
1400 			seq_puts(s, "CPU Throt");
1401 			if (!state)
1402 				seq_printf(s, "(%s) ", "none");
1403 			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT)
1404 				seq_printf(s, "(%s) ", "L");
1405 			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY)
1406 				seq_printf(s, "(%s) ", "H");
1407 			else
1408 				seq_printf(s, "(%s) ", "H+L");
1409 
1410 			mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK;
1411 			state = REG_GET_MASK(r, mask);
1412 			seq_puts(s, "GPU Throt");
1413 			if (!state)
1414 				seq_printf(s, "(%s) ", "none");
1415 			else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT)
1416 				seq_printf(s, "(%s) ", "L");
1417 			else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY)
1418 				seq_printf(s, "(%s) ", "H");
1419 			else
1420 				seq_printf(s, "(%s) ", "H+L");
1421 
1422 			mask = THERMCTL_LVL0_CPU0_STATUS_MASK;
1423 			state = REG_GET_MASK(r, mask);
1424 			seq_printf(s, "Status(%s)\n",
1425 				   state == 0 ? "LO" :
1426 				   state == 1 ? "In" :
1427 				   state == 2 ? "Res" : "HI");
1428 		}
1429 	}
1430 
1431 	r = readl(ts->regs + THERMCTL_STATS_CTL);
1432 	seq_printf(s, "STATS: Up(%s) Dn(%s)\n",
1433 		   r & STATS_CTL_EN_UP ? "En" : "--",
1434 		   r & STATS_CTL_EN_DN ? "En" : "--");
1435 
1436 	for (level = 0; level < 4; level++) {
1437 		u16 off;
1438 
1439 		off = THERMCTL_LVL0_UP_STATS;
1440 		r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
1441 		seq_printf(s, "  Level_%d Up(%d) ", level, r);
1442 
1443 		off = THERMCTL_LVL0_DN_STATS;
1444 		r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
1445 		seq_printf(s, "Dn(%d)\n", r);
1446 	}
1447 
1448 	r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
1449 	state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
1450 	seq_printf(s, "Thermtrip Any En(%d)\n", state);
1451 	for (i = 0; i < ts->soc->num_ttgs; i++) {
1452 		state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask);
1453 		seq_printf(s, "     %s En(%d) ", ttgs[i]->name, state);
1454 		state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask);
1455 		state *= ts->soc->thresh_grain;
1456 		seq_printf(s, "Thresh(%d)\n", state);
1457 	}
1458 
1459 	r = readl(ts->regs + THROT_GLOBAL_CFG);
1460 	seq_puts(s, "\n");
1461 	seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r);
1462 
1463 	seq_puts(s, "---------------------------------------------------\n");
1464 	r = readl(ts->regs + THROT_STATUS);
1465 	state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK);
1466 	seq_printf(s, "THROT STATUS: breach(%d) ", state);
1467 	state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK);
1468 	seq_printf(s, "state(%d) ", state);
1469 	state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK);
1470 	seq_printf(s, "enabled(%d)\n", state);
1471 
1472 	r = readl(ts->regs + CPU_PSKIP_STATUS);
1473 	if (ts->soc->use_ccroc) {
1474 		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
1475 		seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state);
1476 	} else {
1477 		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK);
1478 		seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state);
1479 		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK);
1480 		seq_printf(s, "N(%d) ", state);
1481 		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
1482 		seq_printf(s, "enabled(%d)\n", state);
1483 	}
1484 
1485 	return 0;
1486 }
1487 
1488 DEFINE_SHOW_ATTRIBUTE(regs);
1489 
1490 static void soctherm_debug_init(struct platform_device *pdev)
1491 {
1492 	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
1493 	struct dentry *root;
1494 
1495 	root = debugfs_create_dir("soctherm", NULL);
1496 
1497 	tegra->debugfs_dir = root;
1498 
1499 	debugfs_create_file("reg_contents", 0644, root, pdev, &regs_fops);
1500 }
1501 #else
1502 static inline void soctherm_debug_init(struct platform_device *pdev) {}
1503 #endif
1504 
1505 static int soctherm_clk_enable(struct platform_device *pdev, bool enable)
1506 {
1507 	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
1508 	int err;
1509 
1510 	if (!tegra->clock_soctherm || !tegra->clock_tsensor)
1511 		return -EINVAL;
1512 
1513 	reset_control_assert(tegra->reset);
1514 
1515 	if (enable) {
1516 		err = clk_prepare_enable(tegra->clock_soctherm);
1517 		if (err) {
1518 			reset_control_deassert(tegra->reset);
1519 			return err;
1520 		}
1521 
1522 		err = clk_prepare_enable(tegra->clock_tsensor);
1523 		if (err) {
1524 			clk_disable_unprepare(tegra->clock_soctherm);
1525 			reset_control_deassert(tegra->reset);
1526 			return err;
1527 		}
1528 	} else {
1529 		clk_disable_unprepare(tegra->clock_tsensor);
1530 		clk_disable_unprepare(tegra->clock_soctherm);
1531 	}
1532 
1533 	reset_control_deassert(tegra->reset);
1534 
1535 	return 0;
1536 }
1537 
1538 static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev,
1539 				    unsigned long *max_state)
1540 {
1541 	*max_state = 1;
1542 	return 0;
1543 }
1544 
1545 static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev,
1546 				    unsigned long *cur_state)
1547 {
1548 	struct tegra_soctherm *ts = cdev->devdata;
1549 	u32 r;
1550 
1551 	r = readl(ts->regs + THROT_STATUS);
1552 	if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK))
1553 		*cur_state = 1;
1554 	else
1555 		*cur_state = 0;
1556 
1557 	return 0;
1558 }
1559 
1560 static int throt_set_cdev_state(struct thermal_cooling_device *cdev,
1561 				unsigned long cur_state)
1562 {
1563 	return 0;
1564 }
1565 
1566 static const struct thermal_cooling_device_ops throt_cooling_ops = {
1567 	.get_max_state = throt_get_cdev_max_state,
1568 	.get_cur_state = throt_get_cdev_cur_state,
1569 	.set_cur_state = throt_set_cdev_state,
1570 };
1571 
1572 static int soctherm_thermtrips_parse(struct platform_device *pdev)
1573 {
1574 	struct device *dev = &pdev->dev;
1575 	struct tegra_soctherm *ts = dev_get_drvdata(dev);
1576 	struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
1577 	const int max_num_prop = ts->soc->num_ttgs * 2;
1578 	u32 *tlb;
1579 	int i, j, n, ret;
1580 
1581 	if (!tt)
1582 		return -ENOMEM;
1583 
1584 	n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips");
1585 	if (n <= 0) {
1586 		dev_info(dev,
1587 			 "missing thermtrips, will use critical trips as shut down temp\n");
1588 		return n;
1589 	}
1590 
1591 	n = min(max_num_prop, n);
1592 
1593 	tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL);
1594 	if (!tlb)
1595 		return -ENOMEM;
1596 	ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips",
1597 					 tlb, n);
1598 	if (ret) {
1599 		dev_err(dev, "invalid num ele: thermtrips:%d\n", ret);
1600 		return ret;
1601 	}
1602 
1603 	i = 0;
1604 	for (j = 0; j < n; j = j + 2) {
1605 		if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM)
1606 			continue;
1607 
1608 		tt[i].id = tlb[j];
1609 		tt[i].temp = tlb[j + 1];
1610 		i++;
1611 	}
1612 
1613 	return 0;
1614 }
1615 
1616 static void soctherm_oc_cfg_parse(struct device *dev,
1617 				struct device_node *np_oc,
1618 				struct soctherm_throt_cfg *stc)
1619 {
1620 	u32 val;
1621 
1622 	if (of_property_read_bool(np_oc, "nvidia,polarity-active-low"))
1623 		stc->oc_cfg.active_low = 1;
1624 	else
1625 		stc->oc_cfg.active_low = 0;
1626 
1627 	if (!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) {
1628 		stc->oc_cfg.intr_en = 1;
1629 		stc->oc_cfg.alarm_cnt_thresh = val;
1630 	}
1631 
1632 	if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val))
1633 		stc->oc_cfg.throt_period = val;
1634 
1635 	if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val))
1636 		stc->oc_cfg.alarm_filter = val;
1637 
1638 	/* BRIEF throttling by default, do not support STICKY */
1639 	stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF;
1640 }
1641 
1642 static int soctherm_throt_cfg_parse(struct device *dev,
1643 				    struct device_node *np,
1644 				    struct soctherm_throt_cfg *stc)
1645 {
1646 	struct tegra_soctherm *ts = dev_get_drvdata(dev);
1647 	int ret;
1648 	u32 val;
1649 
1650 	ret = of_property_read_u32(np, "nvidia,priority", &val);
1651 	if (ret) {
1652 		dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name);
1653 		return -EINVAL;
1654 	}
1655 	stc->priority = val;
1656 
1657 	ret = of_property_read_u32(np, ts->soc->use_ccroc ?
1658 				   "nvidia,cpu-throt-level" :
1659 				   "nvidia,cpu-throt-percent", &val);
1660 	if (!ret) {
1661 		if (ts->soc->use_ccroc &&
1662 		    val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
1663 			stc->cpu_throt_level = val;
1664 		else if (!ts->soc->use_ccroc && val <= 100)
1665 			stc->cpu_throt_depth = val;
1666 		else
1667 			goto err;
1668 	} else {
1669 		goto err;
1670 	}
1671 
1672 	ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val);
1673 	if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
1674 		stc->gpu_throt_level = val;
1675 	else
1676 		goto err;
1677 
1678 	return 0;
1679 
1680 err:
1681 		dev_err(dev, "throttle-cfg: %s: missing or invalid throttle property\n",
1682 		stc->name);
1683 	return -EINVAL;
1684 }
1685 
1686 /**
1687  * soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
1688  * and register them as cooling devices.
1689  * @pdev: Pointer to platform_device struct
1690  */
1691 static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
1692 {
1693 	struct device *dev = &pdev->dev;
1694 	struct tegra_soctherm *ts = dev_get_drvdata(dev);
1695 	struct device_node *np_stc, *np_stcc;
1696 	const char *name;
1697 	int i;
1698 
1699 	for (i = 0; i < THROTTLE_SIZE; i++) {
1700 		ts->throt_cfgs[i].name = throt_names[i];
1701 		ts->throt_cfgs[i].id = i;
1702 		ts->throt_cfgs[i].init = false;
1703 	}
1704 
1705 	np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs");
1706 	if (!np_stc) {
1707 		dev_info(dev,
1708 			 "throttle-cfg: no throttle-cfgs - not enabling\n");
1709 		return;
1710 	}
1711 
1712 	for_each_child_of_node(np_stc, np_stcc) {
1713 		struct soctherm_throt_cfg *stc;
1714 		struct thermal_cooling_device *tcd;
1715 		int err;
1716 
1717 		name = np_stcc->name;
1718 		stc = find_throttle_cfg_by_name(ts, name);
1719 		if (!stc) {
1720 			dev_err(dev,
1721 				"throttle-cfg: could not find %s\n", name);
1722 			continue;
1723 		}
1724 
1725 		if (stc->init) {
1726 			dev_err(dev, "throttle-cfg: %s: redefined!\n", name);
1727 			of_node_put(np_stcc);
1728 			break;
1729 		}
1730 
1731 		err = soctherm_throt_cfg_parse(dev, np_stcc, stc);
1732 		if (err)
1733 			continue;
1734 
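		/*
		 * OCx entries are driven by the over-current alarms rather
		 * than by the thermal framework, so they only need their
		 * alarm configuration parsed and no cooling device.
		 */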
1735 		if (stc->id >= THROTTLE_OC1) {
1736 			soctherm_oc_cfg_parse(dev, np_stcc, stc);
1737 			stc->init = true;
1738 		} else {
1739 
1740 			tcd = thermal_of_cooling_device_register(np_stcc,
1741 							 (char *)name, ts,
1742 							 &throt_cooling_ops);
1743 			if (IS_ERR_OR_NULL(tcd)) {
1744 				dev_err(dev,
1745 					"throttle-cfg: %s: failed to register cooling device\n",
1746 					name);
1747 				continue;
1748 			}
1749 			stc->cdev = tcd;
1750 			stc->init = true;
1751 		}
1752 
1753 	}
1754 
1755 	of_node_put(np_stc);
1756 }
1757 
1758 /**
1759  * throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
1760  * @ts: pointer to a struct tegra_soctherm
1761  * @level: the LOW/MED/HIGH throttling level to configure
1762  *
1763  * It's necessary to set up the CPU-local CCROC NV_THERM instance with
1764  * the M/N values desired for each level. This function does this.
1765  *
1766  * This function pre-programs the CCROC NV_THERM levels in terms of
1767  * pre-configured "Low", "Medium" or "Heavy" throttle levels, which are
1768  * mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY.
1769  */
1770 static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
1771 {
1772 	u8 depth, dividend;
1773 	u32 r;
1774 
1775 	switch (level) {
1776 	case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
1777 		depth = 50;
1778 		break;
1779 	case TEGRA_SOCTHERM_THROT_LEVEL_MED:
1780 		depth = 75;
1781 		break;
1782 	case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
1783 		depth = 80;
1784 		break;
1785 	case TEGRA_SOCTHERM_THROT_LEVEL_NONE:
1786 		return;
1787 	default:
1788 		return;
1789 	}
1790 
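	/*
	 * The pulse skipper is an M/N divider: with the divisor programmed
	 * to 0xff, THROT_DEPTH_DIVIDEND() turns the throttle percentage
	 * into (roughly) the share of clock pulses still allowed through,
	 * so a deeper throttle level yields a smaller dividend.
	 */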
1791 	dividend = THROT_DEPTH_DIVIDEND(depth);
1792 
1793 	/* setup PSKIP in ccroc nv_therm registers */
1794 	r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
1795 	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
1796 	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf);
1797 	ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
1798 
1799 	r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
1800 	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1);
1801 	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
1802 	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
1803 	ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
1804 }
1805 
1806 /**
1807  * throttlectl_cpu_level_select() - program CPU pulse skipper config
1808  * @ts: pointer to a struct tegra_soctherm
1809  * @throt: the LIGHT/HEAVY throttle event id
1810  *
1811  * Pulse skippers are used to throttle clock frequencies.  This
1812  * function programs the pulse skippers based on @throt and platform
1813  * data.  This function is used on SoCs which have CPU-local pulse
1814  * skipper control, such as T13x. It programs soctherm's interface to
1815  * Denver:CCROC NV_THERM in terms of Low, Medium and High throttling
1816  * vectors. PSKIP_BYPASS mode is set as required per HW spec.
1817  */
1818 static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
1819 					 enum soctherm_throttle_id throt)
1820 {
1821 	u32 r, throt_vect;
1822 
1823 	/* Denver:CCROC NV_THERM interface N:3 Mapping */
1824 	switch (ts->throt_cfgs[throt].cpu_throt_level) {
1825 	case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
1826 		throt_vect = THROT_VECT_LOW;
1827 		break;
1828 	case TEGRA_SOCTHERM_THROT_LEVEL_MED:
1829 		throt_vect = THROT_VECT_MED;
1830 		break;
1831 	case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
1832 		throt_vect = THROT_VECT_HIGH;
1833 		break;
1834 	default:
1835 		throt_vect = THROT_VECT_NONE;
1836 		break;
1837 	}
1838 
1839 	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1840 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
1841 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect);
1842 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect);
1843 	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1844 
1845 	/* bypass sequencer in soc_therm as it is programmed in ccroc */
1846 	r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1);
1847 	writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
1848 }
1849 
1850 /**
1851  * throttlectl_cpu_mn() - program CPU pulse skipper configuration
1852  * @ts: pointer to a struct tegra_soctherm
1853  * @throt: the LIGHT/HEAVY throttle event id
1854  *
1855  * Pulse skippers are used to throttle clock frequencies.  This
1856  * function programs the pulse skippers based on @throt and platform
1857  * data.  This function is used for CPUs that have "remote" pulse
1858  * skipper control, e.g., the CPU pulse skipper is controlled by the
1859  * SOC_THERM IP block.  (SOC_THERM is located outside the CPU
1860  * complex.)
1861  */
1862 static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
1863 			       enum soctherm_throttle_id throt)
1864 {
1865 	u32 r;
1866 	int depth;
1867 	u8 dividend;
1868 
1869 	depth = ts->throt_cfgs[throt].cpu_throt_depth;
1870 	dividend = THROT_DEPTH_DIVIDEND(depth);
1871 
1872 	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1873 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
1874 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
1875 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
1876 	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
1877 
1878 	r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
1879 	r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
1880 	r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf);
1881 	writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
1882 }
1883 
1884 /**
1885  * throttlectl_gpu_level_select() - selects throttling level for GPU
1886  * @ts: pointer to a struct tegra_soctherm
1887  * @throt: the LIGHT/HEAVY throttle event id
1888  *
1889  * This function programs soctherm's interface to GK20a NV_THERM to select
1890  * pre-configured "Low", "Medium" or "Heavy" throttle levels.
1891  *
1892  * Return: none
1893  */
1894 static void throttlectl_gpu_level_select(struct tegra_soctherm *ts,
1895 					 enum soctherm_throttle_id throt)
1896 {
1897 	u32 r, level, throt_vect;
1898 
1899 	level = ts->throt_cfgs[throt].gpu_throt_level;
1900 	throt_vect = THROT_LEVEL_TO_DEPTH(level);
1901 	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
1902 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
1903 	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect);
1904 	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
1905 }
1906 
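/**
 * soctherm_oc_cfg_program() - program one over-current alarm
 * @ts: pointer to a struct tegra_soctherm
 * @throt: the THROTTLE_OCx throttle event id
 *
 * Writes the parsed OC configuration (throttle mode, alarm polarity,
 * throttle period, count threshold and alarm filter) into the alarm
 * registers and enables the OC interrupt when requested.
 *
 * Return: 0 on success, -EINVAL if the OC mode is disabled.
 */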
1907 static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
1908 				      enum soctherm_throttle_id throt)
1909 {
1910 	u32 r;
1911 	struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;
1912 
1913 	if (oc->mode == OC_THROTTLE_MODE_DISABLED)
1914 		return -EINVAL;
1915 
1916 	r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1);
1917 	r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
1918 	r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
1919 	r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1);
1920 	writel(r, ts->regs + ALARM_CFG(throt));
1921 	writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
1922 	writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
1923 	writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
1924 	soctherm_oc_intr_enable(ts, throt, oc->intr_en);
1925 
1926 	return 0;
1927 }
1928 
1929 /**
1930  * soctherm_throttle_program() - programs pulse skippers' configuration
1931  * @ts: pointer to a struct tegra_soctherm
1932  * @throt: the LIGHT/HEAVY throttle event id.
1933  *
1934  * Pulse skippers are used to throttle clock frequencies.
1935  * This function programs the pulse skippers.
1936  */
1937 static void soctherm_throttle_program(struct tegra_soctherm *ts,
1938 				      enum soctherm_throttle_id throt)
1939 {
1940 	u32 r;
1941 	struct soctherm_throt_cfg stc = ts->throt_cfgs[throt];
1942 
1943 	if (!stc.init)
1944 		return;
1945 
1946 	if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt)))
1947 		return;
1948 
1949 	/* Setup PSKIP parameters */
1950 	if (ts->soc->use_ccroc)
1951 		throttlectl_cpu_level_select(ts, throt);
1952 	else
1953 		throttlectl_cpu_mn(ts, throt);
1954 
1955 	throttlectl_gpu_level_select(ts, throt);
1956 
1957 	r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
1958 	writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
1959 
1960 	r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0);
1961 	writel(r, ts->regs + THROT_DELAY_CTRL(throt));
1962 
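	/*
	 * THROT_PRIORITY_LOCK holds the highest priority programmed so far;
	 * it is only ever raised here, never lowered.
	 */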
1963 	r = readl(ts->regs + THROT_PRIORITY_LOCK);
1964 	r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK);
1965 	if (r >= stc.priority)
1966 		return;
1967 	r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK,
1968 			 stc.priority);
1969 	writel(r, ts->regs + THROT_PRIORITY_LOCK);
1970 }
1971 
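/**
 * tegra_soctherm_throttle() - configure hardware throttling
 * @dev: soctherm device
 *
 * Pre-programs the CCROC NV_THERM LOW/MED/HIGH levels on SoCs that have
 * a CCROC, programs every parsed throttle configuration, enables
 * throttling globally, lets the thermal logic drive the CCLKG super
 * clock divider and finally arms the up/down statistics counters.
 */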
1972 static void tegra_soctherm_throttle(struct device *dev)
1973 {
1974 	struct tegra_soctherm *ts = dev_get_drvdata(dev);
1975 	u32 v;
1976 	int i;
1977 
1978 	/* configure LOW, MED and HIGH levels for CCROC NV_THERM */
1979 	if (ts->soc->use_ccroc) {
1980 		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW);
1981 		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED);
1982 		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH);
1983 	}
1984 
1985 	/* Thermal HW throttle programming */
1986 	for (i = 0; i < THROTTLE_SIZE; i++)
1987 		soctherm_throttle_program(ts, i);
1988 
1989 	v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1);
1990 	if (ts->soc->use_ccroc) {
1991 		ccroc_writel(ts, v, CCROC_GLOBAL_CFG);
1992 
1993 		v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER);
1994 		v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
1995 		ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER);
1996 	} else {
1997 		writel(v, ts->regs + THROT_GLOBAL_CFG);
1998 
1999 		v = readl(ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
2000 		v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
2001 		writel(v, ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
2002 	}
2003 
2004 	/* initialize stats collection */
2005 	v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN |
2006 	    STATS_CTL_CLR_UP | STATS_CTL_EN_UP;
2007 	writel(v, ts->regs + THERMCTL_STATS_CTL);
2008 }
2009 
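/**
 * soctherm_interrupts_init() - request the thermal and EDP interrupts
 * @pdev: Pointer to platform_device struct
 * @tegra: pointer to a struct tegra_soctherm
 *
 * A missing IRQ resource is treated as non-fatal and the driver simply
 * continues without interrupt support; a failed request of an existing
 * IRQ is reported as an error.
 *
 * Return: 0 on success or when IRQs are absent, a negative error code
 * otherwise.
 */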
2010 static int soctherm_interrupts_init(struct platform_device *pdev,
2011 				    struct tegra_soctherm *tegra)
2012 {
2013 	struct device_node *np = pdev->dev.of_node;
2014 	int ret;
2015 
2016 	ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
2017 	if (ret < 0) {
2018 		dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
2019 		return ret;
2020 	}
2021 
2022 	tegra->thermal_irq = platform_get_irq(pdev, 0);
2023 	if (tegra->thermal_irq < 0) {
2024 		dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n");
2025 		return 0;
2026 	}
2027 
2028 	tegra->edp_irq = platform_get_irq(pdev, 1);
2029 	if (tegra->edp_irq < 0) {
2030 		dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n");
2031 		return 0;
2032 	}
2033 
2034 	ret = devm_request_threaded_irq(&pdev->dev,
2035 					tegra->thermal_irq,
2036 					soctherm_thermal_isr,
2037 					soctherm_thermal_isr_thread,
2038 					IRQF_ONESHOT,
2039 					dev_name(&pdev->dev),
2040 					tegra);
2041 	if (ret < 0) {
2042 		dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n");
2043 		return ret;
2044 	}
2045 
2046 	ret = devm_request_threaded_irq(&pdev->dev,
2047 					tegra->edp_irq,
2048 					soctherm_edp_isr,
2049 					soctherm_edp_isr_thread,
2050 					IRQF_ONESHOT,
2051 					"soctherm_edp",
2052 					tegra);
2053 	if (ret < 0) {
2054 		dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n");
2055 		return ret;
2056 	}
2057 
2058 	return 0;
2059 }
2060 
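/**
 * soctherm_init() - initialize the SOC_THERM hardware
 * @pdev: Pointer to platform_device struct
 *
 * Enables every raw temperature sensor, programs the per-group PDIV and
 * hotspot offsets (PLLX is skipped since the offsets are relative to it)
 * and finally sets up hardware throttling.
 */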
2061 static void soctherm_init(struct platform_device *pdev)
2062 {
2063 	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
2064 	const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs;
2065 	int i;
2066 	u32 pdiv, hotspot;
2067 
2068 	/* Initialize raw sensors */
2069 	for (i = 0; i < tegra->soc->num_tsensors; ++i)
2070 		enable_tsensor(tegra, i);
2071 
2072 	/* program pdiv and hotspot offsets per THERM */
2073 	pdiv = readl(tegra->regs + SENSOR_PDIV);
2074 	hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF);
2075 	for (i = 0; i < tegra->soc->num_ttgs; ++i) {
2076 		pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask,
2077 				    ttgs[i]->pdiv);
2078 		/* hotspot offset from PLLX, doesn't need to configure PLLX */
2079 		if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX)
2080 			continue;
2081 		hotspot = REG_SET_MASK(hotspot,
2082 					ttgs[i]->pllx_hotspot_mask,
2083 					ttgs[i]->pllx_hotspot_diff);
2084 	}
2085 	writel(pdiv, tegra->regs + SENSOR_PDIV);
2086 	writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
2087 
2088 	/* Configure hw throttle */
2089 	tegra_soctherm_throttle(&pdev->dev);
2090 }
2091 
2092 static const struct of_device_id tegra_soctherm_of_match[] = {
2093 #ifdef CONFIG_ARCH_TEGRA_124_SOC
2094 	{
2095 		.compatible = "nvidia,tegra124-soctherm",
2096 		.data = &tegra124_soctherm,
2097 	},
2098 #endif
2099 #ifdef CONFIG_ARCH_TEGRA_132_SOC
2100 	{
2101 		.compatible = "nvidia,tegra132-soctherm",
2102 		.data = &tegra132_soctherm,
2103 	},
2104 #endif
2105 #ifdef CONFIG_ARCH_TEGRA_210_SOC
2106 	{
2107 		.compatible = "nvidia,tegra210-soctherm",
2108 		.data = &tegra210_soctherm,
2109 	},
2110 #endif
2111 	{ },
2112 };
2113 MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
2114 
2115 static int tegra_soctherm_probe(struct platform_device *pdev)
2116 {
2117 	const struct of_device_id *match;
2118 	struct tegra_soctherm *tegra;
2119 	struct thermal_zone_device *z;
2120 	struct tsensor_shared_calib shared_calib;
2121 	struct resource *res;
2122 	struct tegra_soctherm_soc *soc;
2123 	unsigned int i;
2124 	int err;
2125 
2126 	match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node);
2127 	if (!match)
2128 		return -ENODEV;
2129 
2130 	soc = (struct tegra_soctherm_soc *)match->data;
2131 	if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM)
2132 		return -EINVAL;
2133 
2134 	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
2135 	if (!tegra)
2136 		return -ENOMEM;
2137 
2138 	mutex_init(&tegra->thermctl_lock);
2139 	dev_set_drvdata(&pdev->dev, tegra);
2140 
2141 	tegra->soc = soc;
2142 
2143 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2144 					   "soctherm-reg");
2145 	tegra->regs = devm_ioremap_resource(&pdev->dev, res);
2146 	if (IS_ERR(tegra->regs)) {
2147 		dev_err(&pdev->dev, "can't get soctherm registers\n");
2148 		return PTR_ERR(tegra->regs);
2149 	}
2150 
2151 	if (!tegra->soc->use_ccroc) {
2152 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2153 						   "car-reg");
2154 		tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res);
2155 		if (IS_ERR(tegra->clk_regs)) {
2156 			dev_err(&pdev->dev, "can't get car clk registers\n");
2157 			return PTR_ERR(tegra->clk_regs);
2158 		}
2159 	} else {
2160 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2161 						   "ccroc-reg");
2162 		tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res);
2163 		if (IS_ERR(tegra->ccroc_regs)) {
2164 			dev_err(&pdev->dev, "can't get ccroc registers\n");
2165 			return PTR_ERR(tegra->ccroc_regs);
2166 		}
2167 	}
2168 
2169 	tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
2170 	if (IS_ERR(tegra->reset)) {
2171 		dev_err(&pdev->dev, "can't get soctherm reset\n");
2172 		return PTR_ERR(tegra->reset);
2173 	}
2174 
2175 	tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
2176 	if (IS_ERR(tegra->clock_tsensor)) {
2177 		dev_err(&pdev->dev, "can't get tsensor clock\n");
2178 		return PTR_ERR(tegra->clock_tsensor);
2179 	}
2180 
2181 	tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
2182 	if (IS_ERR(tegra->clock_soctherm)) {
2183 		dev_err(&pdev->dev, "can't get soctherm clock\n");
2184 		return PTR_ERR(tegra->clock_soctherm);
2185 	}
2186 
2187 	tegra->calib = devm_kcalloc(&pdev->dev,
2188 				    soc->num_tsensors, sizeof(u32),
2189 				    GFP_KERNEL);
2190 	if (!tegra->calib)
2191 		return -ENOMEM;
2192 
2193 	/* calculate shared calibration data */
2194 	err = tegra_calc_shared_calib(soc->tfuse, &shared_calib);
2195 	if (err)
2196 		return err;
2197 
2198 	/* calculate tsensor calibration data */
2199 	for (i = 0; i < soc->num_tsensors; ++i) {
2200 		err = tegra_calc_tsensor_calib(&soc->tsensors[i],
2201 					       &shared_calib,
2202 					       &tegra->calib[i]);
2203 		if (err)
2204 			return err;
2205 	}
2206 
2207 	tegra->thermctl_tzs = devm_kcalloc(&pdev->dev,
2208 					   soc->num_ttgs, sizeof(z),
2209 					   GFP_KERNEL);
2210 	if (!tegra->thermctl_tzs)
2211 		return -ENOMEM;
2212 
2213 	err = soctherm_clk_enable(pdev, true);
2214 	if (err)
2215 		return err;
2216 
2217 	soctherm_thermtrips_parse(pdev);
2218 
2219 	soctherm_init_hw_throt_cdev(pdev);
2220 
2221 	soctherm_init(pdev);
2222 
2223 	for (i = 0; i < soc->num_ttgs; ++i) {
2224 		struct tegra_thermctl_zone *zone =
2225 			devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
2226 		if (!zone) {
2227 			err = -ENOMEM;
2228 			goto disable_clocks;
2229 		}
2230 
2231 		zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
2232 		zone->dev = &pdev->dev;
2233 		zone->sg = soc->ttgs[i];
2234 		zone->ts = tegra;
2235 
2236 		z = devm_thermal_zone_of_sensor_register(&pdev->dev,
2237 							 soc->ttgs[i]->id, zone,
2238 							 &tegra_of_thermal_ops);
2239 		if (IS_ERR(z)) {
2240 			err = PTR_ERR(z);
2241 			dev_err(&pdev->dev, "failed to register sensor: %d\n",
2242 				err);
2243 			goto disable_clocks;
2244 		}
2245 
2246 		zone->tz = z;
2247 		tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
2248 
2249 		/* Configure hw trip points */
2250 		err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
2251 		if (err)
2252 			goto disable_clocks;
2253 	}
2254 
2255 	err = soctherm_interrupts_init(pdev, tegra);
2256 
2257 	soctherm_debug_init(pdev);
2258 
2259 	return 0;
2260 
2261 disable_clocks:
2262 	soctherm_clk_enable(pdev, false);
2263 
2264 	return err;
2265 }
2266 
2267 static int tegra_soctherm_remove(struct platform_device *pdev)
2268 {
2269 	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
2270 
2271 	debugfs_remove_recursive(tegra->debugfs_dir);
2272 
2273 	soctherm_clk_enable(pdev, false);
2274 
2275 	return 0;
2276 }
2277 
2278 static int __maybe_unused soctherm_suspend(struct device *dev)
2279 {
2280 	struct platform_device *pdev = to_platform_device(dev);
2281 
2282 	soctherm_clk_enable(pdev, false);
2283 
2284 	return 0;
2285 }
2286 
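/*
 * Resume reprograms the sensors, the hotspot offsets, the throttle
 * configuration and the hardware trip points, since the SOC_THERM
 * register state is not assumed to survive suspend.
 */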
2287 static int __maybe_unused soctherm_resume(struct device *dev)
2288 {
2289 	struct platform_device *pdev = to_platform_device(dev);
2290 	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
2291 	struct tegra_soctherm_soc *soc = tegra->soc;
2292 	int err, i;
2293 
2294 	err = soctherm_clk_enable(pdev, true);
2295 	if (err) {
2296 		dev_err(&pdev->dev,
2297 			"Resume failed: could not enable clocks\n");
2298 		return err;
2299 	}
2300 
2301 	soctherm_init(pdev);
2302 
2303 	for (i = 0; i < soc->num_ttgs; ++i) {
2304 		struct thermal_zone_device *tz;
2305 
2306 		tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
2307 		err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
2308 		if (err) {
2309 			dev_err(&pdev->dev,
2310 				"Resume failed: could not set hwtrips\n");
2311 			return err;
2312 		}
2313 	}
2314 
2315 	return 0;
2316 }
2317 
2318 static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume);
2319 
2320 static struct platform_driver tegra_soctherm_driver = {
2321 	.probe = tegra_soctherm_probe,
2322 	.remove = tegra_soctherm_remove,
2323 	.driver = {
2324 		.name = "tegra_soctherm",
2325 		.pm = &tegra_soctherm_pm,
2326 		.of_match_table = tegra_soctherm_of_match,
2327 	},
2328 };
2329 module_platform_driver(tegra_soctherm_driver);
2330 
2331 MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
2332 MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
2333 MODULE_LICENSE("GPL v2");
2334