/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 * Copyright (C) 2014 Samsung Electronics
 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 * Lukasz Majewski <l.majewski@samsung.com>
 *
 * Copyright (C) 2011 Samsung Electronics
 * Donggeun Kim <dg77.kim@samsung.com>
 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_tmu.h"
#include "../thermal_core.h"

/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO 0x0
#define EXYNOS_TMU_REG_CONTROL 0x20
#define EXYNOS_TMU_REG_STATUS 0x28
#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
#define EXYNOS_TMU_REG_INTEN 0x70
#define EXYNOS_TMU_REG_INTSTAT 0x74
#define EXYNOS_TMU_REG_INTCLEAR 0x78

#define EXYNOS_TMU_TEMP_MASK 0xff
#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
#define EXYNOS_TMU_REF_VOLTAGE_MASK 0x1f
#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK 0xf
#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
#define EXYNOS_TMU_CORE_EN_SHIFT 0

/* Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON1 0x10

/* Exynos4210 specific registers */
#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50

/* Exynos5250, Exynos4412, Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON2 0x14
#define EXYNOS_THD_TEMP_RISE 0x50
#define EXYNOS_THD_TEMP_FALL 0x54
#define EXYNOS_EMUL_CON 0x80

#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1
#define EXYNOS_TRIMINFO_25_SHIFT 0
#define EXYNOS_TRIMINFO_85_SHIFT 8
#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12

#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4
#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8
#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12
#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16

#define EXYNOS_EMUL_TIME 0x57F0
#define EXYNOS_EMUL_TIME_MASK 0xffff
#define EXYNOS_EMUL_TIME_SHIFT 16
#define EXYNOS_EMUL_DATA_SHIFT 8
#define EXYNOS_EMUL_DATA_MASK 0xFF
#define EXYNOS_EMUL_ENABLE 0x1

/* Exynos5260 specific */
#define EXYNOS5260_TMU_REG_INTEN 0xC0
#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
#define EXYNOS5260_EMUL_CON 0x100

/* Exynos4412 specific */
#define EXYNOS4412_MUX_ADDR_VALUE 6
#define EXYNOS4412_MUX_ADDR_SHIFT 20

/* Exynos5433 specific registers */
#define EXYNOS5433_TMU_REG_CONTROL1 0x024
#define EXYNOS5433_TMU_SAMPLING_INTERVAL 0x02c
#define EXYNOS5433_TMU_COUNTER_VALUE0 0x030
#define EXYNOS5433_TMU_COUNTER_VALUE1 0x034
#define EXYNOS5433_TMU_REG_CURRENT_TEMP1 0x044
#define EXYNOS5433_THD_TEMP_RISE3_0 0x050
#define EXYNOS5433_THD_TEMP_RISE7_4 0x054
#define EXYNOS5433_THD_TEMP_FALL3_0 0x060
#define EXYNOS5433_THD_TEMP_FALL7_4 0x064
#define EXYNOS5433_TMU_REG_INTEN 0x0c0
#define EXYNOS5433_TMU_REG_INTPEND 0x0c8
#define EXYNOS5433_TMU_EMUL_CON 0x110
#define EXYNOS5433_TMU_PD_DET_EN 0x130

#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT 16
#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT 23
#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK \
	(0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK BIT(23)

#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING 0
#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING 1

#define EXYNOS5433_PD_DET_EN 1

/* exynos5440 specific registers */
#define EXYNOS5440_TMU_S0_7_TRIM 0x000
#define EXYNOS5440_TMU_S0_7_CTRL 0x020
#define EXYNOS5440_TMU_S0_7_DEBUG 0x040
#define EXYNOS5440_TMU_S0_7_TEMP 0x0f0
#define EXYNOS5440_TMU_S0_7_TH0 0x110
#define EXYNOS5440_TMU_S0_7_TH1 0x130
#define EXYNOS5440_TMU_S0_7_TH2 0x150
#define EXYNOS5440_TMU_S0_7_IRQEN 0x210
#define EXYNOS5440_TMU_S0_7_IRQ 0x230
/* exynos5440 common registers */
#define EXYNOS5440_TMU_IRQ_STATUS 0x000
#define EXYNOS5440_TMU_PMIN 0x004

#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
#define EXYNOS5440_EFUSE_SWAP_OFFSET 8

/* Exynos7 specific registers */
#define EXYNOS7_THD_TEMP_RISE7_6 0x50
#define EXYNOS7_THD_TEMP_FALL7_6 0x60
#define EXYNOS7_TMU_REG_INTEN 0x110
#define EXYNOS7_TMU_REG_INTPEND 0x118
#define EXYNOS7_TMU_REG_EMUL_CON 0x160

#define EXYNOS7_TMU_TEMP_MASK 0x1ff
#define EXYNOS7_PD_DET_EN_SHIFT 23
#define EXYNOS7_TMU_INTEN_RISE0_SHIFT 0
#define EXYNOS7_TMU_INTEN_RISE1_SHIFT 1
#define EXYNOS7_TMU_INTEN_RISE2_SHIFT 2
#define EXYNOS7_TMU_INTEN_RISE3_SHIFT 3
#define EXYNOS7_TMU_INTEN_RISE4_SHIFT 4
#define EXYNOS7_TMU_INTEN_RISE5_SHIFT 5
#define EXYNOS7_TMU_INTEN_RISE6_SHIFT 6
#define EXYNOS7_TMU_INTEN_RISE7_SHIFT 7
#define EXYNOS7_EMUL_DATA_SHIFT 7
#define EXYNOS7_EMUL_DATA_MASK 0x1ff

#define MCELSIUS 1000
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @sclk: pointer to the clock structure for accessing the tmu special clk.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @tzd: pointer to the thermal zone device registered with the core thermal.
 * @tmu_initialize: SoC specific TMU initialization method
 * @tmu_control: SoC specific TMU control method
 * @tmu_read: SoC specific TMU temperature read method
 * @tmu_set_emulation: SoC specific TMU emulation setting method
 * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec, *sclk;
	u16 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_zone_device *tzd;

	int (*tmu_initialize)(struct platform_device *pdev);
	void (*tmu_control)(struct platform_device *pdev, bool on);
	int (*tmu_read)(struct exynos_tmu_data *data);
	void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};

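/*
 * Notify the thermal core that the zone state may have changed and emit a
 * KOBJ_CHANGE uevent carrying the index of the trip level that was crossed,
 * so that user space can react to the trigger.
 */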
static void exynos_report_trigger(struct exynos_tmu_data *p)
{
	char data[10], *envp[] = { data, NULL };
	struct thermal_zone_device *tz = p->tzd;
	int temp;
	unsigned int i;

	if (!tz) {
		pr_err("No thermal zone device defined\n");
		return;
	}

	thermal_zone_device_update(tz);

	mutex_lock(&tz->lock);
	/* Find the level for which trip happened */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		tz->ops->get_trip_temp(tz, i, &temp);
		if (tz->last_temperature < temp)
			break;
	}

	snprintf(data, sizeof(data), "%u", i);
	kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
	mutex_unlock(&tz->lock);
}

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
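/*
 * For instance, with two-point trimming and the commonly used 25/85 degC
 * trim points, the conversion below amounts to a linear interpolation
 * between the two fused codes:
 *   temp_code = (temp - 25) * (temp_error2 - temp_error1) / (85 - 25)
 *		 + temp_error1
 */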
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}

	return temp_code;
}

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}

	return temp;
}

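/*
 * Validate the trim codes read from the TRIMINFO fuse register: if the
 * first-point value is missing or outside the expected efuse range, fall
 * back to the platform-provided default efuse value, and derive a missing
 * second-point value from that default as well.
 */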
static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;
}

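/*
 * Pack the per-trip temperature codes into the 8-bit fields of a threshold
 * register. Critical trips are skipped because the hardware trip is
 * programmed separately, and falling thresholds are lowered by the trip
 * hysteresis.
 */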
static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
{
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	unsigned long temp;
	int i;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		return 0;
	}

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL)
			continue;

		temp = trips[i].temperature / MCELSIUS;
		if (falling)
			temp -= (trips[i].hysteresis / MCELSIUS);
		else
			threshold &= ~(0xff << 8 * i);

		threshold |= temp_to_code(data, temp) << 8 * i;
	}

	return threshold;
}

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	int ret;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	ret = data->tmu_initialize(pdev);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

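/*
 * Compose the TMU CONTROL register value from the platform data: reference
 * voltage, buffer slope (gain) and, when set, the noise cancellation (trip
 * mode) field. Exynos3250/4412 additionally need the internal MUX address.
 */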
static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS3250)
		con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	if (pdata->noise_cancel_mode) {
		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
	}

	return con;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_control(pdev, on);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	int ret = 0, threshold_code, i;
	unsigned long reference, temp;
	unsigned int status;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));

	/* Write temperature code for threshold */
	reference = trips[0].temperature / MCELSIUS;
	threshold_code = temp_to_code(data, reference);
	if (threshold_code < 0) {
		ret = threshold_code;
		goto out;
	}
	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		temp = trips[i].temperature / MCELSIUS;
		writeb(temp - reference, data->base +
		       EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos4412_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(data->tzd);
	unsigned int status, trim_info, con, ctrl, rising_threshold;
	int ret = 0, threshold_code, i;
	unsigned long crit_temp = 0;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	if (data->soc == SOC_ARCH_EXYNOS3250 ||
	    data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS5250) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* On exynos5420 the triminfo register is in the shared space */
	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
	else
		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL) {
			crit_temp = trips[i].temperature;
			break;
		}
	}

	if (i == of_thermal_get_ntrips(data->tzd)) {
		pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
		       __func__);
		ret = -EINVAL;
		goto out;
	}

	threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
	/* 1-4 level to be assigned in th0 reg */
	rising_threshold &= ~(0xff << 8 * i);
	rising_threshold |= threshold_code << 8 * i;
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
	con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);

out:
	return ret;
}

static int exynos5433_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	struct thermal_zone_device *tz = data->tzd;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int temp, temp_hist;
	int ret = 0, threshold_code, i, sensor_id, cal_type;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	sanitize_temp_error(data, trim_info);

	/* Read the temperature sensor id */
	sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
				>> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
	dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);

	/* Read the calibration mode */
	writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
	cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
				>> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;

	switch (cal_type) {
	case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
		pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
		break;
	default:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	}

	dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
		 cal_type ? 2 : 1);

	/* Write temperature code for rising and falling threshold */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		int rising_reg_offset, falling_reg_offset;
		int j = 0;

		switch (i) {
		case 0:
		case 1:
		case 2:
		case 3:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
			j = i;
			break;
		case 4:
		case 5:
		case 6:
		case 7:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
			j = i - 4;
			break;
		default:
			continue;
		}

		/* Write temperature code for rising threshold */
		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;
		threshold_code = temp_to_code(data, temp);

		rising_threshold = readl(data->base + rising_reg_offset);
		rising_threshold &= ~(0xff << j * 8);
		rising_threshold |= (threshold_code << j * 8);
		writel(rising_threshold, data->base + rising_reg_offset);

		/* Write temperature code for falling threshold */
		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);
		threshold_code = temp_to_code(data, temp_hist);

		falling_threshold = readl(data->base + falling_reg_offset);
		falling_threshold &= ~(0xff << j * 8);
		falling_threshold |= (threshold_code << j * 8);
		writel(falling_threshold, data->base + falling_reg_offset);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos5440_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	unsigned int trim_info = 0, con, rising_threshold;
	int threshold_code;
	int crit_temp = 0;

	/*
	 * For exynos5440 soc triminfo value is swapped between TMU0 and
	 * TMU2, so the below logic is needed.
	 */
	switch (data->id) {
	case 0:
		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 1:
		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 2:
		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
	}
	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
		threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
		/* 5th level to be assigned in th2 reg */
		rising_threshold =
			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
	}
	/* Clear the PMIN in the common TMU register */
	if (!data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);

	return 0;
}

static int exynos7_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;
	int temp, temp_hist;
	unsigned int reg_off, bit_off;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	/* Write temperature code for rising and falling threshold */
	for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
		/*
		 * On exynos7 there are 4 rising and 4 falling threshold
		 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
		 * register holds the value of two threshold levels (at bit
		 * offsets 0 and 16). Based on the fact that there are at most
		 * eight possible trigger levels, calculate the register and
		 * bit offsets where the threshold levels are to be written.
		 *
		 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
		 * [24:16] - Threshold level 7
		 * [8:0] - Threshold level 6
		 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
		 * [24:16] - Threshold level 5
		 * [8:0] - Threshold level 4
		 *
		 * and similarly for falling thresholds.
		 *
		 * Based on the above, calculate the register and bit offsets
		 * for rising/falling threshold levels and populate them.
		 */
		reg_off = ((7 - i) / 2) * 4;
		bit_off = ((8 - i) % 2);

		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;

		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);

		/* Set 9-bit temperature code for rising threshold levels */
		threshold_code = temp_to_code(data, temp);
		rising_threshold = readl(data->base +
			EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
		rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		rising_threshold |= threshold_code << (16 * bit_off);
		writel(rising_threshold,
		       data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);

		/* Set 9-bit temperature code for falling threshold levels */
		threshold_code = temp_to_code(data, temp_hist);
		falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		falling_threshold |= threshold_code << (16 * bit_off);
		writel(falling_threshold,
		       data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

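/*
 * Program the CONTROL and INTEN registers: when enabling, the core is turned
 * on and a rising interrupt is enabled for every valid trip point; on SoCs
 * other than Exynos4210 the same set is mirrored into the falling-interrupt
 * bits. Disabling clears the core enable bit and masks all interrupts.
 */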
static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS_TMU_INTEN_RISE0_SHIFT);

		if (data->soc != SOC_ARCH_EXYNOS4210)
			interrupt_en |=
				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en, pd_det_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;

	writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
	writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
		interrupt_en |=
			interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
}

static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

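/*
 * Read the current temperature code under the driver lock with the TMU clock
 * enabled and convert it to millidegrees Celsius for the thermal core.
 */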
static int exynos_get_temp(void *p, int *temp)
{
	struct exynos_tmu_data *data = p;

	if (!data || !data->tmu_read)
		return -EINVAL;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	*temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return 0;
}

#ifdef CONFIG_THERMAL_EMULATION
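/*
 * Build the emulation control register value: a non-zero temperature enables
 * emulation with the corresponding temperature code (and, on everything
 * except Exynos5440, the emulation delay), while a zero temperature disables
 * emulation again.
 */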
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
			    int temp)
{
	if (temp) {
		temp /= MCELSIUS;

		if (data->soc != SOC_ARCH_EXYNOS5440) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		if (data->soc == SOC_ARCH_EXYNOS7) {
			val &= ~(EXYNOS7_EMUL_DATA_MASK <<
				EXYNOS7_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS7_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		} else {
			val &= ~(EXYNOS_EMUL_DATA_MASK <<
				EXYNOS_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		}
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	return val;
}

static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;
	u32 emul_con;

	if (data->soc == SOC_ARCH_EXYNOS5260)
		emul_con = EXYNOS5260_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS5433)
		emul_con = EXYNOS5433_TMU_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS7)
		emul_con = EXYNOS7_TMU_REG_EMUL_CON;
	else
		emul_con = EXYNOS_EMUL_CON;

	val = readl(data->base + emul_con);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + emul_con);
}

static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;

	val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
}

static int exynos_tmu_set_emulation(void *drv_data, int temp)
{
	struct exynos_tmu_data *data = drv_data;
	int ret = -EINVAL;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_set_emulation(data, temp);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
#define exynos4412_tmu_set_emulation NULL
#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, int temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static int exynos4210_tmu_read(struct exynos_tmu_data *data)
{
	int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);

	/* "temp_code" should range between 75 and 175 */
	return (ret < 75 || ret > 175) ? -ENODATA : ret;
}

static int exynos4412_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
}

static int exynos5440_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
}

static int exynos7_tmu_read(struct exynos_tmu_data *data)
{
	return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
		EXYNOS7_TMU_TEMP_MASK;
}

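/*
 * Bottom half of the TMU interrupt: runs from the system workqueue, reports
 * the trip to the thermal core, clears the pending interrupt bits and
 * re-enables the IRQ line that was disabled in the hard interrupt handler.
 */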
static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	unsigned int val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	data->tmu_clear_irqs(data);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;
	u32 tmu_intstat, tmu_intclear;

	if (data->soc == SOC_ARCH_EXYNOS5260) {
		tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
	} else if (data->soc == SOC_ARCH_EXYNOS7) {
		tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
	} else if (data->soc == SOC_ARCH_EXYNOS5433) {
		tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
	} else {
		tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
	}

	val_irq = readl(data->base + tmu_intstat);
	/*
	 * Clear the interrupts. Please note that the documentation for
	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
	 * states that INTCLEAR register has a different placing of bits
	 * responsible for FALL IRQs than INTSTAT register. Exynos5420
	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
	 * support FALL IRQs at all).
	 */
	writel(val_irq, data->base + tmu_intclear);
}

static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;

	val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
	/* clear the interrupts */
	writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
}

static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{ .compatible = "samsung,exynos3250-tmu", },
	{ .compatible = "samsung,exynos4210-tmu", },
	{ .compatible = "samsung,exynos4412-tmu", },
	{ .compatible = "samsung,exynos5250-tmu", },
	{ .compatible = "samsung,exynos5260-tmu", },
	{ .compatible = "samsung,exynos5420-tmu", },
	{ .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
	{ .compatible = "samsung,exynos5433-tmu", },
	{ .compatible = "samsung,exynos5440-tmu", },
	{ .compatible = "samsung,exynos7-tmu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

static int exynos_of_get_soc_type(struct device_node *np)
{
	if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
		return SOC_ARCH_EXYNOS3250;
	else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
		return SOC_ARCH_EXYNOS4210;
	else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
		return SOC_ARCH_EXYNOS4412;
	else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
		return SOC_ARCH_EXYNOS5250;
	else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
		return SOC_ARCH_EXYNOS5260;
	else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
		return SOC_ARCH_EXYNOS5420;
	else if (of_device_is_compatible(np,
					 "samsung,exynos5420-tmu-ext-triminfo"))
		return SOC_ARCH_EXYNOS5420_TRIMINFO;
	else if (of_device_is_compatible(np, "samsung,exynos5433-tmu"))
		return SOC_ARCH_EXYNOS5433;
	else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
		return SOC_ARCH_EXYNOS5440;
	else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
		return SOC_ARCH_EXYNOS7;

	return -EINVAL;
}

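/*
 * Read the SoC tuning parameters (gain, reference voltage, noise cancel
 * mode, efuse limits, trim points and calibration type/mode) from the
 * device tree node into the platform data.
 */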
static int exynos_of_sensor_conf(struct device_node *np,
				 struct exynos_tmu_platform_data *pdata)
{
	u32 value;
	int ret;

	of_node_get(np);

	ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
	pdata->gain = (u8)value;
	of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
	pdata->reference_voltage = (u8)value;
	of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
	pdata->noise_cancel_mode = (u8)value;

	of_property_read_u32(np, "samsung,tmu_efuse_value",
			     &pdata->efuse_value);
	of_property_read_u32(np, "samsung,tmu_min_efuse_value",
			     &pdata->min_efuse_value);
	of_property_read_u32(np, "samsung,tmu_max_efuse_value",
			     &pdata->max_efuse_value);

	of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
	pdata->first_point_trim = (u8)value;
	of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
	pdata->second_point_trim = (u8)value;
	of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
	pdata->default_temp_offset = (u8)value;

	of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
	of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode);

	of_node_put(np);
	return 0;
}

static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = devm_kzalloc(&pdev->dev,
			     sizeof(struct exynos_tmu_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	exynos_of_sensor_conf(pdev->dev.of_node, pdata);
	data->pdata = pdata;
	data->soc = exynos_of_get_soc_type(pdev->dev.of_node);

	switch (data->soc) {
	case SOC_ARCH_EXYNOS4210:
		data->tmu_initialize = exynos4210_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4210_tmu_read;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS3250:
	case SOC_ARCH_EXYNOS4412:
	case SOC_ARCH_EXYNOS5250:
	case SOC_ARCH_EXYNOS5260:
	case SOC_ARCH_EXYNOS5420:
	case SOC_ARCH_EXYNOS5420_TRIMINFO:
		data->tmu_initialize = exynos4412_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS5433:
		data->tmu_initialize = exynos5433_tmu_initialize;
		data->tmu_control = exynos5433_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS5440:
		data->tmu_initialize = exynos5440_tmu_initialize;
		data->tmu_control = exynos5440_tmu_control;
		data->tmu_read = exynos5440_tmu_read;
		data->tmu_set_emulation = exynos5440_tmu_set_emulation;
		data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS7:
		data->tmu_initialize = exynos7_tmu_initialize;
		data->tmu_control = exynos7_tmu_control;
		data->tmu_read = exynos7_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	default:
		dev_err(&pdev->dev, "Platform not supported\n");
		return -EINVAL;
	}

	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
	    data->soc != SOC_ARCH_EXYNOS5440)
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					 resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static struct thermal_zone_of_device_ops exynos_sensor_ops = {
	.get_temp = exynos_get_temp,
	.set_emul_temp = exynos_tmu_set_emulation,
};

static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	ret = exynos_map_dt_data(pdev);
	if (ret)
		goto err_sensor;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		ret = PTR_ERR(data->clk);
		goto err_sensor;
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			ret = PTR_ERR(data->clk_sec);
			goto err_sensor;
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to get clock\n");
			goto err_sensor;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		goto err_clk_sec;
	}

	switch (data->soc) {
	case SOC_ARCH_EXYNOS5433:
	case SOC_ARCH_EXYNOS7:
		data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
		if (IS_ERR(data->sclk)) {
			dev_err(&pdev->dev, "Failed to get sclk\n");
			ret = PTR_ERR(data->sclk);
			goto err_clk;
		} else {
			ret = clk_prepare_enable(data->sclk);
			if (ret) {
				dev_err(&pdev->dev, "Failed to enable sclk\n");
				goto err_clk;
			}
		}
		break;
	default:
		break;
	}

	/*
	 * data->tzd must be registered before calling exynos_tmu_initialize(),
	 * requesting irq and calling exynos_tmu_control().
	 */
	data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
						    &exynos_sensor_ops);
	if (IS_ERR(data->tzd)) {
		ret = PTR_ERR(data->tzd);
		dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret);
		goto err_sclk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_thermal;
	}

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_thermal;
	}

	exynos_tmu_control(pdev, true);
	return 0;

err_thermal:
	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
err_sclk:
	clk_disable_unprepare(data->sclk);
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
err_sensor:
	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tzd = data->tzd;

	thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
	exynos_tmu_control(pdev, false);

	clk_disable_unprepare(data->sclk);
	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM (&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name = "exynos-tmu",
		.pm = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove = exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");