/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#ifndef _MHI_INT_H
#define _MHI_INT_H

#include <linux/mhi.h>

extern struct bus_type mhi_bus_type;

#define MHIREGLEN (0x0)
#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
#define MHIREGLEN_MHIREGLEN_SHIFT (0)

#define MHIVER (0x8)
#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
#define MHIVER_MHIVER_SHIFT (0)

#define MHICFG (0x10)
#define MHICFG_NHWER_MASK (0xFF000000)
#define MHICFG_NHWER_SHIFT (24)
#define MHICFG_NER_MASK (0xFF0000)
#define MHICFG_NER_SHIFT (16)
#define MHICFG_NHWCH_MASK (0xFF00)
#define MHICFG_NHWCH_SHIFT (8)
#define MHICFG_NCH_MASK (0xFF)
#define MHICFG_NCH_SHIFT (0)
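
/*
 * Register fields in this file follow a MASK/SHIFT pair convention; for
 * illustration, the number of event rings advertised in MHICFG would be
 * extracted as:
 *
 *	ner = (mhicfg & MHICFG_NER_MASK) >> MHICFG_NER_SHIFT;
 *
 * (sketch only; the driver reads such fields through mhi_read_reg_field(),
 * declared towards the end of this header.)
 */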

#define CHDBOFF (0x18)
#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
#define CHDBOFF_CHDBOFF_SHIFT (0)

#define ERDBOFF (0x20)
#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
#define ERDBOFF_ERDBOFF_SHIFT (0)

#define BHIOFF (0x28)
#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
#define BHIOFF_BHIOFF_SHIFT (0)

#define BHIEOFF (0x2C)
#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
#define BHIEOFF_BHIEOFF_SHIFT (0)

#define DEBUGOFF (0x30)
#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
#define DEBUGOFF_DEBUGOFF_SHIFT (0)

#define MHICTRL (0x38)
#define MHICTRL_MHISTATE_MASK (0x0000FF00)
#define MHICTRL_MHISTATE_SHIFT (8)
#define MHICTRL_RESET_MASK (0x2)
#define MHICTRL_RESET_SHIFT (1)

#define MHISTATUS (0x48)
#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
#define MHISTATUS_MHISTATE_SHIFT (8)
#define MHISTATUS_SYSERR_MASK (0x4)
#define MHISTATUS_SYSERR_SHIFT (2)
#define MHISTATUS_READY_MASK (0x1)
#define MHISTATUS_READY_SHIFT (0)

#define CCABAP_LOWER (0x58)
#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)

#define CCABAP_HIGHER (0x5C)
#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)

#define ECABAP_LOWER (0x60)
#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)

#define ECABAP_HIGHER (0x64)
#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)

#define CRCBAP_LOWER (0x68)
#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)

#define CRCBAP_HIGHER (0x6C)
#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)

#define CRDB_LOWER (0x70)
#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)

#define CRDB_HIGHER (0x74)
#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)

#define MHICTRLBASE_LOWER (0x80)
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)

#define MHICTRLBASE_HIGHER (0x84)
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)

#define MHICTRLLIMIT_LOWER (0x88)
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)

#define MHICTRLLIMIT_HIGHER (0x8C)
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)

#define MHIDATABASE_LOWER (0x98)
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)

#define MHIDATABASE_HIGHER (0x9C)
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)

#define MHIDATALIMIT_LOWER (0xA0)
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)

#define MHIDATALIMIT_HIGHER (0xA4)
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)

/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
#define MHI_SOC_RESET_REQ BIT(0)

/* MHI BHI offsets */
#define BHI_BHIVERSION_MINOR (0x00)
#define BHI_BHIVERSION_MAJOR (0x04)
#define BHI_IMGADDR_LOW (0x08)
#define BHI_IMGADDR_HIGH (0x0C)
#define BHI_IMGSIZE (0x10)
#define BHI_RSVD1 (0x14)
#define BHI_IMGTXDB (0x18)
#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHI_TXDB_SEQNUM_SHFT (0)
#define BHI_RSVD2 (0x1C)
#define BHI_INTVEC (0x20)
#define BHI_RSVD3 (0x24)
#define BHI_EXECENV (0x28)
#define BHI_STATUS (0x2C)
#define BHI_ERRCODE (0x30)
#define BHI_ERRDBG1 (0x34)
#define BHI_ERRDBG2 (0x38)
#define BHI_ERRDBG3 (0x3C)
#define BHI_SERIALNU (0x40)
#define BHI_SBLANTIROLLVER (0x44)
#define BHI_NUMSEG (0x48)
#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
#define BHI_RSVD5 (0xC4)
#define BHI_STATUS_MASK (0xC0000000)
#define BHI_STATUS_SHIFT (30)
#define BHI_STATUS_ERROR (3)
#define BHI_STATUS_SUCCESS (2)
#define BHI_STATUS_RESET (0)

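/*
 * For illustration, the BHI status field decodes with the pair above:
 *
 *	status = (bhi_status & BHI_STATUS_MASK) >> BHI_STATUS_SHIFT;
 *
 * and is then compared against BHI_STATUS_SUCCESS / BHI_STATUS_ERROR /
 * BHI_STATUS_RESET.
 */
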
/* MHI BHIE offsets */
#define BHIE_MSMSOCID_OFFS (0x0000)
#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
#define BHIE_TXVECSIZE_OFFS (0x0034)
#define BHIE_TXVECDB_OFFS (0x003C)
#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_TXVECDB_SEQNUM_SHFT (0)
#define BHIE_TXVECSTATUS_OFFS (0x0044)
#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
#define BHIE_RXVECSIZE_OFFS (0x0068)
#define BHIE_RXVECDB_OFFS (0x0070)
#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_RXVECDB_SEQNUM_SHFT (0)
#define BHIE_RXVECSTATUS_OFFS (0x0078)
#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)

#define SOC_HW_VERSION_OFFS (0x224)
#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
#define SOC_HW_VERSION_MINOR_VER_SHFT (0)

#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
#define EV_CTX_INTMODC_SHIFT 8
#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
#define EV_CTX_INTMODT_SHIFT 16
struct mhi_event_ctxt {
	__u32 intmod;
	__u32 ertype;
	__u32 msivec;

	__u64 rbase __packed __aligned(4);
	__u64 rlen __packed __aligned(4);
	__u64 rp __packed __aligned(4);
	__u64 wp __packed __aligned(4);
};
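
/*
 * The event, channel and command contexts here live in DMA-coherent memory
 * visible to the device (their addresses are collected in struct mhi_ctxt
 * below), which is presumably why the 64-bit ring members are
 * __packed __aligned(4). Fields packed into the 32-bit words follow the same
 * MASK/SHIFT convention, e.g. the interrupt moderation time of an event ring:
 *
 *	intmodt = (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >>
 *		  EV_CTX_INTMODT_SHIFT;
 */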

#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
#define CHAN_CTX_CHSTATE_SHIFT 0
#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
#define CHAN_CTX_BRSTMODE_SHIFT 8
#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
#define CHAN_CTX_POLLCFG_SHIFT 10
#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
struct mhi_chan_ctxt {
	__u32 chcfg;
	__u32 chtype;
	__u32 erindex;

	__u64 rbase __packed __aligned(4);
	__u64 rlen __packed __aligned(4);
	__u64 rp __packed __aligned(4);
	__u64 wp __packed __aligned(4);
};

struct mhi_cmd_ctxt {
	__u32 reserved0;
	__u32 reserved1;
	__u32 reserved2;

	__u64 rbase __packed __aligned(4);
	__u64 rlen __packed __aligned(4);
	__u64 rp __packed __aligned(4);
	__u64 wp __packed __aligned(4);
};

struct mhi_ctxt {
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	dma_addr_t er_ctxt_addr;
	dma_addr_t chan_ctxt_addr;
	dma_addr_t cmd_ctxt_addr;
};
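
/*
 * struct mhi_ctxt bundles the host-side context arrays with their DMA
 * addresses; going by the register names above, those addresses are what get
 * programmed into the CCABAP/ECABAP/CRCBAP base-address register pairs.
 */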

struct mhi_tre {
	u64 ptr;
	u32 dword[2];
};

struct bhi_vec_entry {
	u64 dma_addr;
	u64 size;
};

enum mhi_cmd_type {
	MHI_CMD_NOP = 1,
	MHI_CMD_RESET_CHAN = 16,
	MHI_CMD_STOP_CHAN = 17,
	MHI_CMD_START_CHAN = 18,
};

/* No operation command */
#define MHI_TRE_CMD_NOOP_PTR (0)
#define MHI_TRE_CMD_NOOP_DWORD0 (0)
#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)

/* Channel reset command */
#define MHI_TRE_CMD_RESET_PTR (0)
#define MHI_TRE_CMD_RESET_DWORD0 (0)
#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
					(MHI_CMD_RESET_CHAN << 16))

/* Channel stop command */
#define MHI_TRE_CMD_STOP_PTR (0)
#define MHI_TRE_CMD_STOP_DWORD0 (0)
#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
				       (MHI_CMD_STOP_CHAN << 16))

/* Channel start command */
#define MHI_TRE_CMD_START_PTR (0)
#define MHI_TRE_CMD_START_DWORD0 (0)
#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
					(MHI_CMD_START_CHAN << 16))

#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
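
/*
 * Sketch of how the command macros above compose a channel-start command
 * element (the helper name is illustrative only; the driver issues commands
 * through mhi_send_cmd(), declared near the end of this header):
 */
static inline void mhi_example_fill_start_cmd(struct mhi_tre *cmd_tre, u32 chid)
{
	cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
	cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
	cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chid);
}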

/* Event descriptor macros */
#define MHI_TRE_EV_PTR(ptr) (ptr)
#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
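
/*
 * For illustration, a transfer completion event would typically be unpacked
 * with the accessors above along the lines of:
 *
 *	chan = MHI_TRE_GET_EV_CHID(event);
 *	code = MHI_TRE_GET_EV_CODE(event);	(an enum mhi_ev_ccs value)
 *	len  = MHI_TRE_GET_EV_LEN(event);
 *
 * The event type (MHI_TRE_GET_EV_TYPE()) determines which of the overlapping
 * accessors apply.
 */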

/* Transfer descriptor macros */
#define MHI_TRE_DATA_PTR(ptr) (ptr)
#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
	| (ieot << 9) | (ieob << 8) | chain)
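
/*
 * Note: the literal 2 in MHI_TRE_DATA_DWORD1() sits in the same bit position
 * as the (type << 16) field of the command/event macros and matches
 * MHI_PKT_TYPE_TRANSFER (0x2) below; bei/ieot/ieob/chain are single-bit
 * flags.
 */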

/* RSC transfer descriptor macros */
#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)

enum mhi_pkt_type {
	MHI_PKT_TYPE_INVALID = 0x0,
	MHI_PKT_TYPE_NOOP_CMD = 0x1,
	MHI_PKT_TYPE_TRANSFER = 0x2,
	MHI_PKT_TYPE_COALESCING = 0x8,
	MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
	MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
	MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
	MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
	MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
	MHI_PKT_TYPE_TX_EVENT = 0x22,
	MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
	MHI_PKT_TYPE_EE_EVENT = 0x40,
	MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
	MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
	MHI_PKT_TYPE_STALE_EVENT, /* internal event */
};

/* MHI transfer completion events */
enum mhi_ev_ccs {
	MHI_EV_CC_INVALID = 0x0,
	MHI_EV_CC_SUCCESS = 0x1,
	MHI_EV_CC_EOT = 0x2, /* End of transfer event */
	MHI_EV_CC_OVERFLOW = 0x3,
	MHI_EV_CC_EOB = 0x4, /* End of block event */
	MHI_EV_CC_OOB = 0x5, /* Out of block event */
	MHI_EV_CC_DB_MODE = 0x6,
	MHI_EV_CC_UNDEFINED_ERR = 0x10,
	MHI_EV_CC_BAD_TRE = 0x11,
};

enum mhi_ch_state {
	MHI_CH_STATE_DISABLED = 0x0,
	MHI_CH_STATE_ENABLED = 0x1,
	MHI_CH_STATE_RUNNING = 0x2,
	MHI_CH_STATE_SUSPENDED = 0x3,
	MHI_CH_STATE_STOP = 0x4,
	MHI_CH_STATE_ERROR = 0x5,
};

#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
				    mode != MHI_DB_BRST_ENABLE)

extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
			     "INVALID_EE" : mhi_ee_str[ee])

#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
			ee == MHI_EE_EDL)

#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)

enum dev_st_transition {
	DEV_ST_TRANSITION_PBL,
	DEV_ST_TRANSITION_READY,
	DEV_ST_TRANSITION_SBL,
	DEV_ST_TRANSITION_MISSION_MODE,
	DEV_ST_TRANSITION_SYS_ERR,
	DEV_ST_TRANSITION_DISABLE,
	DEV_ST_TRANSITION_MAX,
};

extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
				"INVALID_STATE" : dev_state_tran_str[state])

extern const char * const mhi_state_str[MHI_STATE_MAX];
#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
				  !mhi_state_str[state]) ? \
				"INVALID_STATE" : mhi_state_str[state])
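
/*
 * Usage sketch for the string helpers above:
 *
 *	dev_dbg(dev, "MHI state: %s\n",
 *		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
 */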

/* internal power states */
enum mhi_pm_state {
	MHI_PM_STATE_DISABLE,
	MHI_PM_STATE_POR,
	MHI_PM_STATE_M0,
	MHI_PM_STATE_M2,
	MHI_PM_STATE_M3_ENTER,
	MHI_PM_STATE_M3,
	MHI_PM_STATE_M3_EXIT,
	MHI_PM_STATE_FW_DL_ERR,
	MHI_PM_STATE_SYS_ERR_DETECT,
	MHI_PM_STATE_SYS_ERR_PROCESS,
	MHI_PM_STATE_SHUTDOWN_PROCESS,
	MHI_PM_STATE_LD_ERR_FATAL_DETECT,
	MHI_PM_STATE_MAX
};

#define MHI_PM_DISABLE			BIT(0)
#define MHI_PM_POR			BIT(1)
#define MHI_PM_M0			BIT(2)
#define MHI_PM_M2			BIT(3)
#define MHI_PM_M3_ENTER			BIT(4)
#define MHI_PM_M3			BIT(5)
#define MHI_PM_M3_EXIT			BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR		BIT(7)
#define MHI_PM_SYS_ERR_DETECT		BIT(8)
#define MHI_PM_SYS_ERR_PROCESS		BIT(9)
#define MHI_PM_SHUTDOWN_PROCESS		BIT(10)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT	BIT(11)

#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
		MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
					mhi_cntrl->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
						MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
					    MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
					   (MHI_PM_M3_ENTER | MHI_PM_M3))
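
/*
 * The pm_state value tested by the helpers above is expected to hold one of
 * the single-bit MHI_PM_* values (enum mhi_pm_state merely enumerates them),
 * which is why membership is checked with a bitwise AND, e.g.:
 *
 *	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
 *		mhi_write_reg(mhi_cntrl, base, offset, val);
 */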

#define NR_OF_CMD_RINGS			1
#define CMD_EL_PER_RING			128
#define PRIMARY_CMD_RING		0
#define MHI_DEV_WAKE_DB			127
#define MHI_MAX_MTU			0xffff
#define MHI_RANDOM_U32_NONZERO(bmsk)	(prandom_u32_max(bmsk) + 1)
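
/*
 * MHI_RANDOM_U32_NONZERO(bmsk) yields a pseudo-random value in [1, bmsk],
 * since prandom_u32_max(bmsk) returns a value in [0, bmsk).
 */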

enum mhi_er_type {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};

struct db_cfg {
	bool reset_req;
	bool db_mode;
	u32 pollcfg;
	enum mhi_db_brst_mode brstmode;
	dma_addr_t db_val;
	void (*process_db)(struct mhi_controller *mhi_cntrl,
			   struct db_cfg *db_cfg, void __iomem *io_addr,
			   dma_addr_t db_val);
};
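
/*
 * The process_db hook matches the signatures of mhi_db_brstmode() and
 * mhi_db_brstmode_disable() declared under "Register access methods" below,
 * which presumably serve as its two implementations (burst mode on/off).
 */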

struct mhi_pm_transitions {
	enum mhi_pm_state from_state;
	u32 to_states;
};

struct state_transition {
	struct list_head node;
	enum dev_st_transition state;
};

struct mhi_ring {
	dma_addr_t dma_handle;
	dma_addr_t iommu_base;
	u64 *ctxt_wp; /* point to ctxt wp */
	void *pre_aligned;
	void *base;
	void *rp;
	void *wp;
	size_t el_size;
	size_t len;
	size_t elements;
	size_t alloc_size;
	void __iomem *db_addr;
};

struct mhi_cmd {
	struct mhi_ring ring;
	spinlock_t lock;
};

struct mhi_buf_info {
	void *v_addr;
	void *bb_addr;
	void *wp;
	void *cb_buf;
	dma_addr_t p_addr;
	size_t len;
	enum dma_data_direction dir;
	bool used; /* Indicates whether the buffer is used or not */
	bool pre_mapped; /* Already pre-mapped by client */
};

struct mhi_event {
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	u32 er_index;
	u32 intmod;
	u32 irq;
	int chan; /* this event ring is dedicated to a channel (optional) */
	u32 priority;
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	struct tasklet_struct task;
	spinlock_t lock;
	int (*process_event)(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota);
	bool hw_ring;
	bool cl_manage;
	bool offload_ev; /* managed by a device driver */
};

struct mhi_chan {
	const char *name;
	/*
	 * Important: When consuming, increment tre_ring first and when
	 * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
	 * is guaranteed to have space so we do not need to check both rings.
	 */
	struct mhi_ring buf_ring;
	struct mhi_ring tre_ring;
	u32 chan;
	u32 er_index;
	u32 intmod;
	enum mhi_ch_type type;
	enum dma_data_direction dir;
	struct db_cfg db_cfg;
	enum mhi_ch_ee_mask ee_mask;
	enum mhi_ch_state ch_state;
	enum mhi_ev_ccs ccs;
	struct mhi_device *mhi_dev;
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct mutex mutex;
	struct completion completion;
	rwlock_t lock;
	struct list_head node;
	bool lpm_notify;
	bool configured;
	bool offload_ch;
	bool pre_alloc;
	bool auto_start;
	bool wake_capable;
};

/* Default MHI timeout */
#define MHI_TIMEOUT_MS (1000)

/* debugfs related functions */
#ifdef CONFIG_MHI_BUS_DEBUG
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_debugfs_init(void);
void mhi_debugfs_exit(void);
#else
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
#endif

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);

int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info);

/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
					struct mhi_controller *mhi_cntrl,
					enum mhi_pm_state state);
const char *to_mhi_pm_state_str(enum mhi_pm_state state);
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
void mhi_fw_load_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd);
static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
{
	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
		mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
}

static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
{
	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
}

/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
		     void __iomem *db_addr, dma_addr_t db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_mode, void __iomem *db_addr,
			     dma_addr_t db_val);
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 shift, u32 *out);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val);
void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
			 u32 offset, u32 mask, u32 shift, u32 val);
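
/*
 * Sketch of reading a register field with the accessors above and the
 * MASK/SHIFT pairs from the top of this file (helper name illustrative only;
 * assumes mhi_cntrl->regs is the MMIO register base from struct
 * mhi_controller). The value read can be fed to TO_MHI_STATE_STR() for
 * logging.
 */
static inline int mhi_example_read_mhi_state(struct mhi_controller *mhi_cntrl,
					     u32 *state)
{
	return mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				  MHISTATUS_MHISTATE_MASK,
				  MHISTATUS_MHISTATE_SHIFT, state);
}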
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);

/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		      struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
		    struct mhi_chan *mhi_chan);

/* Memory allocation methods */
static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
				       size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle,
				       gfp);

	return buf;
}

static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
				     size_t size,
				     void *vaddr,
				     dma_addr_t dma_handle)
{
	dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle);
}

/* Event processing methods */
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event, u32 event_quota);

/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info);
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info);

#endif /* _MHI_INT_H */