1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * NVIDIA Tegra XUSB device mode controller
4 *
5 * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
6 * Copyright (c) 2015, Google Inc.
7 */
8
9 #include <linux/clk.h>
10 #include <linux/completion.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/interrupt.h>
15 #include <linux/iopoll.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_device.h>
20 #include <linux/phy/phy.h>
21 #include <linux/phy/tegra/xusb.h>
22 #include <linux/pm_domain.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/reset.h>
27 #include <linux/usb/ch9.h>
28 #include <linux/usb/gadget.h>
29 #include <linux/usb/otg.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/phy.h>
32 #include <linux/workqueue.h>
33
34 /* XUSB_DEV registers */
35 #define DB 0x004
36 #define DB_TARGET_MASK GENMASK(15, 8)
37 #define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
38 #define DB_STREAMID_MASK GENMASK(31, 16)
39 #define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
40 #define ERSTSZ 0x008
41 #define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
42 #define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
43 #define ERSTXBALO(x) (0x010 + 8 * (x))
44 #define ERSTXBAHI(x) (0x014 + 8 * (x))
45 #define ERDPLO 0x020
46 #define ERDPLO_EHB BIT(3)
47 #define ERDPHI 0x024
48 #define EREPLO 0x028
49 #define EREPLO_ECS BIT(0)
50 #define EREPLO_SEGI BIT(1)
51 #define EREPHI 0x02c
52 #define CTRL 0x030
53 #define CTRL_RUN BIT(0)
54 #define CTRL_LSE BIT(1)
55 #define CTRL_IE BIT(4)
56 #define CTRL_SMI_EVT BIT(5)
57 #define CTRL_SMI_DSE BIT(6)
58 #define CTRL_EWE BIT(7)
59 #define CTRL_DEVADDR_MASK GENMASK(30, 24)
60 #define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
61 #define CTRL_ENABLE BIT(31)
62 #define ST 0x034
63 #define ST_RC BIT(0)
64 #define ST_IP BIT(4)
65 #define RT_IMOD 0x038
66 #define RT_IMOD_IMODI_MASK GENMASK(15, 0)
67 #define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
68 #define RT_IMOD_IMODC_MASK GENMASK(31, 16)
69 #define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
70 #define PORTSC 0x03c
71 #define PORTSC_CCS BIT(0)
72 #define PORTSC_PED BIT(1)
73 #define PORTSC_PR BIT(4)
74 #define PORTSC_PLS_SHIFT 5
75 #define PORTSC_PLS_MASK GENMASK(8, 5)
76 #define PORTSC_PLS_U0 0x0
77 #define PORTSC_PLS_U2 0x2
78 #define PORTSC_PLS_U3 0x3
79 #define PORTSC_PLS_DISABLED 0x4
80 #define PORTSC_PLS_RXDETECT 0x5
81 #define PORTSC_PLS_INACTIVE 0x6
82 #define PORTSC_PLS_RESUME 0xf
83 #define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
84 #define PORTSC_PS_SHIFT 10
85 #define PORTSC_PS_MASK GENMASK(13, 10)
86 #define PORTSC_PS_UNDEFINED 0x0
87 #define PORTSC_PS_FS 0x1
88 #define PORTSC_PS_LS 0x2
89 #define PORTSC_PS_HS 0x3
90 #define PORTSC_PS_SS 0x4
91 #define PORTSC_LWS BIT(16)
92 #define PORTSC_CSC BIT(17)
93 #define PORTSC_WRC BIT(19)
94 #define PORTSC_PRC BIT(21)
95 #define PORTSC_PLC BIT(22)
96 #define PORTSC_CEC BIT(23)
97 #define PORTSC_WPR BIT(30)
98 #define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
99 PORTSC_PLC | PORTSC_CEC)
100 #define ECPLO 0x040
101 #define ECPHI 0x044
102 #define MFINDEX 0x048
103 #define MFINDEX_FRAME_SHIFT 3
104 #define MFINDEX_FRAME_MASK GENMASK(13, 3)
105 #define PORTPM 0x04c
106 #define PORTPM_L1S_MASK GENMASK(1, 0)
107 #define PORTPM_L1S_DROP 0x0
108 #define PORTPM_L1S_ACCEPT 0x1
109 #define PORTPM_L1S_NYET 0x2
110 #define PORTPM_L1S_STALL 0x3
111 #define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
112 #define PORTPM_RWE BIT(3)
113 #define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
114 #define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
115 #define PORTPM_FLA BIT(24)
116 #define PORTPM_VBA BIT(25)
117 #define PORTPM_WOC BIT(26)
118 #define PORTPM_WOD BIT(27)
119 #define PORTPM_U1E BIT(28)
120 #define PORTPM_U2E BIT(29)
121 #define PORTPM_FRWE BIT(30)
122 #define PORTPM_PNG_CYA BIT(31)
123 #define EP_HALT 0x050
124 #define EP_PAUSE 0x054
125 #define EP_RELOAD 0x058
126 #define EP_STCHG 0x05c
127 #define DEVNOTIF_LO 0x064
128 #define DEVNOTIF_LO_TRIG BIT(0)
129 #define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
130 #define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
131 #define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
132 #define DEVNOTIF_HI 0x068
133 #define PORTHALT 0x06c
134 #define PORTHALT_HALT_LTSSM BIT(0)
135 #define PORTHALT_HALT_REJECT BIT(1)
136 #define PORTHALT_STCHG_REQ BIT(20)
137 #define PORTHALT_STCHG_INTR_EN BIT(24)
138 #define PORT_TM 0x070
139 #define EP_THREAD_ACTIVE 0x074
140 #define EP_STOPPED 0x078
141 #define HSFSPI_COUNT0 0x100
142 #define HSFSPI_COUNT13 0x134
143 #define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
144 #define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
145 HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
146 #define BLCG 0x840
147 #define SSPX_CORE_CNT0 0x610
148 #define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
149 #define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
150 #define SSPX_CORE_CNT30 0x688
151 #define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
152 #define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
153 SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
154 #define SSPX_CORE_CNT32 0x690
155 #define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
156 #define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
157 SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
158 #define SSPX_CORE_CNT56 0x6fc
159 #define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
160 #define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
161 SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
162 #define SSPX_CORE_CNT57 0x700
163 #define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
164 #define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
165 SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
166 #define SSPX_CORE_CNT65 0x720
167 #define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
168 #define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
169 SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
170 #define SSPX_CORE_CNT66 0x724
171 #define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
172 #define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
173 SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
174 #define SSPX_CORE_CNT67 0x728
175 #define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
176 #define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
177 SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
178 #define SSPX_CORE_CNT72 0x73c
179 #define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
180 #define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
181 SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
182 #define SSPX_CORE_PADCTL4 0x750
183 #define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
184 #define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
185 SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
186 #define BLCG_DFPCI BIT(0)
187 #define BLCG_UFPCI BIT(1)
188 #define BLCG_FE BIT(2)
189 #define BLCG_COREPLL_PWRDN BIT(8)
190 #define BLCG_IOPLL_0_PWRDN BIT(9)
191 #define BLCG_IOPLL_1_PWRDN BIT(10)
192 #define BLCG_IOPLL_2_PWRDN BIT(11)
193 #define BLCG_ALL 0x1ff
194 #define CFG_DEV_SSPI_XFER 0x858
195 #define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
196 #define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
197 CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
198 #define CFG_DEV_FE 0x85c
199 #define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
200 #define CFG_DEV_FE_PORTREGSEL_SS_PI 1
201 #define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
202 #define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
203 #define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)
204
205 /* FPCI registers */
206 #define XUSB_DEV_CFG_1 0x004
207 #define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
208 #define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
209 #define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
210 #define XUSB_DEV_CFG_4 0x010
211 #define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
212 #define XUSB_DEV_CFG_5 0x014
213
214 /* IPFS registers */
215 #define XUSB_DEV_CONFIGURATION_0 0x180
216 #define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
217 #define XUSB_DEV_INTR_MASK_0 0x188
218 #define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
219
220 struct tegra_xudc_ep_context {
221 __le32 info0;
222 __le32 info1;
223 __le32 deq_lo;
224 __le32 deq_hi;
225 __le32 tx_info;
226 __le32 rsvd[11];
227 };
228
229 #define EP_STATE_DISABLED 0
230 #define EP_STATE_RUNNING 1
231 #define EP_STATE_HALTED 2
232 #define EP_STATE_STOPPED 3
233 #define EP_STATE_ERROR 4
234
235 #define EP_TYPE_INVALID 0
236 #define EP_TYPE_ISOCH_OUT 1
237 #define EP_TYPE_BULK_OUT 2
238 #define EP_TYPE_INTERRUPT_OUT 3
239 #define EP_TYPE_CONTROL 4
240 #define EP_TYPE_ISOCH_IN 5
241 #define EP_TYPE_BULK_IN 6
242 #define EP_TYPE_INTERRUPT_IN 7
243
244 #define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
245 static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
246 { \
247 return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
248 } \
249 static inline void \
250 ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
251 { \
252 u32 tmp; \
253 \
254 tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
255 tmp |= (val & (mask)) << (shift); \
256 ctx->member = cpu_to_le32(tmp); \
257 }
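
/*
 * For illustration, the invocation BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
 * below should expand to roughly this pair of accessors:
 *
 *	static inline u32 ep_ctx_read_state(struct tegra_xudc_ep_context *ctx)
 *	{
 *		return (le32_to_cpu(ctx->info0) >> 0) & 0x7;
 *	}
 *
 *	static inline void
 *	ep_ctx_write_state(struct tegra_xudc_ep_context *ctx, u32 val)
 *	{
 *		u32 tmp;
 *
 *		tmp = le32_to_cpu(ctx->info0) & ~(0x7 << 0);
 *		tmp |= (val & 0x7) << 0;
 *		ctx->info0 = cpu_to_le32(tmp);
 *	}
 */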
258
259 BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
260 BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
261 BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
262 BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
263 BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
264 BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
265 BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
266 BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
267 BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
268 BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
269 BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
270 BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
271 BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
272 BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
273 BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
274 BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
275 BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1)
276 BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
277 BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1)
278 BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f)
279 BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
280 BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
281 BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
282 BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
283
284 static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
285 {
286 return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
287 (ep_ctx_read_deq_lo(ctx) << 4);
288 }
289
290 static inline void
291 ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
292 {
293 ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
294 ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
295 }
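
/*
 * A worked example, assuming the field layout above: the dequeue pointer
 * must be 16-byte aligned because bit 0 of deq_lo holds the dequeue cycle
 * state (DCS) and bits 3:1 carry no address bits. Writing the aligned
 * address 0x80001230 stores 0x8000123 in the deq_lo field and reads back
 * as ((u64)deq_hi << 32) | (0x8000123 << 4) == 0x80001230.
 */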
296
297 struct tegra_xudc_trb {
298 __le32 data_lo;
299 __le32 data_hi;
300 __le32 status;
301 __le32 control;
302 };
303
304 #define TRB_TYPE_RSVD 0
305 #define TRB_TYPE_NORMAL 1
306 #define TRB_TYPE_SETUP_STAGE 2
307 #define TRB_TYPE_DATA_STAGE 3
308 #define TRB_TYPE_STATUS_STAGE 4
309 #define TRB_TYPE_ISOCH 5
310 #define TRB_TYPE_LINK 6
311 #define TRB_TYPE_TRANSFER_EVENT 32
312 #define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
313 #define TRB_TYPE_STREAM 48
314 #define TRB_TYPE_SETUP_PACKET_EVENT 63
315
316 #define TRB_CMPL_CODE_INVALID 0
317 #define TRB_CMPL_CODE_SUCCESS 1
318 #define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
319 #define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
320 #define TRB_CMPL_CODE_USB_TRANS_ERR 4
321 #define TRB_CMPL_CODE_TRB_ERR 5
322 #define TRB_CMPL_CODE_STALL 6
323 #define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
324 #define TRB_CMPL_CODE_SHORT_PACKET 13
325 #define TRB_CMPL_CODE_RING_UNDERRUN 14
326 #define TRB_CMPL_CODE_RING_OVERRUN 15
327 #define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
328 #define TRB_CMPL_CODE_STOPPED 26
329 #define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
330 #define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
331 #define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
332 #define TRB_CMPL_CODE_HOST_REJECTED 221
333 #define TRB_CMPL_CODE_CTRL_DIR_ERR 222
334 #define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
335
336 #define BUILD_TRB_RW(name, member, shift, mask) \
337 static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
338 { \
339 return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
340 } \
341 static inline void \
342 trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
343 { \
344 u32 tmp; \
345 \
346 tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
347 tmp |= (val & (mask)) << (shift); \
348 trb->member = cpu_to_le32(tmp); \
349 }
350
351 BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
352 BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
353 BUILD_TRB_RW(seq_num, status, 0, 0xffff)
354 BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
355 BUILD_TRB_RW(td_size, status, 17, 0x1f)
356 BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
357 BUILD_TRB_RW(cycle, control, 0, 0x1)
358 BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
359 BUILD_TRB_RW(isp, control, 2, 0x1)
360 BUILD_TRB_RW(chain, control, 4, 0x1)
361 BUILD_TRB_RW(ioc, control, 5, 0x1)
362 BUILD_TRB_RW(type, control, 10, 0x3f)
363 BUILD_TRB_RW(stream_id, control, 16, 0xffff)
364 BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
365 BUILD_TRB_RW(tlbpc, control, 16, 0xf)
366 BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
367 BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
368 BUILD_TRB_RW(sia, control, 31, 0x1)
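
/*
 * Note that stream_id, endpoint_id, tlbpc and data_stage_dir above all
 * alias bits starting at control[16]; which accessor is meaningful depends
 * on the TRB type written via trb_write_type().
 */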
369
370 static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
371 {
372 return ((u64)trb_read_data_hi(trb) << 32) |
373 trb_read_data_lo(trb);
374 }
375
376 static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
377 {
378 trb_write_data_lo(trb, lower_32_bits(addr));
379 trb_write_data_hi(trb, upper_32_bits(addr));
380 }
381
382 struct tegra_xudc_request {
383 struct usb_request usb_req;
384
385 size_t buf_queued;
386 unsigned int trbs_queued;
387 unsigned int trbs_needed;
388 bool need_zlp;
389
390 struct tegra_xudc_trb *first_trb;
391 struct tegra_xudc_trb *last_trb;
392
393 struct list_head list;
394 };
395
396 struct tegra_xudc_ep {
397 struct tegra_xudc *xudc;
398 struct usb_ep usb_ep;
399 unsigned int index;
400 char name[8];
401
402 struct tegra_xudc_ep_context *context;
403
404 #define XUDC_TRANSFER_RING_SIZE 64
405 struct tegra_xudc_trb *transfer_ring;
406 dma_addr_t transfer_ring_phys;
407
408 unsigned int enq_ptr;
409 unsigned int deq_ptr;
410 bool pcs;
411 bool ring_full;
412 bool stream_rejected;
413
414 struct list_head queue;
415 const struct usb_endpoint_descriptor *desc;
416 const struct usb_ss_ep_comp_descriptor *comp_desc;
417 };
418
419 struct tegra_xudc_sel_timing {
420 __u8 u1sel;
421 __u8 u1pel;
422 __le16 u2sel;
423 __le16 u2pel;
424 };
425
426 enum tegra_xudc_setup_state {
427 WAIT_FOR_SETUP,
428 DATA_STAGE_XFER,
429 DATA_STAGE_RECV,
430 STATUS_STAGE_XFER,
431 STATUS_STAGE_RECV,
432 };
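
/*
 * Illustrative mapping: for an IN control transfer such as GET_DESCRIPTOR,
 * the state machine should move WAIT_FOR_SETUP -> DATA_STAGE_XFER (device
 * transmits) -> STATUS_STAGE_RECV (device receives the zero-length OUT
 * status), while an OUT transfer with a data stage uses DATA_STAGE_RECV
 * followed by STATUS_STAGE_XFER.
 */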
433
434 struct tegra_xudc_setup_packet {
435 struct usb_ctrlrequest ctrl_req;
436 unsigned int seq_num;
437 };
438
439 struct tegra_xudc_save_regs {
440 u32 ctrl;
441 u32 portpm;
442 };
443
444 struct tegra_xudc {
445 struct device *dev;
446 const struct tegra_xudc_soc *soc;
447 struct tegra_xusb_padctl *padctl;
448
449 spinlock_t lock;
450
451 struct usb_gadget gadget;
452 struct usb_gadget_driver *driver;
453
454 #define XUDC_NR_EVENT_RINGS 2
455 #define XUDC_EVENT_RING_SIZE 4096
456 struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
457 dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
458 unsigned int event_ring_index;
459 unsigned int event_ring_deq_ptr;
460 bool ccs;
461
462 #define XUDC_NR_EPS 32
463 struct tegra_xudc_ep ep[XUDC_NR_EPS];
464 struct tegra_xudc_ep_context *ep_context;
465 dma_addr_t ep_context_phys;
466
467 struct device *genpd_dev_device;
468 struct device *genpd_dev_ss;
469 struct device_link *genpd_dl_device;
470 struct device_link *genpd_dl_ss;
471
472 struct dma_pool *transfer_ring_pool;
473
474 bool queued_setup_packet;
475 struct tegra_xudc_setup_packet setup_packet;
476 enum tegra_xudc_setup_state setup_state;
477 u16 setup_seq_num;
478
479 u16 dev_addr;
480 u16 isoch_delay;
481 struct tegra_xudc_sel_timing sel_timing;
482 u8 test_mode_pattern;
483 u16 status_buf;
484 struct tegra_xudc_request *ep0_req;
485
486 bool pullup;
487
488 unsigned int nr_enabled_eps;
489 unsigned int nr_isoch_eps;
490
491 unsigned int device_state;
492 unsigned int resume_state;
493
494 int irq;
495
496 void __iomem *base;
497 resource_size_t phys_base;
498 void __iomem *ipfs;
499 void __iomem *fpci;
500
501 struct regulator_bulk_data *supplies;
502
503 struct clk_bulk_data *clks;
504
505 bool device_mode;
506 struct work_struct usb_role_sw_work;
507
508 struct phy **usb3_phy;
509 struct phy *curr_usb3_phy;
510 struct phy **utmi_phy;
511 struct phy *curr_utmi_phy;
512
513 struct tegra_xudc_save_regs saved_regs;
514 bool suspended;
515 bool powergated;
516
517 struct usb_phy **usbphy;
518 struct usb_phy *curr_usbphy;
519 struct notifier_block vbus_nb;
520
521 struct completion disconnect_complete;
522
523 bool selfpowered;
524
525 #define TOGGLE_VBUS_WAIT_MS 100
526 struct delayed_work plc_reset_work;
527 bool wait_csc;
528
529 struct delayed_work port_reset_war_work;
530 bool wait_for_sec_prc;
531 };
532
533 #define XUDC_TRB_MAX_BUFFER_SIZE 65536
534 #define XUDC_MAX_ISOCH_EPS 4
535 #define XUDC_INTERRUPT_MODERATION_US 0
536
537 static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
538 .bLength = USB_DT_ENDPOINT_SIZE,
539 .bDescriptorType = USB_DT_ENDPOINT,
540 .bEndpointAddress = 0,
541 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
542 .wMaxPacketSize = cpu_to_le16(64),
543 };
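
/*
 * 64 bytes is the control endpoint size for high/full speed; the driver is
 * expected to update wMaxPacketSize (e.g. to 512 for SuperSpeed) once the
 * connection speed is known.
 */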
544
545 struct tegra_xudc_soc {
546 const char * const *supply_names;
547 unsigned int num_supplies;
548 const char * const *clock_names;
549 unsigned int num_clks;
550 unsigned int num_phys;
551 bool u1_enable;
552 bool u2_enable;
553 bool lpm_enable;
554 bool invalid_seq_num;
555 bool pls_quirk;
556 bool port_reset_quirk;
557 bool port_speed_quirk;
558 bool has_ipfs;
559 };
560
561 static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
562 {
563 return readl(xudc->fpci + offset);
564 }
565
566 static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
567 unsigned int offset)
568 {
569 writel(val, xudc->fpci + offset);
570 }
571
572 static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
573 {
574 return readl(xudc->ipfs + offset);
575 }
576
577 static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
578 unsigned int offset)
579 {
580 writel(val, xudc->ipfs + offset);
581 }
582
583 static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
584 {
585 return readl(xudc->base + offset);
586 }
587
588 static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
589 unsigned int offset)
590 {
591 writel(val, xudc->base + offset);
592 }
593
594 static inline int xudc_readl_poll(struct tegra_xudc *xudc,
595 unsigned int offset, u32 mask, u32 val)
596 {
597 u32 regval;
598
599 return readl_poll_timeout_atomic(xudc->base + offset, regval,
600 (regval & mask) == val, 1, 100);
601 }
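
/*
 * Polls every 1 us for up to 100 us; the atomic variant is used because
 * callers may hold xudc->lock with interrupts disabled.
 */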
602
603 static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
604 {
605 return container_of(gadget, struct tegra_xudc, gadget);
606 }
607
608 static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
609 {
610 return container_of(ep, struct tegra_xudc_ep, usb_ep);
611 }
612
613 static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
614 {
615 return container_of(req, struct tegra_xudc_request, usb_req);
616 }
617
618 static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
619 struct tegra_xudc_trb *trb)
620 {
621 dev_dbg(xudc->dev,
622 "%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
623 type, trb, trb->data_lo, trb->data_hi, trb->status,
624 trb->control);
625 }
626
627 static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
628 {
629 u32 val;
630
631 /* limit port speed to gen 1 */
632 val = xudc_readl(xudc, SSPX_CORE_CNT56);
633 val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
634 val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
635 xudc_writel(xudc, val, SSPX_CORE_CNT56);
636
637 val = xudc_readl(xudc, SSPX_CORE_CNT57);
638 val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
639 val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
640 xudc_writel(xudc, val, SSPX_CORE_CNT57);
641
642 val = xudc_readl(xudc, SSPX_CORE_CNT65);
643 val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
644 val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
645 xudc_writel(xudc, val, SSPX_CORE_CNT65);
646
647 val = xudc_readl(xudc, SSPX_CORE_CNT66);
648 val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
649 val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
650 xudc_writel(xudc, val, SSPX_CORE_CNT66);
651
652 val = xudc_readl(xudc, SSPX_CORE_CNT67);
653 val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
654 val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
655 xudc_writel(xudc, val, SSPX_CORE_CNT67);
656
657 val = xudc_readl(xudc, SSPX_CORE_CNT72);
658 val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
659 val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
660 xudc_writel(xudc, val, SSPX_CORE_CNT72);
661 }
662
663 static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
664 {
665 u32 val;
666
667 /* restore port speed to gen 2 */
668 val = xudc_readl(xudc, SSPX_CORE_CNT56);
669 val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
670 val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
671 xudc_writel(xudc, val, SSPX_CORE_CNT56);
672
673 val = xudc_readl(xudc, SSPX_CORE_CNT57);
674 val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
675 val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
676 xudc_writel(xudc, val, SSPX_CORE_CNT57);
677
678 val = xudc_readl(xudc, SSPX_CORE_CNT65);
679 val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
680 val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
681 xudc_writel(xudc, val, SSPX_CORE_CNT65);
682
683 val = xudc_readl(xudc, SSPX_CORE_CNT66);
684 val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
685 val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
686 xudc_writel(xudc, val, SSPX_CORE_CNT66);
687
688 val = xudc_readl(xudc, SSPX_CORE_CNT67);
689 val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
690 val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
691 xudc_writel(xudc, val, SSPX_CORE_CNT67);
692
693 val = xudc_readl(xudc, SSPX_CORE_CNT72);
694 val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
695 val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
696 xudc_writel(xudc, val, SSPX_CORE_CNT72);
697 }
698
699 static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
700 {
701 int err;
702
703 pm_runtime_get_sync(xudc->dev);
704
705 err = phy_power_on(xudc->curr_utmi_phy);
706 if (err < 0)
707 dev_err(xudc->dev, "UTMI power on failed: %d\n", err);
708
709 err = phy_power_on(xudc->curr_usb3_phy);
710 if (err < 0)
711 dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err);
712
713 dev_dbg(xudc->dev, "device mode on\n");
714
715 phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
716 USB_ROLE_DEVICE);
717 }
718
719 static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
720 {
721 bool connected = false;
722 u32 pls, val;
723 int err;
724
725 dev_dbg(xudc->dev, "device mode off\n");
726
727 connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);
728
729 reinit_completion(&xudc->disconnect_complete);
730
731 if (xudc->soc->port_speed_quirk)
732 tegra_xudc_restore_port_speed(xudc);
733
734 phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);
735
736 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
737 PORTSC_PLS_SHIFT;
738
739 /* Direct link to U0 if disconnected in RESUME or U2. */
740 if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
741 (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
742 val = xudc_readl(xudc, PORTPM);
743 val |= PORTPM_FRWE;
744 xudc_writel(xudc, val, PORTPM);
745
746 val = xudc_readl(xudc, PORTSC);
747 val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
748 val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
749 xudc_writel(xudc, val, PORTSC);
750 }
751
752 /* Wait for disconnect event. */
753 if (connected)
754 wait_for_completion(&xudc->disconnect_complete);
755
756 /* Make sure interrupt handler has completed before powergating. */
757 synchronize_irq(xudc->irq);
758
759 err = phy_power_off(xudc->curr_utmi_phy);
760 if (err < 0)
761 dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);
762
763 err = phy_power_off(xudc->curr_usb3_phy);
764 if (err < 0)
765 dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err);
766
767 pm_runtime_put(xudc->dev);
768 }
769
770 static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
771 {
772 struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
773 usb_role_sw_work);
774
775 if (xudc->device_mode)
776 tegra_xudc_device_mode_on(xudc);
777 else
778 tegra_xudc_device_mode_off(xudc);
779 }
780
781 static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
782 struct usb_phy *usbphy)
783 {
784 unsigned int i;
785
786 for (i = 0; i < xudc->soc->num_phys; i++) {
787 if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
788 return i;
789 }
790
791 dev_info(xudc->dev, "phy index could not be found for shared USB PHY\n");
792 return -1;
793 }
794
795 static int tegra_xudc_vbus_notify(struct notifier_block *nb,
796 unsigned long action, void *data)
797 {
798 struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
799 vbus_nb);
800 struct usb_phy *usbphy = (struct usb_phy *)data;
801 int phy_index;
802
803 dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);
804
805 if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
806 (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
807 dev_dbg(xudc->dev, "Same role(%d) received. Ignore\n",
808 xudc->device_mode);
809 return NOTIFY_OK;
810 }
811
812 xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS);
814
815 phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
816 dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
817 phy_index);
818
819 if (!xudc->suspended && phy_index != -1) {
820 xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
821 xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
822 xudc->curr_usbphy = usbphy;
823 schedule_work(&xudc->usb_role_sw_work);
824 }
825
826 return NOTIFY_OK;
827 }
828
829 static void tegra_xudc_plc_reset_work(struct work_struct *work)
830 {
831 struct delayed_work *dwork = to_delayed_work(work);
832 struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
833 plc_reset_work);
834 unsigned long flags;
835
836 spin_lock_irqsave(&xudc->lock, flags);
837
838 if (xudc->wait_csc) {
839 u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
840 PORTSC_PLS_SHIFT;
841
842 if (pls == PORTSC_PLS_INACTIVE) {
843 dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
844 phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
845 USB_ROLE_NONE);
846 phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
847 USB_ROLE_DEVICE);
848
849 xudc->wait_csc = false;
850 }
851 }
852
853 spin_unlock_irqrestore(&xudc->lock, flags);
854 }
855
856 static void tegra_xudc_port_reset_war_work(struct work_struct *work)
857 {
858 struct delayed_work *dwork = to_delayed_work(work);
859 struct tegra_xudc *xudc =
860 container_of(dwork, struct tegra_xudc, port_reset_war_work);
861 unsigned long flags;
862 u32 pls;
863 int ret;
864
865 spin_lock_irqsave(&xudc->lock, flags);
866
867 if (xudc->device_mode && xudc->wait_for_sec_prc) {
868 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
869 PORTSC_PLS_SHIFT;
870 dev_dbg(xudc->dev, "pls = %x\n", pls);
871
872 if (pls == PORTSC_PLS_DISABLED) {
873 dev_dbg(xudc->dev, "toggle vbus\n");
874 /* PRC hasn't completed within 100 ms, so toggle VBUS */
875 ret = tegra_phy_xusb_utmi_port_reset(
876 xudc->curr_utmi_phy);
877 if (ret == 1)
878 xudc->wait_for_sec_prc = 0;
879 }
880 }
881
882 spin_unlock_irqrestore(&xudc->lock, flags);
883 }
884
885 static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
886 struct tegra_xudc_trb *trb)
887 {
888 unsigned int index;
889
890 index = trb - ep->transfer_ring;
891
892 if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
893 return 0;
894
895 return (ep->transfer_ring_phys + index * sizeof(*trb));
896 }
897
898 static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
899 dma_addr_t addr)
900 {
901 struct tegra_xudc_trb *trb;
902 unsigned int index;
903
904 index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
905
906 if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
907 return NULL;
908
909 trb = &ep->transfer_ring[index];
910
911 return trb;
912 }
913
914 static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
915 {
916 xudc_writel(xudc, BIT(ep), EP_RELOAD);
917 xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
918 }
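
/*
 * The EP_PAUSE and EP_HALT helpers below share one handshake: update the
 * per-endpoint bit, poll EP_STCHG until the controller acknowledges the
 * state change, then write the same bit back to EP_STCHG to clear it.
 */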
919
920 static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
921 {
922 u32 val;
923
924 val = xudc_readl(xudc, EP_PAUSE);
925 if (val & BIT(ep))
926 return;
927 val |= BIT(ep);
928
929 xudc_writel(xudc, val, EP_PAUSE);
930
931 xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
932
933 xudc_writel(xudc, BIT(ep), EP_STCHG);
934 }
935
936 static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
937 {
938 u32 val;
939
940 val = xudc_readl(xudc, EP_PAUSE);
941 if (!(val & BIT(ep)))
942 return;
943 val &= ~BIT(ep);
944
945 xudc_writel(xudc, val, EP_PAUSE);
946
947 xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
948
949 xudc_writel(xudc, BIT(ep), EP_STCHG);
950 }
951
952 static void ep_unpause_all(struct tegra_xudc *xudc)
953 {
954 u32 val;
955
956 val = xudc_readl(xudc, EP_PAUSE);
957
958 xudc_writel(xudc, 0, EP_PAUSE);
959
960 xudc_readl_poll(xudc, EP_STCHG, val, val);
961
962 xudc_writel(xudc, val, EP_STCHG);
963 }
964
965 static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
966 {
967 u32 val;
968
969 val = xudc_readl(xudc, EP_HALT);
970 if (val & BIT(ep))
971 return;
972 val |= BIT(ep);
973 xudc_writel(xudc, val, EP_HALT);
974
975 xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
976
977 xudc_writel(xudc, BIT(ep), EP_STCHG);
978 }
979
980 static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
981 {
982 u32 val;
983
984 val = xudc_readl(xudc, EP_HALT);
985 if (!(val & BIT(ep)))
986 return;
987 val &= ~BIT(ep);
988 xudc_writel(xudc, val, EP_HALT);
989
990 xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
991
992 xudc_writel(xudc, BIT(ep), EP_STCHG);
993 }
994
995 static void ep_unhalt_all(struct tegra_xudc *xudc)
996 {
997 u32 val;
998
999 val = xudc_readl(xudc, EP_HALT);
1000 if (!val)
1001 return;
1002 xudc_writel(xudc, 0, EP_HALT);
1003
1004 xudc_readl_poll(xudc, EP_STCHG, val, val);
1005
1006 xudc_writel(xudc, val, EP_STCHG);
1007 }
1008
1009 static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
1010 {
1011 xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
1012 xudc_writel(xudc, BIT(ep), EP_STOPPED);
1013 }
1014
1015 static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
1016 {
1017 xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
1018 }
1019
1020 static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
1021 struct tegra_xudc_request *req, int status)
1022 {
1023 struct tegra_xudc *xudc = ep->xudc;
1024
1025 dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
1026 req, ep->index, status);
1027
1028 if (likely(req->usb_req.status == -EINPROGRESS))
1029 req->usb_req.status = status;
1030
1031 list_del_init(&req->list);
1032
1033 if (usb_endpoint_xfer_control(ep->desc)) {
1034 usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
1035 (xudc->setup_state ==
1036 DATA_STAGE_XFER));
1037 } else {
1038 usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
1039 usb_endpoint_dir_in(ep->desc));
1040 }
1041
1042 spin_unlock(&xudc->lock);
1043 usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
1044 spin_lock(&xudc->lock);
1045 }
1046
1047 static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
1048 {
1049 struct tegra_xudc_request *req;
1050
1051 while (!list_empty(&ep->queue)) {
1052 req = list_first_entry(&ep->queue, struct tegra_xudc_request,
1053 list);
1054 tegra_xudc_req_done(ep, req, status);
1055 }
1056 }
1057
1058 static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
1059 {
1060 if (ep->ring_full)
1061 return 0;
1062
1063 if (ep->deq_ptr > ep->enq_ptr)
1064 return ep->deq_ptr - ep->enq_ptr - 1;
1065
1066 return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
1067 }
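
/*
 * The -1/-2 above keep one slot free to distinguish a full ring from an
 * empty one and account for the link TRB in the last slot. For example,
 * with enq_ptr = 10 and deq_ptr = 5, 64 - (10 - 5) - 2 = 57 TRBs are
 * available.
 */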
1068
1069 static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
1070 struct tegra_xudc_request *req,
1071 struct tegra_xudc_trb *trb,
1072 bool ioc)
1073 {
1074 struct tegra_xudc *xudc = ep->xudc;
1075 dma_addr_t buf_addr;
1076 size_t len;
1077
1078 len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
1079 req->buf_queued);
1080 if (len > 0)
1081 buf_addr = req->usb_req.dma + req->buf_queued;
1082 else
1083 buf_addr = 0;
1084
1085 trb_write_data_ptr(trb, buf_addr);
1086
1087 trb_write_transfer_len(trb, len);
1088 trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
1089
1090 if (req->trbs_queued == req->trbs_needed - 1 ||
1091 (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
1092 trb_write_chain(trb, 0);
1093 else
1094 trb_write_chain(trb, 1);
1095
1096 trb_write_ioc(trb, ioc);
1097
1098 if (usb_endpoint_dir_out(ep->desc) ||
1099 (usb_endpoint_xfer_control(ep->desc) &&
1100 (xudc->setup_state == DATA_STAGE_RECV)))
1101 trb_write_isp(trb, 1);
1102 else
1103 trb_write_isp(trb, 0);
1104
1105 if (usb_endpoint_xfer_control(ep->desc)) {
1106 if (xudc->setup_state == DATA_STAGE_XFER ||
1107 xudc->setup_state == DATA_STAGE_RECV)
1108 trb_write_type(trb, TRB_TYPE_DATA_STAGE);
1109 else
1110 trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
1111
1112 if (xudc->setup_state == DATA_STAGE_XFER ||
1113 xudc->setup_state == STATUS_STAGE_XFER)
1114 trb_write_data_stage_dir(trb, 1);
1115 else
1116 trb_write_data_stage_dir(trb, 0);
1117 } else if (usb_endpoint_xfer_isoc(ep->desc)) {
1118 trb_write_type(trb, TRB_TYPE_ISOCH);
1119 trb_write_sia(trb, 1);
1120 trb_write_frame_id(trb, 0);
1121 trb_write_tlbpc(trb, 0);
1122 } else if (usb_ss_max_streams(ep->comp_desc)) {
1123 trb_write_type(trb, TRB_TYPE_STREAM);
1124 trb_write_stream_id(trb, req->usb_req.stream_id);
1125 } else {
1126 trb_write_type(trb, TRB_TYPE_NORMAL);
1127 trb_write_stream_id(trb, 0);
1128 }
1129
1130 trb_write_cycle(trb, ep->pcs);
1131
1132 req->trbs_queued++;
1133 req->buf_queued += len;
1134
1135 dump_trb(xudc, "TRANSFER", trb);
1136 }
1137
1138 static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
1139 struct tegra_xudc_request *req)
1140 {
1141 unsigned int i, count, available;
1142 bool wait_td = false;
1143
1144 available = ep_available_trbs(ep);
1145 count = req->trbs_needed - req->trbs_queued;
1146 if (available < count) {
1147 count = available;
1148 ep->ring_full = true;
1149 }
1150
1151 /*
1152 * To generate a zero-length packet on the USB bus, SW needs to schedule a
1153 * standalone zero-length TD. According to HW's behavior, SW needs
1154 * to schedule TDs in different ways for different endpoint types.
1155 *
1156 * For control endpoint:
1157 * - Data stage TD (IOC = 1, CH = 0)
1158 * - Ring doorbell and wait for transfer event
1159 * - Data stage TD for ZLP (IOC = 1, CH = 0)
1160 * - Ring doorbell
1161 *
1162 * For bulk and interrupt endpoints:
1163 * - Normal transfer TD (IOC = 0, CH = 0)
1164 * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
1165 * - Ring doorbell
1166 */
1167
1168 if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
1169 wait_td = true;
1170
1171 if (!req->first_trb)
1172 req->first_trb = &ep->transfer_ring[ep->enq_ptr];
1173
1174 for (i = 0; i < count; i++) {
1175 struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
1176 bool ioc = false;
1177
1178 if ((i == count - 1) || (wait_td && i == count - 2))
1179 ioc = true;
1180
1181 tegra_xudc_queue_one_trb(ep, req, trb, ioc);
1182 req->last_trb = trb;
1183
1184 ep->enq_ptr++;
1185 if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
1186 trb = &ep->transfer_ring[ep->enq_ptr];
1187 trb_write_cycle(trb, ep->pcs);
1188 ep->pcs = !ep->pcs;
1189 ep->enq_ptr = 0;
1190 }
1191
1192 if (ioc)
1193 break;
1194 }
1195
1196 return count;
1197 }
1198
1199 static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
1200 {
1201 struct tegra_xudc *xudc = ep->xudc;
1202 u32 val;
1203
1204 if (list_empty(&ep->queue))
1205 return;
1206
1207 val = DB_TARGET(ep->index);
1208 if (usb_endpoint_xfer_control(ep->desc)) {
1209 val |= DB_STREAMID(xudc->setup_seq_num);
1210 } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
1211 struct tegra_xudc_request *req;
1212
1213 /* Don't ring doorbell if the stream has been rejected. */
1214 if (ep->stream_rejected)
1215 return;
1216
1217 req = list_first_entry(&ep->queue, struct tegra_xudc_request,
1218 list);
1219 val |= DB_STREAMID(req->usb_req.stream_id);
1220 }
1221
1222 dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
1223 xudc_writel(xudc, val, DB);
1224 }
1225
1226 static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
1227 {
1228 struct tegra_xudc_request *req;
1229 bool trbs_queued = false;
1230
1231 list_for_each_entry(req, &ep->queue, list) {
1232 if (ep->ring_full)
1233 break;
1234
1235 if (tegra_xudc_queue_trbs(ep, req) > 0)
1236 trbs_queued = true;
1237 }
1238
1239 if (trbs_queued)
1240 tegra_xudc_ep_ring_doorbell(ep);
1241 }
1242
1243 static int
1244 __tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
1245 {
1246 struct tegra_xudc *xudc = ep->xudc;
1247 int err;
1248
1249 if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
1250 dev_err(xudc->dev, "control EP has pending transfers\n");
1251 return -EINVAL;
1252 }
1253
1254 if (usb_endpoint_xfer_control(ep->desc)) {
1255 err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
1256 (xudc->setup_state ==
1257 DATA_STAGE_XFER));
1258 } else {
1259 err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
1260 usb_endpoint_dir_in(ep->desc));
1261 }
1262
1263 if (err < 0) {
1264 dev_err(xudc->dev, "failed to map request: %d\n", err);
1265 return err;
1266 }
1267
1268 req->first_trb = NULL;
1269 req->last_trb = NULL;
1270 req->buf_queued = 0;
1271 req->trbs_queued = 0;
1272 req->need_zlp = false;
1273 req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
1274 XUDC_TRB_MAX_BUFFER_SIZE);
1275 if (req->usb_req.length == 0)
1276 req->trbs_needed++;
1277
1278 if (!usb_endpoint_xfer_isoc(ep->desc) &&
1279 req->usb_req.zero && req->usb_req.length &&
1280 ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
1281 req->trbs_needed++;
1282 req->need_zlp = true;
1283 }
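
/*
 * For example, a 512-byte request with usb_req.zero set on a bulk
 * endpoint with a 512-byte maxpacket needs two TRBs: one for the data
 * and one standalone zero-length TRB.
 */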
1284
1285 req->usb_req.status = -EINPROGRESS;
1286 req->usb_req.actual = 0;
1287
1288 list_add_tail(&req->list, &ep->queue);
1289
1290 tegra_xudc_ep_kick_queue(ep);
1291
1292 return 0;
1293 }
1294
1295 static int
1296 tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
1297 gfp_t gfp)
1298 {
1299 struct tegra_xudc_request *req;
1300 struct tegra_xudc_ep *ep;
1301 struct tegra_xudc *xudc;
1302 unsigned long flags;
1303 int ret;
1304
1305 if (!usb_ep || !usb_req)
1306 return -EINVAL;
1307
1308 ep = to_xudc_ep(usb_ep);
1309 req = to_xudc_req(usb_req);
1310 xudc = ep->xudc;
1311
1312 spin_lock_irqsave(&xudc->lock, flags);
1313 if (xudc->powergated || !ep->desc) {
1314 ret = -ESHUTDOWN;
1315 goto unlock;
1316 }
1317
1318 ret = __tegra_xudc_ep_queue(ep, req);
1319 unlock:
1320 spin_unlock_irqrestore(&xudc->lock, flags);
1321
1322 return ret;
1323 }
1324
1325 static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
1326 struct tegra_xudc_request *req)
1327 {
1328 struct tegra_xudc_trb *trb = req->first_trb;
1329 bool pcs_enq = trb_read_cycle(trb);
1330 bool pcs;
1331
1332 /*
1333 * Clear out all the TRBs that are part of or follow the cancelled
1334 * request, and restore each TRB's cycle bit to its un-enqueued state.
1335 */
1336 while (trb != &ep->transfer_ring[ep->enq_ptr]) {
1337 pcs = trb_read_cycle(trb);
1338 memset(trb, 0, sizeof(*trb));
1339 trb_write_cycle(trb, !pcs);
1340 trb++;
1341
1342 if (trb_read_type(trb) == TRB_TYPE_LINK)
1343 trb = ep->transfer_ring;
1344 }
1345
1346 /* Requests will be re-queued at the start of the cancelled request. */
1347 ep->enq_ptr = req->first_trb - ep->transfer_ring;
1348 /*
1349 * Retrieve the correct cycle bit state from the first trb of
1350 * the cancelled request.
1351 */
1352 ep->pcs = pcs_enq;
1353 ep->ring_full = false;
1354 list_for_each_entry_continue(req, &ep->queue, list) {
1355 req->usb_req.status = -EINPROGRESS;
1356 req->usb_req.actual = 0;
1357
1358 req->first_trb = NULL;
1359 req->last_trb = NULL;
1360 req->buf_queued = 0;
1361 req->trbs_queued = 0;
1362 }
1363 }
1364
1365 /*
1366 * Determine if the given TRB is in the range [first trb, last trb] for the
1367 * given request.
1368 */
1369 static bool trb_in_request(struct tegra_xudc_ep *ep,
1370 struct tegra_xudc_request *req,
1371 struct tegra_xudc_trb *trb)
1372 {
1373 dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
1374 req->first_trb, req->last_trb, trb);
1375
1376 if (trb >= req->first_trb && (trb <= req->last_trb ||
1377 req->last_trb < req->first_trb))
1378 return true;
1379
1380 if (trb < req->first_trb && trb <= req->last_trb &&
1381 req->last_trb < req->first_trb)
1382 return true;
1383
1384 return false;
1385 }
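
/*
 * For example, with a 64-TRB ring, a request wrapping from TRB 60 to
 * TRB 2 has first_trb = &ring[60] and last_trb = &ring[2]: the first
 * test above matches TRBs 60..63 and the second matches TRBs 0..2
 * (slot 63 holds the link TRB and is never part of a request).
 */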
1386
1387 /*
1388 * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
1389 * for the given endpoint and request.
1390 */
1391 static bool trb_before_request(struct tegra_xudc_ep *ep,
1392 struct tegra_xudc_request *req,
1393 struct tegra_xudc_trb *trb)
1394 {
1395 struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];
1396
1397 dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
1398 __func__, req->first_trb, req->last_trb, enq_trb, trb);
1399
1400 if (trb < req->first_trb && (enq_trb <= trb ||
1401 req->first_trb < enq_trb))
1402 return true;
1403
1404 if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
1405 return true;
1406
1407 return false;
1408 }
1409
1410 static int
1411 __tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
1412 struct tegra_xudc_request *req)
1413 {
1414 struct tegra_xudc *xudc = ep->xudc;
1415 struct tegra_xudc_request *r;
1416 struct tegra_xudc_trb *deq_trb;
1417 bool busy, kick_queue = false;
1418 int ret = 0;
1419
1420 /* Make sure the request is actually queued to this endpoint. */
1421 list_for_each_entry(r, &ep->queue, list) {
1422 if (r == req)
1423 break;
1424 }
1425
1426 if (r != req)
1427 return -EINVAL;
1428
1429 /* Request hasn't been queued in the transfer ring yet. */
1430 if (!req->trbs_queued) {
1431 tegra_xudc_req_done(ep, req, -ECONNRESET);
1432 return 0;
1433 }
1434
1435 /* Halt DMA for this endpoint. */
1436 if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
1437 ep_pause(xudc, ep->index);
1438 ep_wait_for_inactive(xudc, ep->index);
1439 }
1440
1441 deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
1442 /* Is the hardware processing the TRB at the dequeue pointer? */
1443 busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));
1444
1445 if (trb_in_request(ep, req, deq_trb) && busy) {
1446 /*
1447 * Request has been partially completed or it hasn't
1448 * started processing yet.
1449 */
1450 dma_addr_t deq_ptr;
1451
1452 squeeze_transfer_ring(ep, req);
1453
1454 req->usb_req.actual = ep_ctx_read_edtla(ep->context);
1455 tegra_xudc_req_done(ep, req, -ECONNRESET);
1456 kick_queue = true;
1457
1458 /* EDTLA is > 0: request has been partially completed */
1459 if (req->usb_req.actual > 0) {
1460 /*
1461 * Abort the pending transfer and update the dequeue
1462 * pointer
1463 */
1464 ep_ctx_write_edtla(ep->context, 0);
1465 ep_ctx_write_partial_td(ep->context, 0);
1466 ep_ctx_write_data_offset(ep->context, 0);
1467
1468 deq_ptr = trb_virt_to_phys(ep,
1469 &ep->transfer_ring[ep->enq_ptr]);
1470
1471 if (dma_mapping_error(xudc->dev, deq_ptr)) {
1472 ret = -EINVAL;
1473 } else {
1474 ep_ctx_write_deq_ptr(ep->context, deq_ptr);
1475 ep_ctx_write_dcs(ep->context, ep->pcs);
1476 ep_reload(xudc, ep->index);
1477 }
1478 }
1479 } else if (trb_before_request(ep, req, deq_trb) && busy) {
1480 /* Request hasn't started processing yet. */
1481 squeeze_transfer_ring(ep, req);
1482
1483 tegra_xudc_req_done(ep, req, -ECONNRESET);
1484 kick_queue = true;
1485 } else {
1486 /*
1487 * Request has completed, but we haven't processed the
1488 * completion event yet.
1489 */
1490 tegra_xudc_req_done(ep, req, -ECONNRESET);
1491 ret = -EINVAL;
1492 }
1493
1494 /* Resume the endpoint. */
1495 ep_unpause(xudc, ep->index);
1496
1497 if (kick_queue)
1498 tegra_xudc_ep_kick_queue(ep);
1499
1500 return ret;
1501 }
1502
1503 static int
1504 tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
1505 {
1506 struct tegra_xudc_request *req;
1507 struct tegra_xudc_ep *ep;
1508 struct tegra_xudc *xudc;
1509 unsigned long flags;
1510 int ret;
1511
1512 if (!usb_ep || !usb_req)
1513 return -EINVAL;
1514
1515 ep = to_xudc_ep(usb_ep);
1516 req = to_xudc_req(usb_req);
1517 xudc = ep->xudc;
1518
1519 spin_lock_irqsave(&xudc->lock, flags);
1520
1521 if (xudc->powergated || !ep->desc) {
1522 ret = -ESHUTDOWN;
1523 goto unlock;
1524 }
1525
1526 ret = __tegra_xudc_ep_dequeue(ep, req);
1527 unlock:
1528 spin_unlock_irqrestore(&xudc->lock, flags);
1529
1530 return ret;
1531 }
1532
1533 static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
1534 {
1535 struct tegra_xudc *xudc = ep->xudc;
1536
1537 if (!ep->desc)
1538 return -EINVAL;
1539
1540 if (usb_endpoint_xfer_isoc(ep->desc)) {
1541 dev_err(xudc->dev, "can't halt isochronous EP\n");
1542 return -ENOTSUPP;
1543 }
1544
1545 if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
1546 dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
1547 halt ? "halted" : "not halted");
1548 return 0;
1549 }
1550
1551 if (halt) {
1552 ep_halt(xudc, ep->index);
1553 } else {
1554 ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
1555
1556 ep_reload(xudc, ep->index);
1557
1558 ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
1559 ep_ctx_write_rsvd(ep->context, 0);
1560 ep_ctx_write_partial_td(ep->context, 0);
1561 ep_ctx_write_splitxstate(ep->context, 0);
1562 ep_ctx_write_seq_num(ep->context, 0);
1563
1564 ep_reload(xudc, ep->index);
1565 ep_unpause(xudc, ep->index);
1566 ep_unhalt(xudc, ep->index);
1567
1568 tegra_xudc_ep_ring_doorbell(ep);
1569 }
1570
1571 return 0;
1572 }
1573
1574 static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
1575 {
1576 struct tegra_xudc_ep *ep;
1577 struct tegra_xudc *xudc;
1578 unsigned long flags;
1579 int ret;
1580
1581 if (!usb_ep)
1582 return -EINVAL;
1583
1584 ep = to_xudc_ep(usb_ep);
1585 xudc = ep->xudc;
1586
1587 spin_lock_irqsave(&xudc->lock, flags);
1588 if (xudc->powergated) {
1589 ret = -ESHUTDOWN;
1590 goto unlock;
1591 }
1592
1593 if (value && usb_endpoint_dir_in(ep->desc) &&
1594 !list_empty(&ep->queue)) {
1595 dev_err(xudc->dev, "can't halt EP with requests pending\n");
1596 ret = -EAGAIN;
1597 goto unlock;
1598 }
1599
1600 ret = __tegra_xudc_ep_set_halt(ep, value);
1601 unlock:
1602 spin_unlock_irqrestore(&xudc->lock, flags);
1603
1604 return ret;
1605 }
1606
1607 static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
1608 {
1609 const struct usb_endpoint_descriptor *desc = ep->desc;
1610 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
1611 struct tegra_xudc *xudc = ep->xudc;
1612 u16 maxpacket, maxburst = 0, esit = 0;
1613 u32 val;
1614
1615 maxpacket = usb_endpoint_maxp(desc);
1616 if (xudc->gadget.speed == USB_SPEED_SUPER) {
1617 if (!usb_endpoint_xfer_control(desc))
1618 maxburst = comp_desc->bMaxBurst;
1619
1620 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
1621 esit = le16_to_cpu(comp_desc->wBytesPerInterval);
1622 } else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
1623 (usb_endpoint_xfer_int(desc) ||
1624 usb_endpoint_xfer_isoc(desc))) {
1625 if (xudc->gadget.speed == USB_SPEED_HIGH) {
1626 maxburst = usb_endpoint_maxp_mult(desc) - 1;
1627 if (maxburst == 0x3) {
1628 dev_warn(xudc->dev,
1629 "invalid endpoint maxburst\n");
1630 maxburst = 0x2;
1631 }
1632 }
1633 esit = maxpacket * (maxburst + 1);
1634 }
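
/*
 * For example, a high-speed isochronous endpoint with a 1024-byte
 * maxpacket and a multiplier of 3 (maxburst = 2) yields
 * esit = 1024 * 3 = 3072 bytes per service interval.
 */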
1635
1636 memset(ep->context, 0, sizeof(*ep->context));
1637
1638 ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
1639 ep_ctx_write_interval(ep->context, desc->bInterval);
1640 if (xudc->gadget.speed == USB_SPEED_SUPER) {
1641 if (usb_endpoint_xfer_isoc(desc)) {
1642 ep_ctx_write_mult(ep->context,
1643 comp_desc->bmAttributes & 0x3);
1644 }
1645
1646 if (usb_endpoint_xfer_bulk(desc)) {
1647 ep_ctx_write_max_pstreams(ep->context,
1648 comp_desc->bmAttributes &
1649 0x1f);
1650 ep_ctx_write_lsa(ep->context, 1);
1651 }
1652 }
1653
1654 if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
1655 val = usb_endpoint_type(desc);
1656 else
1657 val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;
1658
1659 ep_ctx_write_type(ep->context, val);
1660 ep_ctx_write_cerr(ep->context, 0x3);
1661 ep_ctx_write_max_packet_size(ep->context, maxpacket);
1662 ep_ctx_write_max_burst_size(ep->context, maxburst);
1663
1664 ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
1665 ep_ctx_write_dcs(ep->context, ep->pcs);
1666
1667 /* Select a reasonable average TRB length based on endpoint type. */
1668 switch (usb_endpoint_type(desc)) {
1669 case USB_ENDPOINT_XFER_CONTROL:
1670 val = 8;
1671 break;
1672 case USB_ENDPOINT_XFER_INT:
1673 val = 1024;
1674 break;
1675 case USB_ENDPOINT_XFER_BULK:
1676 case USB_ENDPOINT_XFER_ISOC:
1677 default:
1678 val = 3072;
1679 break;
1680 }
1681
1682 ep_ctx_write_avg_trb_len(ep->context, val);
1683 ep_ctx_write_max_esit_payload(ep->context, esit);
1684
1685 ep_ctx_write_cerrcnt(ep->context, 0x3);
1686 }
1687
1688 static void setup_link_trb(struct tegra_xudc_ep *ep,
1689 struct tegra_xudc_trb *trb)
1690 {
1691 trb_write_data_ptr(trb, ep->transfer_ring_phys);
1692 trb_write_type(trb, TRB_TYPE_LINK);
1693 trb_write_toggle_cycle(trb, 1);
1694 }
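
/*
 * The link TRB above occupies the last slot of the transfer ring and
 * points back to the ring base; toggle_cycle tells the controller to
 * invert its cycle state on the wrap, matching the producer flipping
 * ep->pcs in tegra_xudc_queue_trbs().
 */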
1695
1696 static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
1697 {
1698 struct tegra_xudc *xudc = ep->xudc;
1699
1700 if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
1701 dev_err(xudc->dev, "endpoint %u already disabled\n",
1702 ep->index);
1703 return -EINVAL;
1704 }
1705
1706 ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
1707
1708 ep_reload(xudc, ep->index);
1709
1710 tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
1711
1712 xudc->nr_enabled_eps--;
1713 if (usb_endpoint_xfer_isoc(ep->desc))
1714 xudc->nr_isoch_eps--;
1715
1716 ep->desc = NULL;
1717 ep->comp_desc = NULL;
1718
1719 memset(ep->context, 0, sizeof(*ep->context));
1720
1721 ep_unpause(xudc, ep->index);
1722 ep_unhalt(xudc, ep->index);
1723 if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
1724 xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
1725
1726 /*
1727 * If this is the last endpoint disabled in a de-configure request,
1728 * switch back to address state.
1729 */
1730 if ((xudc->device_state == USB_STATE_CONFIGURED) &&
1731 (xudc->nr_enabled_eps == 1)) {
1732 u32 val;
1733
1734 xudc->device_state = USB_STATE_ADDRESS;
1735 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1736
1737 val = xudc_readl(xudc, CTRL);
1738 val &= ~CTRL_RUN;
1739 xudc_writel(xudc, val, CTRL);
1740 }
1741
1742 dev_info(xudc->dev, "EP %u disabled\n", ep->index);
1743
1744 return 0;
1745 }
1746
1747 static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
1748 {
1749 struct tegra_xudc_ep *ep;
1750 struct tegra_xudc *xudc;
1751 unsigned long flags;
1752 int ret;
1753
1754 if (!usb_ep)
1755 return -EINVAL;
1756
1757 ep = to_xudc_ep(usb_ep);
1758 xudc = ep->xudc;
1759
1760 spin_lock_irqsave(&xudc->lock, flags);
1761 if (xudc->powergated) {
1762 ret = -ESHUTDOWN;
1763 goto unlock;
1764 }
1765
1766 ret = __tegra_xudc_ep_disable(ep);
1767 unlock:
1768 spin_unlock_irqrestore(&xudc->lock, flags);
1769
1770 return ret;
1771 }
1772
1773 static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
1774 const struct usb_endpoint_descriptor *desc)
1775 {
1776 struct tegra_xudc *xudc = ep->xudc;
1777 unsigned int i;
1778 u32 val;
1779
1780 if (xudc->gadget.speed == USB_SPEED_SUPER &&
1781 !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
1782 return -EINVAL;
1783
1784 /* Disable the EP if it is not already disabled */
1785 if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
1786 __tegra_xudc_ep_disable(ep);
1787
1788 ep->desc = desc;
1789 ep->comp_desc = ep->usb_ep.comp_desc;
1790
1791 if (usb_endpoint_xfer_isoc(desc)) {
1792 if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
1793 dev_err(xudc->dev, "too many isochronous endpoints\n");
1794 return -EBUSY;
1795 }
1796 xudc->nr_isoch_eps++;
1797 }
1798
1799 memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
1800 sizeof(*ep->transfer_ring));
1801 setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
1802
1803 ep->enq_ptr = 0;
1804 ep->deq_ptr = 0;
1805 ep->pcs = true;
1806 ep->ring_full = false;
1807 xudc->nr_enabled_eps++;
1808
1809 tegra_xudc_ep_context_setup(ep);
1810
1811 /*
1812 * No need to reload and un-halt EP0. This will be done automatically
1813 * once a valid SETUP packet is received.
1814 */
1815 if (usb_endpoint_xfer_control(desc))
1816 goto out;
1817
1818 /*
1819 * Transition to configured state once the first non-control
1820 * endpoint is enabled.
1821 */
1822 if (xudc->device_state == USB_STATE_ADDRESS) {
1823 val = xudc_readl(xudc, CTRL);
1824 val |= CTRL_RUN;
1825 xudc_writel(xudc, val, CTRL);
1826
1827 xudc->device_state = USB_STATE_CONFIGURED;
1828 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1829 }
1830
1831 if (usb_endpoint_xfer_isoc(desc)) {
1832 /*
1833 * Pause all bulk endpoints when enabling an isoch endpoint
1834 * to ensure the isoch endpoint is allocated enough bandwidth.
1835 */
1836 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
1837 if (xudc->ep[i].desc &&
1838 usb_endpoint_xfer_bulk(xudc->ep[i].desc))
1839 ep_pause(xudc, i);
1840 }
1841 }
1842
1843 ep_reload(xudc, ep->index);
1844 ep_unpause(xudc, ep->index);
1845 ep_unhalt(xudc, ep->index);
1846
1847 if (usb_endpoint_xfer_isoc(desc)) {
1848 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
1849 if (xudc->ep[i].desc &&
1850 usb_endpoint_xfer_bulk(xudc->ep[i].desc))
1851 ep_unpause(xudc, i);
1852 }
1853 }
1854
1855 out:
1856 dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
1857 usb_ep_type_string(usb_endpoint_type(ep->desc)),
1858 usb_endpoint_dir_in(ep->desc) ? "in" : "out");
1859
1860 return 0;
1861 }
1862
tegra_xudc_ep_enable(struct usb_ep * usb_ep,const struct usb_endpoint_descriptor * desc)1863 static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
1864 const struct usb_endpoint_descriptor *desc)
1865 {
1866 struct tegra_xudc_ep *ep;
1867 struct tegra_xudc *xudc;
1868 unsigned long flags;
1869 int ret;
1870
1871 if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
1872 return -EINVAL;
1873
1874 ep = to_xudc_ep(usb_ep);
1875 xudc = ep->xudc;
1876
1877 spin_lock_irqsave(&xudc->lock, flags);
1878 if (xudc->powergated) {
1879 ret = -ESHUTDOWN;
1880 goto unlock;
1881 }
1882
1883 ret = __tegra_xudc_ep_enable(ep, desc);
1884 unlock:
1885 spin_unlock_irqrestore(&xudc->lock, flags);
1886
1887 return ret;
1888 }

static struct usb_request *
tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
{
	struct tegra_xudc_request *req;

	req = kzalloc(sizeof(*req), gfp);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->list);

	return &req->usb_req;
}

static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
				       struct usb_request *usb_req)
{
	struct tegra_xudc_request *req = to_xudc_req(usb_req);

	kfree(req);
}

static const struct usb_ep_ops tegra_xudc_ep_ops = {
	.enable = tegra_xudc_ep_enable,
	.disable = tegra_xudc_ep_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};

static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	return -EBUSY;
}

static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
{
	return -EBUSY;
}

static const struct usb_ep_ops tegra_xudc_ep0_ops = {
	.enable = tegra_xudc_ep0_enable,
	.disable = tegra_xudc_ep0_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};

static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
		MFINDEX_FRAME_SHIFT;
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

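/*
 * Drive the link back to U0 and restore the pre-suspend device state,
 * then ring the doorbell of every endpoint so that transfers queued
 * while the port was suspended start flowing again.
 */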
static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	ep_unpause_all(xudc);

	/* Direct link to U0. */
	val = xudc_readl(xudc, PORTSC);
	if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	if (xudc->device_state == USB_STATE_SUSPENDED) {
		xudc->device_state = xudc->resume_state;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
		xudc->resume_state = 0;
	}

	/*
	 * Doorbells may be dropped if they are sent too soon (< ~200ns)
	 * after unpausing the endpoint. Wait for 500ns just to be safe.
	 */
	ndelay(500);
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
}

static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret = 0;
	u32 val;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}
	val = xudc_readl(xudc, PORTPM);
	dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
		val, gadget->speed);

	if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
	     (val & PORTPM_RWE)) ||
	    ((xudc->gadget.speed == USB_SPEED_SUPER) &&
	     (val & PORTPM_FRWE))) {
		tegra_xudc_resume_device_state(xudc);

		/* Send Device Notification packet. */
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
				| DEVNOTIF_LO_TRIG;
			xudc_writel(xudc, 0, DEVNOTIF_HI);
			xudc_writel(xudc, val, DEVNOTIF_LO);
		}
	}

unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (is_on != xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		if (is_on)
			val |= CTRL_ENABLE;
		else
			val &= ~CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	xudc->pullup = is_on;
	dev_dbg(xudc->dev, "%s: pullup:%d\n", __func__, is_on);

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
				   struct usb_gadget_driver *driver)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	int ret;
	unsigned int i;

	if (!driver)
		return -EINVAL;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->driver) {
		ret = -EBUSY;
		goto unlock;
	}

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
	if (ret < 0)
		goto unlock;

	val = xudc_readl(xudc, CTRL);
	val |= CTRL_IE | CTRL_LSE;
	xudc_writel(xudc, val, CTRL);

	val = xudc_readl(xudc, PORTHALT);
	val |= PORTHALT_STCHG_INTR_EN;
	xudc_writel(xudc, val, PORTHALT);

	if (xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, gadget);

	xudc->driver = driver;
unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return ret;
}

static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, NULL);

	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_IE | CTRL_ENABLE);
	xudc_writel(xudc, val, CTRL);

	__tegra_xudc_ep_disable(&xudc->ep[0]);

	xudc->driver = NULL;
	dev_dbg(xudc->dev, "Gadget stopped\n");

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
				       unsigned int m_a)
{
	int ret = 0;
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);

	if (xudc->curr_usbphy && xudc->curr_usbphy->chg_type == SDP_TYPE)
		ret = usb_phy_set_power(xudc->curr_usbphy, m_a);

	return ret;
}

static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
	xudc->selfpowered = !!is_on;

	return 0;
}

static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
	.get_frame = tegra_xudc_gadget_get_frame,
	.wakeup = tegra_xudc_gadget_wakeup,
	.pullup = tegra_xudc_gadget_pullup,
	.udc_start = tegra_xudc_gadget_start,
	.udc_stop = tegra_xudc_gadget_stop,
	.vbus_draw = tegra_xudc_gadget_vbus_draw,
	.set_selfpowered = tegra_xudc_set_selfpowered,
};

static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
{
}

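/*
 * Queue a zero-length status-stage transfer on EP0 using the internal
 * request reserved for control transfers.
 */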
static int
tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
			    void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = NULL;
	xudc->ep0_req->usb_req.dma = 0;
	xudc->ep0_req->usb_req.length = 0;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}

static int
tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
			  void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = buf;
	xudc->ep0_req->usb_req.length = len;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}

static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
{
	switch (xudc->setup_state) {
	case DATA_STAGE_XFER:
		xudc->setup_state = STATUS_STAGE_RECV;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	case DATA_STAGE_RECV:
		xudc->setup_state = STATUS_STAGE_XFER;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	default:
		xudc->setup_state = WAIT_FOR_SETUP;
		break;
	}
}

static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&xudc->lock);
	ret = xudc->driver->setup(&xudc->gadget, ctrl);
	spin_lock(&xudc->lock);

	return ret;
}

static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if (xudc->test_mode_pattern) {
		xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
		xudc->test_mode_pattern = 0;
	}
}

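/*
 * Handle SET_FEATURE/CLEAR_FEATURE requests that involve the
 * controller: remote wakeup and U1/U2 enable bits in PORTPM,
 * high-speed test modes (the pattern is programmed from the status
 * stage completion callback above), function suspend (partially
 * delegated to the gadget driver) and endpoint halt.
 */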
static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	u32 feature = le16_to_cpu(ctrl->wValue);
	u32 index = le16_to_cpu(ctrl->wIndex);
	u32 val, ep;
	int ret;

	if (le16_to_cpu(ctrl->wLength) != 0)
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (feature) {
		case USB_DEVICE_REMOTE_WAKEUP:
			if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
			    (xudc->device_state == USB_STATE_DEFAULT))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if (set)
				val |= PORTPM_RWE;
			else
				val &= ~PORTPM_RWE;

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
			if ((xudc->device_state != USB_STATE_CONFIGURED) ||
			    (xudc->gadget.speed != USB_SPEED_SUPER))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if ((feature == USB_DEVICE_U1_ENABLE) &&
			    xudc->soc->u1_enable) {
				if (set)
					val |= PORTPM_U1E;
				else
					val &= ~PORTPM_U1E;
			}

			if ((feature == USB_DEVICE_U2_ENABLE) &&
			    xudc->soc->u2_enable) {
				if (set)
					val |= PORTPM_U2E;
				else
					val &= ~PORTPM_U2E;
			}

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_TEST_MODE:
			if (xudc->gadget.speed != USB_SPEED_HIGH)
				return -EINVAL;

			if (!set)
				return -EINVAL;

			xudc->test_mode_pattern = index >> 8;
			break;
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_INTERFACE:
		if (xudc->device_state != USB_STATE_CONFIGURED)
			return -EINVAL;

		switch (feature) {
		case USB_INTRF_FUNC_SUSPEND:
			if (set) {
				val = xudc_readl(xudc, PORTPM);

				if (index & USB_INTRF_FUNC_SUSPEND_RW)
					val |= PORTPM_FRWE;
				else
					val &= ~PORTPM_FRWE;

				xudc_writel(xudc, val, PORTPM);
			}

			return tegra_xudc_ep0_delegate_req(xudc, ctrl);
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_ENDPOINT:
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);

		if ((xudc->device_state == USB_STATE_DEFAULT) ||
		    ((xudc->device_state == USB_STATE_ADDRESS) &&
		     (index != 0)))
			return -EINVAL;

		ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
}

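/*
 * Build the two-byte GET_STATUS response for device, interface or
 * endpoint recipients and queue it on EP0.
 */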
static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
				     struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep_context *ep_ctx;
	u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
	u16 status = 0;

	if (!(ctrl->bRequestType & USB_DIR_IN))
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 2))
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		val = xudc_readl(xudc, PORTPM);

		if (xudc->selfpowered)
			status |= BIT(USB_DEVICE_SELF_POWERED);

		if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		    (val & PORTPM_RWE))
			status |= BIT(USB_DEVICE_REMOTE_WAKEUP);

		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			if (val & PORTPM_U1E)
				status |= BIT(USB_DEV_STAT_U1_ENABLED);
			if (val & PORTPM_U2E)
				status |= BIT(USB_DEV_STAT_U2_ENABLED);
		}
		break;
	case USB_RECIP_INTERFACE:
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			status |= USB_INTRF_STAT_FUNC_RW_CAP;
			val = xudc_readl(xudc, PORTPM);
			if (val & PORTPM_FRWE)
				status |= USB_INTRF_STAT_FUNC_RW;
		}
		break;
	case USB_RECIP_ENDPOINT:
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);
		ep_ctx = &xudc->ep_context[ep];

		if ((xudc->device_state != USB_STATE_CONFIGURED) &&
		    ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
			return -EINVAL;

		if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
			return -EINVAL;

		if (xudc_readl(xudc, EP_HALT) & BIT(ep))
			status |= BIT(USB_ENDPOINT_HALT);
		break;
	default:
		return -EINVAL;
	}

	xudc->status_buf = cpu_to_le16(status);
	return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
					 sizeof(xudc->status_buf),
					 no_op_complete);
}

static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with SEL values */
}

static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
				  struct usb_ctrlrequest *ctrl)
{
	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if (xudc->device_state == USB_STATE_DEFAULT)
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 6))
		return -EINVAL;

	return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
					 sizeof(xudc->sel_timing),
					 set_sel_complete);
}

static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* Nothing to do with isoch delay */
}

static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
					  struct usb_ctrlrequest *ctrl)
{
	u32 delay = le16_to_cpu(ctrl->wValue);

	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 0))
		return -EINVAL;

	xudc->isoch_delay = delay;

	return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
}

static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if ((xudc->device_state == USB_STATE_DEFAULT) &&
	    (xudc->dev_addr != 0)) {
		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	} else if ((xudc->device_state == USB_STATE_ADDRESS) &&
		   (xudc->dev_addr == 0)) {
		xudc->device_state = USB_STATE_DEFAULT;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}
}

static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	u32 val, addr = le16_to_cpu(ctrl->wValue);

	if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
				   USB_TYPE_STANDARD))
		return -EINVAL;

	if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 0))
		return -EINVAL;

	if (xudc->device_state == USB_STATE_CONFIGURED)
		return -EINVAL;

	dev_dbg(xudc->dev, "set address: %u\n", addr);

	xudc->dev_addr = addr;
	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_DEVADDR_MASK);
	val |= CTRL_DEVADDR(addr);
	xudc_writel(xudc, val, CTRL);

	ep_ctx_write_devaddr(ep0->context, addr);

	return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
}

static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
		ret = tegra_xudc_ep0_get_status(xudc, ctrl);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = tegra_xudc_ep0_set_address(xudc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
		ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
		ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
		ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
		/*
		 * In theory, the RUN bit needs to be cleared before the
		 * status stage of a deconfigure request is sent, but doing
		 * so appears to cause problems. Instead, clear RUN once all
		 * endpoints have been disabled.
		 */
		fallthrough;
	default:
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
		break;
	}

	return ret;
}

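/*
 * Dispatch a SETUP packet: standard requests that involve the
 * controller are handled here, everything else is delegated to the
 * gadget driver. On failure, EP0 is halted until the next SETUP
 * packet arrives.
 */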
static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
					       struct usb_ctrlrequest *ctrl,
					       u16 seq_num)
{
	int ret;

	xudc->setup_seq_num = seq_num;

	/* Ensure EP0 is unhalted. */
	ep_unhalt(xudc, 0);

	/*
	 * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
	 * are invalid. Halt EP0 until we get a valid packet.
	 */
	if (xudc->soc->invalid_seq_num &&
	    (seq_num == 0xfffe || seq_num == 0xffff)) {
		dev_warn(xudc->dev, "invalid sequence number detected\n");
		ep_halt(xudc, 0);
		return;
	}

	if (ctrl->wLength)
		xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
			DATA_STAGE_XFER : DATA_STAGE_RECV;
	else
		xudc->setup_state = STATUS_STAGE_XFER;

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
	else
		ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);

	if (ret < 0) {
		dev_warn(xudc->dev, "setup request failed: %d\n", ret);
		xudc->setup_state = WAIT_FOR_SETUP;
		ep_halt(xudc, 0);
	}
}

static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
					struct tegra_xudc_trb *event)
{
	struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
	u16 seq_num = trb_read_seq_num(event);

	if (xudc->setup_state != WAIT_FOR_SETUP) {
		/*
		 * The controller is in the process of handling another
		 * setup request. Queue subsequent requests and handle
		 * the last one once the controller reports a sequence
		 * number error.
		 */
		memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
		xudc->setup_packet.seq_num = seq_num;
		xudc->queued_setup_packet = true;
	} else {
		tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
	}
}

static struct tegra_xudc_request *
trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_request *req;

	list_for_each_entry(req, &ep->queue, list) {
		if (!req->trbs_queued)
			break;

		if (trb_in_request(ep, req, trb))
			return req;
	}

	return NULL;
}

static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
						  struct tegra_xudc_ep *ep,
						  struct tegra_xudc_trb *event)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_trb *trb;
	bool short_packet;

	short_packet = (trb_read_cmpl_code(event) ==
			TRB_CMPL_CODE_SHORT_PACKET);

	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	req = trb_to_request(ep, trb);

	/*
	 * TDs are complete on short packet or when the completed TRB is the
	 * last TRB in the TD (the CHAIN bit is unset).
	 */
	if (req && (short_packet || (!trb_read_chain(trb) &&
		(req->trbs_needed == req->trbs_queued)))) {
		struct tegra_xudc_trb *last = req->last_trb;
		unsigned int residual;

		residual = trb_read_transfer_len(event);
		req->usb_req.actual = req->usb_req.length - residual;

		dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
			req->usb_req.actual, req->usb_req.length);

		tegra_xudc_req_done(ep, req, 0);

		if (ep->desc && usb_endpoint_xfer_control(ep->desc))
			tegra_xudc_ep0_req_done(xudc);

		/*
		 * Advance the dequeue pointer past the end of the current TD
		 * on short packet completion.
		 */
		if (short_packet) {
			ep->deq_ptr = (last - ep->transfer_ring) + 1;
			if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
				ep->deq_ptr = 0;
		}
	} else if (!req) {
		dev_warn(xudc->dev, "transfer event on dequeued request\n");
	}

	if (ep->desc)
		tegra_xudc_ep_kick_queue(ep);
}

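/*
 * Process a transfer event TRB: advance the dequeue pointer, complete
 * the affected request on success or short packet, and recover from
 * stream rejection, babble and sequence number errors.
 */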
static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
					     struct tegra_xudc_trb *event)
{
	unsigned int ep_index = trb_read_endpoint_id(event);
	struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
	struct tegra_xudc_trb *trb;
	u16 comp_code;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
			 ep_index);
		return;
	}

	/* Update transfer ring dequeue pointer. */
	trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
	comp_code = trb_read_cmpl_code(event);
	if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
		ep->deq_ptr = (trb - ep->transfer_ring) + 1;

		if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
			ep->deq_ptr = 0;
		ep->ring_full = false;
	}

	switch (comp_code) {
	case TRB_CMPL_CODE_SUCCESS:
	case TRB_CMPL_CODE_SHORT_PACKET:
		tegra_xudc_handle_transfer_completion(xudc, ep, event);
		break;
	case TRB_CMPL_CODE_HOST_REJECTED:
		dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);

		ep->stream_rejected = true;
		break;
	case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
		dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);

		if (ep->stream_rejected) {
			ep->stream_rejected = false;
			/*
			 * An EP is stopped when a stream is rejected. Wait
			 * for the EP to report that it is stopped and then
			 * un-stop it.
			 */
			ep_wait_for_stopped(xudc, ep_index);
		}
		tegra_xudc_ep_ring_doorbell(ep);
		break;
	case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
		/*
		 * Wait for the EP to be stopped so the controller stops
		 * processing doorbells.
		 */
		ep_wait_for_stopped(xudc, ep_index);
		ep->enq_ptr = ep->deq_ptr;
		tegra_xudc_ep_nuke(ep, -EIO);
		fallthrough;
	case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
	case TRB_CMPL_CODE_CTRL_DIR_ERR:
	case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
	case TRB_CMPL_CODE_RING_UNDERRUN:
	case TRB_CMPL_CODE_RING_OVERRUN:
	case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
	case TRB_CMPL_CODE_USB_TRANS_ERR:
	case TRB_CMPL_CODE_TRB_ERR:
		dev_err(xudc->dev, "completion error %#x on EP %u\n",
			comp_code, ep_index);

		ep_halt(xudc, ep_index);
		break;
	case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
		dev_info(xudc->dev, "sequence number error\n");

		/*
		 * Kill any queued control request and skip to the last
		 * setup packet we received.
		 */
		tegra_xudc_ep_nuke(ep, -EINVAL);
		xudc->setup_state = WAIT_FOR_SETUP;
		if (!xudc->queued_setup_packet)
			break;

		tegra_xudc_handle_ep0_setup_packet(xudc,
						   &xudc->setup_packet.ctrl_req,
						   xudc->setup_packet.seq_num);
		xudc->queued_setup_packet = false;
		break;
	case TRB_CMPL_CODE_STOPPED:
		dev_dbg(xudc->dev, "stop completion code on EP %u\n",
			ep_index);

		/* Disconnected. */
		tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
		break;
	default:
		dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
			comp_code, ep_index);
		break;
	}
}

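/*
 * Return the device to its default state: fail all outstanding
 * requests, flush the EP0 transfer ring and reset the control
 * transfer tracking state.
 */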
static void tegra_xudc_reset(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	dma_addr_t deq_ptr;
	unsigned int i;

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ep_unpause_all(xudc);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);

	/*
	 * Reset sequence number and dequeue pointer to flush the transfer
	 * ring.
	 */
	ep0->deq_ptr = ep0->enq_ptr;
	ep0->ring_full = false;

	xudc->setup_seq_num = 0;
	xudc->queued_setup_packet = false;

	ep_ctx_write_rsvd(ep0->context, 0);
	ep_ctx_write_partial_td(ep0->context, 0);
	ep_ctx_write_splitxstate(ep0->context, 0);
	ep_ctx_write_seq_num(ep0->context, 0);

	deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);

	if (!dma_mapping_error(xudc->dev, deq_ptr)) {
		ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
		ep_ctx_write_dcs(ep0->context, ep0->pcs);
	}

	ep_unhalt_all(xudc);
	ep_reload(xudc, 0);
	ep_unpause(xudc, 0);
}

static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
{
	struct tegra_xudc_ep *ep0 = &xudc->ep[0];
	u16 maxpacket;
	u32 val;

	val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
	switch (val) {
	case PORTSC_PS_LS:
		xudc->gadget.speed = USB_SPEED_LOW;
		break;
	case PORTSC_PS_FS:
		xudc->gadget.speed = USB_SPEED_FULL;
		break;
	case PORTSC_PS_HS:
		xudc->gadget.speed = USB_SPEED_HIGH;
		break;
	case PORTSC_PS_SS:
		xudc->gadget.speed = USB_SPEED_SUPER;
		break;
	default:
		xudc->gadget.speed = USB_SPEED_UNKNOWN;
		break;
	}

	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	xudc->setup_state = WAIT_FOR_SETUP;

	if (xudc->gadget.speed == USB_SPEED_SUPER)
		maxpacket = 512;
	else
		maxpacket = 64;

	ep_ctx_write_max_packet_size(ep0->context, maxpacket);
	tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
	usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);

	if (!xudc->soc->u1_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U1TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	if (!xudc->soc->u2_enable) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_U2TIMEOUT_MASK);
		xudc_writel(xudc, val, PORTPM);
	}

	if (xudc->gadget.speed <= USB_SPEED_HIGH) {
		val = xudc_readl(xudc, PORTPM);
		val &= ~(PORTPM_L1S_MASK);
		if (xudc->soc->lpm_enable)
			val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
		else
			val |= PORTPM_L1S(PORTPM_L1S_NYET);
		xudc_writel(xudc, val, PORTPM);
	}

	val = xudc_readl(xudc, ST);
	if (val & ST_RC)
		xudc_writel(xudc, ST_RC, ST);
}

static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver && xudc->driver->disconnect) {
		spin_unlock(&xudc->lock);
		xudc->driver->disconnect(&xudc->gadget);
		spin_lock(&xudc->lock);
	}

	xudc->device_state = USB_STATE_NOTATTACHED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	complete(&xudc->disconnect_complete);
}

static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
{
	tegra_xudc_reset(xudc);

	if (xudc->driver) {
		spin_unlock(&xudc->lock);
		usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
		spin_lock(&xudc->lock);
	}

	tegra_xudc_port_connect(xudc);
}

static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port suspend\n");

	xudc->resume_state = xudc->device_state;
	xudc->device_state = USB_STATE_SUSPENDED;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	if (xudc->driver->suspend) {
		spin_unlock(&xudc->lock);
		xudc->driver->suspend(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}

static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
{
	dev_dbg(xudc->dev, "port resume\n");

	tegra_xudc_resume_device_state(xudc);

	if (xudc->driver->resume) {
		spin_unlock(&xudc->lock);
		xudc->driver->resume(&xudc->gadget);
		spin_lock(&xudc->lock);
	}
}

static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
{
	u32 val;

	val = xudc_readl(xudc, PORTSC);
	val &= ~PORTSC_CHANGE_MASK;
	val |= flag;
	xudc_writel(xudc, val, PORTSC);
}

static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
	u32 portsc, porthalt;

	porthalt = xudc_readl(xudc, PORTHALT);
	if ((porthalt & PORTHALT_STCHG_REQ) &&
	    (porthalt & PORTHALT_HALT_LTSSM)) {
		dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
		porthalt &= ~PORTHALT_HALT_LTSSM;
		xudc_writel(xudc, porthalt, PORTHALT);
	}

	portsc = xudc_readl(xudc, PORTSC);
	if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
#define TOGGLE_VBUS_WAIT_MS 100
		if (xudc->soc->port_reset_quirk) {
			schedule_delayed_work(&xudc->port_reset_war_work,
				msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_for_sec_prc = 1;
		}
	}

	if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
		dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
		tegra_xudc_port_reset(xudc);
		cancel_delayed_work(&xudc->port_reset_war_work);
		xudc->wait_for_sec_prc = 0;
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_WRC) {
		dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
		if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
			tegra_xudc_port_reset(xudc);
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_CSC) {
		dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CSC);

		if (portsc & PORTSC_CCS)
			tegra_xudc_port_connect(xudc);
		else
			tegra_xudc_port_disconnect(xudc);

		if (xudc->wait_csc) {
			cancel_delayed_work(&xudc->plc_reset_work);
			xudc->wait_csc = false;
		}
	}

	portsc = xudc_readl(xudc, PORTSC);
	if (portsc & PORTSC_PLC) {
		u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;

		dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_PLC);
		switch (pls) {
		case PORTSC_PLS_U3:
			tegra_xudc_port_suspend(xudc);
			break;
		case PORTSC_PLS_U0:
			if (xudc->gadget.speed < USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_RESUME:
			if (xudc->gadget.speed == USB_SPEED_SUPER)
				tegra_xudc_port_resume(xudc);
			break;
		case PORTSC_PLS_INACTIVE:
			schedule_delayed_work(&xudc->plc_reset_work,
				msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
			xudc->wait_csc = true;
			break;
		default:
			break;
		}
	}

	if (portsc & PORTSC_CEC) {
		dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
		clear_port_change(xudc, PORTSC_CEC);
	}

	dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
}

static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
	while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
	       (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
		__tegra_xudc_handle_port_status(xudc);
}

static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
				    struct tegra_xudc_trb *event)
{
	u32 type = trb_read_type(event);

	dump_trb(xudc, "EVENT", event);

	switch (type) {
	case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
		tegra_xudc_handle_port_status(xudc);
		break;
	case TRB_TYPE_TRANSFER_EVENT:
		tegra_xudc_handle_transfer_event(xudc, event);
		break;
	case TRB_TYPE_SETUP_PACKET_EVENT:
		tegra_xudc_handle_ep0_event(xudc, event);
		break;
	default:
		dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
		break;
	}
}

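/*
 * Consume event TRBs until one with a stale cycle bit is found, then
 * write the new dequeue pointer (with the event handler busy bit set)
 * back to the controller.
 */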
static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
{
	struct tegra_xudc_trb *event;
	dma_addr_t erdp;

	while (true) {
		event = xudc->event_ring[xudc->event_ring_index] +
			xudc->event_ring_deq_ptr;

		if (trb_read_cycle(event) != xudc->ccs)
			break;

		tegra_xudc_handle_event(xudc, event);

		xudc->event_ring_deq_ptr++;
		if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
			xudc->event_ring_deq_ptr = 0;
			xudc->event_ring_index++;
		}

		if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
			xudc->event_ring_index = 0;
			xudc->ccs = !xudc->ccs;
		}
	}

	erdp = xudc->event_ring_phys[xudc->event_ring_index] +
		xudc->event_ring_deq_ptr * sizeof(*event);

	xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
	xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
}

static irqreturn_t tegra_xudc_irq(int irq, void *data)
{
	struct tegra_xudc *xudc = data;
	unsigned long flags;
	u32 val;

	val = xudc_readl(xudc, ST);
	if (!(val & ST_IP))
		return IRQ_NONE;
	xudc_writel(xudc, ST_IP, ST);

	spin_lock_irqsave(&xudc->lock, flags);
	tegra_xudc_process_event_ring(xudc);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return IRQ_HANDLED;
}

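/*
 * Allocate the per-endpoint software state and transfer ring and
 * register the endpoint with the gadget framework. EP0 gets its own
 * ops and is not added to the gadget's endpoint list.
 */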
static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	ep->xudc = xudc;
	ep->index = index;
	ep->context = &xudc->ep_context[index];
	INIT_LIST_HEAD(&ep->queue);

	/*
	 * EP1 would be the input endpoint corresponding to EP0, but since
	 * EP0 is bi-directional, EP1 is unused.
	 */
	if (index == 1)
		return 0;

	ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
					   GFP_KERNEL,
					   &ep->transfer_ring_phys);
	if (!ep->transfer_ring)
		return -ENOMEM;

	if (index) {
		snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
			 (index % 2 == 0) ? "out" : "in");
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.max_streams = 16;
		ep->usb_ep.ops = &tegra_xudc_ep_ops;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		if (index & 1)
			ep->usb_ep.caps.dir_in = true;
		else
			ep->usb_ep.caps.dir_out = true;
		list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
	} else {
		strscpy(ep->name, "ep0", sizeof(ep->name));
		ep->usb_ep.name = ep->name;
		usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
		ep->usb_ep.ops = &tegra_xudc_ep0_ops;
		ep->usb_ep.caps.type_control = true;
		ep->usb_ep.caps.dir_in = true;
		ep->usb_ep.caps.dir_out = true;
	}

	return 0;
}

static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
{
	struct tegra_xudc_ep *ep = &xudc->ep[index];

	/*
	 * EP1 would be the input endpoint corresponding to EP0, but since
	 * EP0 is bi-directional, EP1 is unused.
	 */
	if (index == 1)
		return;

	dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
		      ep->transfer_ring_phys);
}

static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
{
	struct usb_request *req;
	unsigned int i;
	int err;

	xudc->ep_context =
		dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
				   sizeof(*xudc->ep_context),
				   &xudc->ep_context_phys, GFP_KERNEL);
	if (!xudc->ep_context)
		return -ENOMEM;

	xudc->transfer_ring_pool =
		dmam_pool_create(dev_name(xudc->dev), xudc->dev,
				 XUDC_TRANSFER_RING_SIZE *
				 sizeof(struct tegra_xudc_trb),
				 sizeof(struct tegra_xudc_trb), 0);
	if (!xudc->transfer_ring_pool) {
		err = -ENOMEM;
		goto free_ep_context;
	}

	INIT_LIST_HEAD(&xudc->gadget.ep_list);
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
		err = tegra_xudc_alloc_ep(xudc, i);
		if (err < 0)
			goto free_eps;
	}

	req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_eps;
	}
	xudc->ep0_req = to_xudc_req(req);

	return 0;

free_eps:
	for (; i > 0; i--)
		tegra_xudc_free_ep(xudc, i - 1);
free_ep_context:
	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);
	return err;
}

static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
{
	xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
	xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
}

static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
{
	unsigned int i;

	tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
				   &xudc->ep0_req->usb_req);

	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_free_ep(xudc, i);

	dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
			  xudc->ep_context, xudc->ep_context_phys);
}

static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		xudc->event_ring[i] =
			dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
					   sizeof(*xudc->event_ring[i]),
					   &xudc->event_ring_phys[i],
					   GFP_KERNEL);
		if (!xudc->event_ring[i])
			goto free_dma;
	}

	return 0;

free_dma:
	for (; i > 0; i--) {
		dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
				  sizeof(*xudc->event_ring[i - 1]),
				  xudc->event_ring[i - 1],
				  xudc->event_ring_phys[i - 1]);
	}
	return -ENOMEM;
}

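/*
 * Zero the event ring segments and program the segment sizes, segment
 * base addresses and dequeue/enqueue pointers into the controller.
 */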
static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
		       sizeof(*xudc->event_ring[i]));

		val = xudc_readl(xudc, ERSTSZ);
		val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
		val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
		xudc_writel(xudc, val, ERSTSZ);

		xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBALO(i));
		xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
			    ERSTXBAHI(i));
	}

	val = lower_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPLO);
	val |= EREPLO_ECS;
	xudc_writel(xudc, val, EREPLO);

	val = upper_32_bits(xudc->event_ring_phys[0]);
	xudc_writel(xudc, val, ERDPHI);
	xudc_writel(xudc, val, EREPHI);

	xudc->ccs = true;
	xudc->event_ring_index = 0;
	xudc->event_ring_deq_ptr = 0;
}

static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
		dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
				  sizeof(*xudc->event_ring[i]),
				  xudc->event_ring[i],
				  xudc->event_ring_phys[i]);
	}
}

static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
{
	u32 val;

	if (xudc->soc->has_ipfs) {
		val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
		val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
		ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
		usleep_range(10, 15);
	}

	/* Enable bus master */
	val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
		XUSB_DEV_CFG_1_BUS_MASTER_EN;
	fpci_writel(xudc, val, XUSB_DEV_CFG_1);

	/* Program BAR0 space */
	val = fpci_readl(xudc, XUSB_DEV_CFG_4);
	val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
	val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);

	fpci_writel(xudc, val, XUSB_DEV_CFG_4);
	fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);

	usleep_range(100, 200);

	if (xudc->soc->has_ipfs) {
		/* Enable interrupt assertion */
		val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
		val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
		ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
	}
}

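/*
 * Program controller-wide parameters: block-level clock gating on SoCs
 * that have it, link timing and timeout tweaks, directing the HS/FS
 * and SS port instances to RxDetect, and interrupt moderation.
 */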
static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
{
	u32 val, imod;

	if (xudc->soc->has_ipfs) {
		val = xudc_readl(xudc, BLCG);
		val |= BLCG_ALL;
		val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
			 BLCG_COREPLL_PWRDN);
		val |= BLCG_IOPLL_0_PWRDN;
		val |= BLCG_IOPLL_1_PWRDN;
		val |= BLCG_IOPLL_2_PWRDN;

		xudc_writel(xudc, val, BLCG);
	}

	if (xudc->soc->port_speed_quirk)
		tegra_xudc_limit_port_speed(xudc);

	/* Set a reasonable U3 exit timer value. */
	val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
	val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
	val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
	xudc_writel(xudc, val, SSPX_CORE_PADCTL4);

	/* Default ping LFPS tBurst is too large. */
	val = xudc_readl(xudc, SSPX_CORE_CNT0);
	val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
	val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
	xudc_writel(xudc, val, SSPX_CORE_CNT0);

	/* Default tPortConfiguration timeout is too small. */
	val = xudc_readl(xudc, SSPX_CORE_CNT30);
	val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
	val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
	xudc_writel(xudc, val, SSPX_CORE_CNT30);

	if (xudc->soc->lpm_enable) {
		/* Set L1 resume duration to 95 us. */
		val = xudc_readl(xudc, HSFSPI_COUNT13);
		val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
		val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
		xudc_writel(xudc, val, HSFSPI_COUNT13);
	}

	/*
	 * The compliance suite appears to violate the polling LFPS tBurst
	 * max of 1.4us. Send 1.45us instead.
	 */
	val = xudc_readl(xudc, SSPX_CORE_CNT32);
	val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
	val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
	xudc_writel(xudc, val, SSPX_CORE_CNT32);

	/* Direct HS/FS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Direct SS port instance to RxDetect. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
	xudc_writel(xudc, val, CFG_DEV_FE);

	val = xudc_readl(xudc, PORTSC);
	val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
	val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
	xudc_writel(xudc, val, PORTSC);

	/* Restore port instance. */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
	xudc_writel(xudc, val, CFG_DEV_FE);

	/*
	 * Enable INFINITE_SS_RETRY to prevent device from entering
	 * Disabled.Error when attached to buggy SuperSpeed hubs.
	 */
	val = xudc_readl(xudc, CFG_DEV_FE);
	val |= CFG_DEV_FE_INFINITE_SS_RETRY;
	xudc_writel(xudc, val, CFG_DEV_FE);

	/* Set interrupt moderation. */
	imod = XUDC_INTERRUPT_MODERATION_US * 4;
	val = xudc_readl(xudc, RT_IMOD);
	val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
	val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
	xudc_writel(xudc, val, RT_IMOD);

	/* Increase SSPI transaction timeout from 32us to 512us. */
	val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
	val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
	val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
	xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
}

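/*
 * Look up the UTMI (USB2), companion USB3 and legacy USB PHYs for
 * each port. The PHYs are optional; a port without a UTMI PHY is
 * simply skipped.
 */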
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
	int err = 0, usb3;
	unsigned int i;

	xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->utmi_phy), GFP_KERNEL);
	if (!xudc->utmi_phy)
		return -ENOMEM;

	xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				      sizeof(*xudc->usb3_phy), GFP_KERNEL);
	if (!xudc->usb3_phy)
		return -ENOMEM;

	xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
				    sizeof(*xudc->usbphy), GFP_KERNEL);
	if (!xudc->usbphy)
		return -ENOMEM;

	xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		char phy_name[] = "usb.-.";

		/* Get USB2 phy */
		snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
		xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->utmi_phy[i])) {
			err = PTR_ERR(xudc->utmi_phy[i]);
			dev_err_probe(xudc->dev, err,
				      "failed to get usb2-%d PHY\n", i);
			goto clean_up;
		} else if (xudc->utmi_phy[i]) {
			/* Get usb-phy, if utmi phy is available */
			xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
						xudc->utmi_phy[i]->dev.of_node,
						&xudc->vbus_nb);
			if (IS_ERR(xudc->usbphy[i])) {
				err = PTR_ERR(xudc->usbphy[i]);
				dev_err_probe(xudc->dev, err,
					      "failed to get usbphy-%d\n", i);
				goto clean_up;
			}
		} else if (!xudc->utmi_phy[i]) {
			/* If utmi phy is not available, skip USB3 phy get */
			continue;
		}

		/* Get USB3 phy */
		usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
		if (usb3 < 0)
			continue;

		snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
		xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
		if (IS_ERR(xudc->usb3_phy[i])) {
			err = PTR_ERR(xudc->usb3_phy[i]);
			dev_err_probe(xudc->dev, err,
				      "failed to get usb3-%d PHY\n", usb3);
			goto clean_up;
		} else if (xudc->usb3_phy[i])
			dev_dbg(xudc->dev, "usb3-%d PHY registered\n", usb3);
	}

	return err;

clean_up:
	for (i = 0; i < xudc->soc->num_phys; i++) {
		xudc->usb3_phy[i] = NULL;
		xudc->utmi_phy[i] = NULL;
		xudc->usbphy[i] = NULL;
	}

	return err;
}

static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		phy_exit(xudc->usb3_phy[i]);
		phy_exit(xudc->utmi_phy[i]);
	}
}

static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
{
	int err;
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		err = phy_init(xudc->utmi_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err);
			goto exit_phy;
		}

		err = phy_init(xudc->usb3_phy[i]);
		if (err < 0) {
			dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err);
			goto exit_phy;
		}
	}
	return 0;

exit_phy:
	tegra_xudc_phy_exit(xudc);
	return err;
}

3593 static const char * const tegra210_xudc_supply_names[] = {
3594 "hvdd-usb",
3595 "avddio-usb",
3596 };
3597
3598 static const char * const tegra210_xudc_clock_names[] = {
3599 "dev",
3600 "ss",
3601 "ss_src",
3602 "hs_src",
3603 "fs_src",
3604 };
3605
3606 static const char * const tegra186_xudc_clock_names[] = {
3607 "dev",
3608 "ss",
3609 "ss_src",
3610 "fs_src",
3611 };
3612
3613 static struct tegra_xudc_soc tegra210_xudc_soc_data = {
3614 .supply_names = tegra210_xudc_supply_names,
3615 .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
3616 .clock_names = tegra210_xudc_clock_names,
3617 .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
3618 .num_phys = 4,
3619 .u1_enable = false,
3620 .u2_enable = true,
3621 .lpm_enable = false,
3622 .invalid_seq_num = true,
3623 .pls_quirk = true,
3624 .port_reset_quirk = true,
3625 .port_speed_quirk = false,
3626 .has_ipfs = true,
3627 };
3628
3629 static struct tegra_xudc_soc tegra186_xudc_soc_data = {
3630 .clock_names = tegra186_xudc_clock_names,
3631 .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
3632 .num_phys = 4,
3633 .u1_enable = true,
3634 .u2_enable = true,
3635 .lpm_enable = false,
3636 .invalid_seq_num = false,
3637 .pls_quirk = false,
3638 .port_reset_quirk = false,
3639 .port_speed_quirk = false,
3640 .has_ipfs = false,
3641 };
3642
3643 static struct tegra_xudc_soc tegra194_xudc_soc_data = {
3644 .clock_names = tegra186_xudc_clock_names,
3645 .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
3646 .num_phys = 4,
3647 .u1_enable = true,
3648 .u2_enable = true,
3649 .lpm_enable = true,
3650 .invalid_seq_num = false,
3651 .pls_quirk = false,
3652 .port_reset_quirk = false,
3653 .port_speed_quirk = true,
3654 .has_ipfs = false,
3655 };
3656
static const struct of_device_id tegra_xudc_of_match[] = {
	{
		.compatible = "nvidia,tegra210-xudc",
		.data = &tegra210_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra186-xudc",
		.data = &tegra186_xudc_soc_data
	},
	{
		.compatible = "nvidia,tegra194-xudc",
		.data = &tegra194_xudc_soc_data
	},
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);

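/*
 * Detach from the "dev" and "ss" power domains and drop the associated
 * device links. The IS_ERR_OR_NULL() guards make this safe to call from
 * the init error path, where a failed attach may have left an ERR_PTR
 * behind rather than NULL.
 */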
static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
{
	if (xudc->genpd_dl_ss)
		device_link_del(xudc->genpd_dl_ss);
	if (xudc->genpd_dl_device)
		device_link_del(xudc->genpd_dl_device);
	if (!IS_ERR_OR_NULL(xudc->genpd_dev_ss))
		dev_pm_domain_detach(xudc->genpd_dev_ss, true);
	if (!IS_ERR_OR_NULL(xudc->genpd_dev_device))
		dev_pm_domain_detach(xudc->genpd_dev_device, true);
}

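/*
 * Attach the controller to its "dev" and "ss" power domains by name and
 * add stateless, runtime-PM-managed device links so both domains track
 * the controller's runtime PM state. Partial failures are unwound by
 * the caller via tegra_xudc_powerdomain_remove().
 */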
static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
{
	struct device *dev = xudc->dev;
	int err;

	xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
	if (IS_ERR(xudc->genpd_dev_device)) {
		err = PTR_ERR(xudc->genpd_dev_device);
		dev_err(dev, "failed to get device power domain: %d\n", err);
		return err;
	}

	xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
	if (IS_ERR(xudc->genpd_dev_ss)) {
		err = PTR_ERR(xudc->genpd_dev_ss);
		dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
		return err;
	}

	xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
						DL_FLAG_PM_RUNTIME |
						DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_device) {
		dev_err(dev, "failed to add USB device link\n");
		return -ENODEV;
	}

	xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
					    DL_FLAG_PM_RUNTIME |
					    DL_FLAG_STATELESS);
	if (!xudc->genpd_dl_ss) {
		dev_err(dev, "failed to add SuperSpeed device link\n");
		return -ENODEV;
	}

	return 0;
}

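/*
 * Probe: map the "base", "fpci" and (on Tegra210) "ipfs" register
 * spaces, request the IRQ, clocks and regulators, acquire padctl and
 * the PHYs, attach power domains, initialize the PHYs, allocate the
 * event ring and endpoints, and finally register the UDC. The error
 * labels at the bottom unwind these steps in reverse order.
 */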
static int tegra_xudc_probe(struct platform_device *pdev)
{
	struct tegra_xudc *xudc;
	struct resource *res;
	unsigned int i;
	int err;

	xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
	if (!xudc)
		return -ENOMEM;

	xudc->dev = &pdev->dev;
	platform_set_drvdata(pdev, xudc);

	xudc->soc = of_device_get_match_data(&pdev->dev);
	if (!xudc->soc)
		return -ENODEV;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	xudc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xudc->base))
		return PTR_ERR(xudc->base);
	xudc->phys_base = res->start;

	xudc->fpci = devm_platform_ioremap_resource_byname(pdev, "fpci");
	if (IS_ERR(xudc->fpci))
		return PTR_ERR(xudc->fpci);

	if (xudc->soc->has_ipfs) {
		xudc->ipfs = devm_platform_ioremap_resource_byname(pdev, "ipfs");
		if (IS_ERR(xudc->ipfs))
			return PTR_ERR(xudc->ipfs);
	}

	xudc->irq = platform_get_irq(pdev, 0);
	if (xudc->irq < 0)
		return xudc->irq;

	err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
			       dev_name(&pdev->dev), xudc);
	if (err < 0) {
		dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
			err);
		return err;
	}

	xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, sizeof(*xudc->clks),
				  GFP_KERNEL);
	if (!xudc->clks)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_clks; i++)
		xudc->clks[i].id = xudc->soc->clock_names[i];

	err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
	if (err) {
		dev_err_probe(xudc->dev, err, "failed to request clocks\n");
		return err;
	}

	xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
				      sizeof(*xudc->supplies), GFP_KERNEL);
	if (!xudc->supplies)
		return -ENOMEM;

	for (i = 0; i < xudc->soc->num_supplies; i++)
		xudc->supplies[i].supply = xudc->soc->supply_names[i];

	err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
				      xudc->supplies);
	if (err) {
		dev_err_probe(xudc->dev, err, "failed to request regulators\n");
		return err;
	}

	xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
	if (IS_ERR(xudc->padctl))
		return PTR_ERR(xudc->padctl);

	err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
	if (err) {
		dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
		goto put_padctl;
	}

	err = tegra_xudc_phy_get(xudc);
	if (err)
		goto disable_regulator;

	err = tegra_xudc_powerdomain_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_phy_init(xudc);
	if (err)
		goto put_powerdomains;

	err = tegra_xudc_alloc_event_ring(xudc);
	if (err)
		goto disable_phy;

	err = tegra_xudc_alloc_eps(xudc);
	if (err)
		goto free_event_ring;

	spin_lock_init(&xudc->lock);

	init_completion(&xudc->disconnect_complete);

	INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);

	INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);

	INIT_DELAYED_WORK(&xudc->port_reset_war_work,
			  tegra_xudc_port_reset_war_work);

	pm_runtime_enable(&pdev->dev);

	xudc->gadget.ops = &tegra_xudc_gadget_ops;
	xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
	xudc->gadget.name = "tegra-xudc";
	xudc->gadget.max_speed = USB_SPEED_SUPER;

	err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
	if (err) {
		dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
		goto free_eps;
	}

	return 0;

free_eps:
	pm_runtime_disable(&pdev->dev);
	tegra_xudc_free_eps(xudc);
free_event_ring:
	tegra_xudc_free_event_ring(xudc);
disable_phy:
	tegra_xudc_phy_exit(xudc);
put_powerdomains:
	tegra_xudc_powerdomain_remove(xudc);
disable_regulator:
	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
put_padctl:
	tegra_xusb_padctl_put(xudc->padctl);

	return err;
}

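/*
 * Teardown mirrors probe. pm_runtime_get_sync() keeps the hardware
 * powered while the gadget, endpoints and event ring are released; the
 * reference is dropped again once runtime PM has been disabled.
 */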
static int tegra_xudc_remove(struct platform_device *pdev)
{
	struct tegra_xudc *xudc = platform_get_drvdata(pdev);
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	cancel_delayed_work_sync(&xudc->plc_reset_work);
	cancel_work_sync(&xudc->usb_role_sw_work);

	usb_del_gadget_udc(&xudc->gadget);

	tegra_xudc_free_eps(xudc);
	tegra_xudc_free_event_ring(xudc);

	tegra_xudc_powerdomain_remove(xudc);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	for (i = 0; i < xudc->soc->num_phys; i++) {
		phy_power_off(xudc->utmi_phy[i]);
		phy_power_off(xudc->usb3_phy[i]);
	}

	tegra_xudc_phy_exit(xudc);

	pm_runtime_disable(xudc->dev);
	pm_runtime_put(xudc->dev);

	tegra_xusb_padctl_put(xudc->padctl);

	return 0;
}

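/*
 * Enter ELPG (engine-level powergating): save the CTRL and PORTPM
 * registers, stop the controller, then gate the clocks and supplies.
 * tegra_xudc_unpowergate() below restores the saved state.
 */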
static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
{
	unsigned long flags;

	dev_dbg(xudc->dev, "entering ELPG\n");

	spin_lock_irqsave(&xudc->lock, flags);

	xudc->powergated = true;
	xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
	xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
	xudc_writel(xudc, 0, CTRL);

	spin_unlock_irqrestore(&xudc->lock, flags);

	clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);

	regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

	dev_dbg(xudc->dev, "entering ELPG done\n");
	return 0;
}

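/*
 * Exit ELPG: re-enable supplies and clocks, reprogram the controller
 * (FPCI/IPFS, device parameters, event ring and endpoints), and restore
 * the registers saved in tegra_xudc_powergate().
 */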
static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
{
	unsigned long flags;
	int err;

	dev_dbg(xudc->dev, "exiting ELPG\n");

	err = regulator_bulk_enable(xudc->soc->num_supplies,
				    xudc->supplies);
	if (err < 0)
		return err;

	err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
	if (err < 0)
		return err;

	tegra_xudc_fpci_ipfs_init(xudc);

	tegra_xudc_device_params_init(xudc);

	tegra_xudc_init_event_ring(xudc);

	tegra_xudc_init_eps(xudc);

	xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
	xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->powergated = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	dev_dbg(xudc->dev, "exiting ELPG done\n");
	return 0;
}

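/*
 * System sleep: mark the controller suspended, flush any pending role
 * switch, and unless it is already runtime-suspended, force-disconnect
 * and powergate it. Runtime PM is disabled here and re-enabled in
 * resume so the sleep path fully owns the power state in between.
 */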
static int __maybe_unused tegra_xudc_suspend(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = true;
	spin_unlock_irqrestore(&xudc->lock, flags);

	flush_work(&xudc->usb_role_sw_work);

	if (!pm_runtime_status_suspended(dev)) {
		/* Forcibly disconnect before powergating. */
		tegra_xudc_device_mode_off(xudc);
		tegra_xudc_powergate(xudc);
	}

	pm_runtime_disable(dev);

	return 0;
}

static int __maybe_unused tegra_xudc_resume(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	err = tegra_xudc_unpowergate(xudc);
	if (err < 0)
		return err;

	spin_lock_irqsave(&xudc->lock, flags);
	xudc->suspended = false;
	spin_unlock_irqrestore(&xudc->lock, flags);

	schedule_work(&xudc->usb_role_sw_work);

	pm_runtime_enable(dev);

	return 0;
}

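/* Runtime PM simply enters and exits ELPG. */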
static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);

	return tegra_xudc_powergate(xudc);
}

static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
{
	struct tegra_xudc *xudc = dev_get_drvdata(dev);

	return tegra_xudc_unpowergate(xudc);
}

static const struct dev_pm_ops tegra_xudc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
	SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
			   tegra_xudc_runtime_resume, NULL)
};

static struct platform_driver tegra_xudc_driver = {
	.probe = tegra_xudc_probe,
	.remove = tegra_xudc_remove,
	.driver = {
		.name = "tegra-xudc",
		.pm = &tegra_xudc_pm_ops,
		.of_match_table = tegra_xudc_of_match,
	},
};
module_platform_driver(tegra_xudc_driver);

MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
MODULE_LICENSE("GPL v2");