1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2015-2017 Google, Inc
4  *
5  * USB Power Delivery protocol stack.
6  */
7 
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/string_choices.h>
25 #include <linux/usb.h>
26 #include <linux/usb/pd.h>
27 #include <linux/usb/pd_ado.h>
28 #include <linux/usb/pd_bdo.h>
29 #include <linux/usb/pd_ext_sdb.h>
30 #include <linux/usb/pd_vdo.h>
31 #include <linux/usb/role.h>
32 #include <linux/usb/tcpm.h>
33 #include <linux/usb/typec_altmode.h>
34 
35 #include <uapi/linux/sched/types.h>
36 
37 #define FOREACH_STATE(S)			\
38 	S(INVALID_STATE),			\
39 	S(TOGGLING),			\
40 	S(CHECK_CONTAMINANT),			\
41 	S(SRC_UNATTACHED),			\
42 	S(SRC_ATTACH_WAIT),			\
43 	S(SRC_ATTACHED),			\
44 	S(SRC_STARTUP),				\
45 	S(SRC_SEND_CAPABILITIES),		\
46 	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
47 	S(SRC_NEGOTIATE_CAPABILITIES),		\
48 	S(SRC_TRANSITION_SUPPLY),		\
49 	S(SRC_READY),				\
50 	S(SRC_WAIT_NEW_CAPABILITIES),		\
51 						\
52 	S(SNK_UNATTACHED),			\
53 	S(SNK_ATTACH_WAIT),			\
54 	S(SNK_DEBOUNCED),			\
55 	S(SNK_ATTACHED),			\
56 	S(SNK_STARTUP),				\
57 	S(SNK_DISCOVERY),			\
58 	S(SNK_DISCOVERY_DEBOUNCE),		\
59 	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
60 	S(SNK_WAIT_CAPABILITIES),		\
61 	S(SNK_WAIT_CAPABILITIES_TIMEOUT),	\
62 	S(SNK_NEGOTIATE_CAPABILITIES),		\
63 	S(SNK_NEGOTIATE_PPS_CAPABILITIES),	\
64 	S(SNK_TRANSITION_SINK),			\
65 	S(SNK_TRANSITION_SINK_VBUS),		\
66 	S(SNK_READY),				\
67 						\
68 	S(ACC_UNATTACHED),			\
69 	S(DEBUG_ACC_ATTACHED),			\
70 	S(AUDIO_ACC_ATTACHED),			\
71 	S(AUDIO_ACC_DEBOUNCE),			\
72 						\
73 	S(HARD_RESET_SEND),			\
74 	S(HARD_RESET_START),			\
75 	S(SRC_HARD_RESET_VBUS_OFF),		\
76 	S(SRC_HARD_RESET_VBUS_ON),		\
77 	S(SNK_HARD_RESET_SINK_OFF),		\
78 	S(SNK_HARD_RESET_WAIT_VBUS),		\
79 	S(SNK_HARD_RESET_SINK_ON),		\
80 						\
81 	S(SOFT_RESET),				\
82 	S(SRC_SOFT_RESET_WAIT_SNK_TX),		\
83 	S(SNK_SOFT_RESET),			\
84 	S(SOFT_RESET_SEND),			\
85 						\
86 	S(DR_SWAP_ACCEPT),			\
87 	S(DR_SWAP_SEND),			\
88 	S(DR_SWAP_SEND_TIMEOUT),		\
89 	S(DR_SWAP_CANCEL),			\
90 	S(DR_SWAP_CHANGE_DR),			\
91 						\
92 	S(PR_SWAP_ACCEPT),			\
93 	S(PR_SWAP_SEND),			\
94 	S(PR_SWAP_SEND_TIMEOUT),		\
95 	S(PR_SWAP_CANCEL),			\
96 	S(PR_SWAP_START),			\
97 	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
98 	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
99 	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
100 	S(PR_SWAP_SRC_SNK_SINK_ON),		\
101 	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
102 	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
103 	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP),    \
104 						\
105 	S(VCONN_SWAP_ACCEPT),			\
106 	S(VCONN_SWAP_SEND),			\
107 	S(VCONN_SWAP_SEND_TIMEOUT),		\
108 	S(VCONN_SWAP_CANCEL),			\
109 	S(VCONN_SWAP_START),			\
110 	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
111 	S(VCONN_SWAP_TURN_ON_VCONN),		\
112 	S(VCONN_SWAP_TURN_OFF_VCONN),		\
113 	S(VCONN_SWAP_SEND_SOFT_RESET),		\
114 						\
115 	S(FR_SWAP_SEND),			\
116 	S(FR_SWAP_SEND_TIMEOUT),		\
117 	S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF),			\
118 	S(FR_SWAP_SNK_SRC_NEW_SINK_READY),		\
119 	S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED),	\
120 	S(FR_SWAP_CANCEL),			\
121 						\
122 	S(SNK_TRY),				\
123 	S(SNK_TRY_WAIT),			\
124 	S(SNK_TRY_WAIT_DEBOUNCE),               \
125 	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),    \
126 	S(SRC_TRYWAIT),				\
127 	S(SRC_TRYWAIT_DEBOUNCE),		\
128 	S(SRC_TRYWAIT_UNATTACHED),		\
129 						\
130 	S(SRC_TRY),				\
131 	S(SRC_TRY_WAIT),                        \
132 	S(SRC_TRY_DEBOUNCE),			\
133 	S(SNK_TRYWAIT),				\
134 	S(SNK_TRYWAIT_DEBOUNCE),		\
135 	S(SNK_TRYWAIT_VBUS),			\
136 	S(BIST_RX),				\
137 						\
138 	S(GET_STATUS_SEND),			\
139 	S(GET_STATUS_SEND_TIMEOUT),		\
140 	S(GET_PPS_STATUS_SEND),			\
141 	S(GET_PPS_STATUS_SEND_TIMEOUT),		\
142 						\
143 	S(GET_SINK_CAP),			\
144 	S(GET_SINK_CAP_TIMEOUT),		\
145 						\
146 	S(ERROR_RECOVERY),			\
147 	S(PORT_RESET),				\
148 	S(PORT_RESET_WAIT_OFF),			\
149 						\
150 	S(AMS_START),				\
151 	S(CHUNK_NOT_SUPP),			\
152 						\
153 	S(SRC_VDM_IDENTITY_REQUEST)
154 
155 #define FOREACH_AMS(S)				\
156 	S(NONE_AMS),				\
157 	S(POWER_NEGOTIATION),			\
158 	S(GOTOMIN),				\
159 	S(SOFT_RESET_AMS),			\
160 	S(HARD_RESET),				\
161 	S(CABLE_RESET),				\
162 	S(GET_SOURCE_CAPABILITIES),		\
163 	S(GET_SINK_CAPABILITIES),		\
164 	S(POWER_ROLE_SWAP),			\
165 	S(FAST_ROLE_SWAP),			\
166 	S(DATA_ROLE_SWAP),			\
167 	S(VCONN_SWAP),				\
168 	S(SOURCE_ALERT),			\
169 	S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
170 	S(GETTING_SOURCE_SINK_STATUS),		\
171 	S(GETTING_BATTERY_CAPABILITIES),	\
172 	S(GETTING_BATTERY_STATUS),		\
173 	S(GETTING_MANUFACTURER_INFORMATION),	\
174 	S(SECURITY),				\
175 	S(FIRMWARE_UPDATE),			\
176 	S(DISCOVER_IDENTITY),			\
177 	S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY),	\
178 	S(DISCOVER_SVIDS),			\
179 	S(DISCOVER_MODES),			\
180 	S(DFP_TO_UFP_ENTER_MODE),		\
181 	S(DFP_TO_UFP_EXIT_MODE),		\
182 	S(DFP_TO_CABLE_PLUG_ENTER_MODE),	\
183 	S(DFP_TO_CABLE_PLUG_EXIT_MODE),		\
184 	S(ATTENTION),				\
185 	S(BIST),				\
186 	S(UNSTRUCTURED_VDMS),			\
187 	S(STRUCTURED_VDMS),			\
188 	S(COUNTRY_INFO),			\
189 	S(COUNTRY_CODES),			\
190 	S(REVISION_INFORMATION)
191 
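/* X-macro expanders: used with FOREACH_STATE()/FOREACH_AMS() to build each enum and its matching name table from a single list. */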
192 #define GENERATE_ENUM(e)	e
193 #define GENERATE_STRING(s)	#s
194 
195 enum tcpm_state {
196 	FOREACH_STATE(GENERATE_ENUM)
197 };
198 
199 static const char * const tcpm_states[] = {
200 	FOREACH_STATE(GENERATE_STRING)
201 };
202 
203 enum tcpm_ams {
204 	FOREACH_AMS(GENERATE_ENUM)
205 };
206 
207 static const char * const tcpm_ams_str[] = {
208 	FOREACH_AMS(GENERATE_STRING)
209 };
210 
211 enum vdm_states {
212 	VDM_STATE_ERR_BUSY = -3,
213 	VDM_STATE_ERR_SEND = -2,
214 	VDM_STATE_ERR_TMOUT = -1,
215 	VDM_STATE_DONE = 0,
216 	/* Anything >0 represents an active state */
217 	VDM_STATE_READY = 1,
218 	VDM_STATE_BUSY = 2,
219 	VDM_STATE_WAIT_RSP_BUSY = 3,
220 	VDM_STATE_SEND_MESSAGE = 4,
221 };
222 
223 enum pd_msg_request {
224 	PD_MSG_NONE = 0,
225 	PD_MSG_CTRL_REJECT,
226 	PD_MSG_CTRL_WAIT,
227 	PD_MSG_CTRL_NOT_SUPP,
228 	PD_MSG_DATA_SINK_CAP,
229 	PD_MSG_DATA_SOURCE_CAP,
230 	PD_MSG_DATA_REV,
231 };
232 
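/* Deferred actions on the partner altmode device (adev) after a received VDM has been processed. */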
233 enum adev_actions {
234 	ADEV_NONE = 0,
235 	ADEV_NOTIFY_USB_AND_QUEUE_VDM,
236 	ADEV_QUEUE_VDM,
237 	ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
238 	ADEV_ATTENTION,
239 };
240 
241 /*
242  * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
243  * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
244  * Version 1.2"
245  */
246 enum frs_typec_current {
247 	FRS_NOT_SUPPORTED,
248 	FRS_DEFAULT_POWER,
249 	FRS_5V_1P5A,
250 	FRS_5V_3A,
251 };
252 
253 /* Events from low level driver */
254 
255 #define TCPM_CC_EVENT		BIT(0)
256 #define TCPM_VBUS_EVENT		BIT(1)
257 #define TCPM_RESET_EVENT	BIT(2)
258 #define TCPM_FRS_EVENT		BIT(3)
259 #define TCPM_SOURCING_VBUS	BIT(4)
260 #define TCPM_PORT_CLEAN		BIT(5)
261 #define TCPM_PORT_ERROR		BIT(6)
262 
263 #define LOG_BUFFER_ENTRIES	1024
264 #define LOG_BUFFER_ENTRY_SIZE	128
265 
266 /* Alternate mode support */
267 
268 #define SVID_DISCOVERY_MAX	16
269 #define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
270 
271 #define GET_SINK_CAP_RETRY_MS	100
272 #define SEND_DISCOVER_RETRY_MS	100
273 
274 struct pd_mode_data {
275 	int svid_index;		/* current SVID index		*/
276 	int nsvids;
277 	u16 svids[SVID_DISCOVERY_MAX];
278 	int altmodes;		/* number of alternate modes	*/
279 	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
280 };
281 
282 /*
283  * @min_volt: Actual min voltage at the local port
284  * @req_min_volt: Requested min voltage to the port partner
285  * @max_volt: Actual max voltage at the local port
286  * @req_max_volt: Requested max voltage to the port partner
287  * @max_curr: Actual max current at the local port
288  * @req_max_curr: Requested max current of the port partner
289  * @req_out_volt: Requested output voltage to the port partner
290  * @req_op_curr: Requested operating current to the port partner
291  * @supported: Partner has at least one APDO, hence supports PPS
292  * @active: PPS mode is active
293  */
294 struct pd_pps_data {
295 	u32 min_volt;
296 	u32 req_min_volt;
297 	u32 max_volt;
298 	u32 req_max_volt;
299 	u32 max_curr;
300 	u32 req_max_curr;
301 	u32 req_out_volt;
302 	u32 req_op_curr;
303 	bool supported;
304 	bool active;
305 };
306 
307 struct pd_data {
308 	struct usb_power_delivery *pd;
309 	struct usb_power_delivery_capabilities *source_cap;
310 	struct usb_power_delivery_capabilities_desc source_desc;
311 	struct usb_power_delivery_capabilities *sink_cap;
312 	struct usb_power_delivery_capabilities_desc sink_desc;
313 	unsigned int operating_snk_mw;
314 };
315 
316 struct pd_revision_info {
317 	u8 rev_major;
318 	u8 rev_minor;
319 	u8 ver_major;
320 	u8 ver_minor;
321 };
322 
323 /*
324  * @sink_wait_cap_time: Deadline (in ms) for tTypeCSinkWaitCap timer
325  * @ps_src_off_time: Deadline (in ms) for tPSSourceOff timer
326  * @cc_debounce_time: Deadline (in ms) for tCCDebounce timer
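 * @snk_bc12_cmpletion_time: Deadline (in ms) for BC1.2 charger detection to complete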
327  */
328 struct pd_timings {
329 	u32 sink_wait_cap_time;
330 	u32 ps_src_off_time;
331 	u32 cc_debounce_time;
332 	u32 snk_bc12_cmpletion_time;
333 };
334 
335 struct tcpm_port {
336 	struct device *dev;
337 
338 	struct mutex lock;		/* tcpm state machine lock */
339 	struct kthread_worker *wq;
340 
341 	struct typec_capability typec_caps;
342 	struct typec_port *typec_port;
343 
344 	struct tcpc_dev	*tcpc;
345 	struct usb_role_switch *role_sw;
346 
347 	enum typec_role vconn_role;
348 	enum typec_role pwr_role;
349 	enum typec_data_role data_role;
350 	enum typec_pwr_opmode pwr_opmode;
351 
352 	struct usb_pd_identity partner_ident;
353 	struct typec_partner_desc partner_desc;
354 	struct typec_partner *partner;
355 
356 	struct usb_pd_identity cable_ident;
357 	struct typec_cable_desc cable_desc;
358 	struct typec_cable *cable;
359 	struct typec_plug_desc plug_prime_desc;
360 	struct typec_plug *plug_prime;
361 
362 	enum typec_cc_status cc_req;
363 	enum typec_cc_status src_rp;	/* used only if pd_supported == false */
364 
365 	enum typec_cc_status cc1;
366 	enum typec_cc_status cc2;
367 	enum typec_cc_polarity polarity;
368 
369 	bool attached;
370 	bool connected;
371 	bool registered;
372 	bool pd_supported;
373 	enum typec_port_type port_type;
374 
375 	/*
376 	 * Set to true when vbus is greater than VSAFE5V min.
377 	 * Set to false when vbus falls below vSinkDisconnect max threshold.
378 	 */
379 	bool vbus_present;
380 
381 	/*
382 	 * Set to true when vbus is less than VSAFE0V max.
383 	 * Set to false when vbus is greater than VSAFE0V max.
384 	 */
385 	bool vbus_vsafe0v;
386 
387 	bool vbus_never_low;
388 	bool vbus_source;
389 	bool vbus_charge;
390 
391 	/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
392 	bool send_discover;
393 	bool op_vsafe5v;
394 
395 	int try_role;
396 	int try_snk_count;
397 	int try_src_count;
398 
399 	enum pd_msg_request queued_message;
400 
401 	enum tcpm_state enter_state;
402 	enum tcpm_state prev_state;
403 	enum tcpm_state state;
404 	enum tcpm_state delayed_state;
405 	ktime_t delayed_runtime;
406 	unsigned long delay_ms;
407 
408 	spinlock_t pd_event_lock;
409 	u32 pd_events;
410 
411 	struct kthread_work event_work;
412 	struct hrtimer state_machine_timer;
413 	struct kthread_work state_machine;
414 	struct hrtimer vdm_state_machine_timer;
415 	struct kthread_work vdm_state_machine;
416 	struct hrtimer enable_frs_timer;
417 	struct kthread_work enable_frs;
418 	struct hrtimer send_discover_timer;
419 	struct kthread_work send_discover_work;
420 	bool state_machine_running;
421 	/* Set to true when the VDM state machine has pending actions. */
422 	bool vdm_sm_running;
423 
424 	struct completion tx_complete;
425 	enum tcpm_transmit_status tx_status;
426 
427 	struct mutex swap_lock;		/* swap command lock */
428 	bool swap_pending;
429 	bool non_pd_role_swap;
430 	struct completion swap_complete;
431 	int swap_status;
432 
433 	unsigned int negotiated_rev;
434 	unsigned int message_id;
435 	unsigned int caps_count;
436 	unsigned int hard_reset_count;
437 	bool pd_capable;
438 	bool explicit_contract;
439 	unsigned int rx_msgid;
440 
441 	/* USB PD objects */
442 	struct usb_power_delivery **pds;
443 	struct pd_data **pd_list;
444 	struct usb_power_delivery_capabilities *port_source_caps;
445 	struct usb_power_delivery_capabilities *port_sink_caps;
446 	struct usb_power_delivery *partner_pd;
447 	struct usb_power_delivery_capabilities *partner_source_caps;
448 	struct usb_power_delivery_capabilities *partner_sink_caps;
449 	struct usb_power_delivery *selected_pd;
450 
451 	/* Partner capabilities/requests */
452 	u32 sink_request;
453 	u32 source_caps[PDO_MAX_OBJECTS];
454 	unsigned int nr_source_caps;
455 	u32 sink_caps[PDO_MAX_OBJECTS];
456 	unsigned int nr_sink_caps;
457 
458 	/* Local capabilities */
459 	unsigned int pd_count;
460 	u32 src_pdo[PDO_MAX_OBJECTS];
461 	unsigned int nr_src_pdo;
462 	u32 snk_pdo[PDO_MAX_OBJECTS];
463 	unsigned int nr_snk_pdo;
464 	u32 snk_vdo_v1[VDO_MAX_OBJECTS];
465 	unsigned int nr_snk_vdo_v1;
466 	u32 snk_vdo[VDO_MAX_OBJECTS];
467 	unsigned int nr_snk_vdo;
468 
469 	unsigned int operating_snk_mw;
470 	bool update_sink_caps;
471 
472 	/* Requested current / voltage to the port partner */
473 	u32 req_current_limit;
474 	u32 req_supply_voltage;
475 	/* Actual current / voltage limit of the local port */
476 	u32 current_limit;
477 	u32 supply_voltage;
478 
479 	/* Used to export TA voltage and current */
480 	struct power_supply *psy;
481 	struct power_supply_desc psy_desc;
482 	enum power_supply_usb_type usb_type;
483 
484 	u32 bist_request;
485 
486 	/* PD state for Vendor Defined Messages */
487 	enum vdm_states vdm_state;
488 	u32 vdm_retries;
489 	/* next Vendor Defined Message to send */
490 	u32 vdo_data[VDO_MAX_SIZE];
491 	u8 vdo_count;
492 	/* VDO to retry if UFP responder replied busy */
493 	u32 vdo_retry;
494 
495 	/* PPS */
496 	struct pd_pps_data pps_data;
497 	struct completion pps_complete;
498 	bool pps_pending;
499 	int pps_status;
500 
501 	/* Alternate mode data */
502 	struct pd_mode_data mode_data;
503 	struct pd_mode_data mode_data_prime;
504 	struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
505 	struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
506 	struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
507 
508 	/* Deadline in jiffies to exit src_try_wait state */
509 	unsigned long max_wait;
510 
511 	/* port belongs to a self-powered device */
512 	bool self_powered;
513 
514 	/* Sink FRS */
515 	enum frs_typec_current new_source_frs_current;
516 
517 	/* Sink caps have been queried */
518 	bool sink_cap_done;
519 
520 	/* Collision Avoidance and Atomic Message Sequence */
521 	enum tcpm_state upcoming_state;
522 	enum tcpm_ams ams;
523 	enum tcpm_ams next_ams;
524 	bool in_ams;
525 
526 	/* Auto vbus discharge status */
527 	bool auto_vbus_discharge_enabled;
528 
529 	/*
530 	 * When set, the port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY, and
531 	 * the actual current limit after receiving PD_CTRL_PSRDY for a PD link or
532 	 * after entering SNK_READY for a non-PD link.
533 	 */
534 	bool slow_charger_loop;
535 
536 	/*
537 	 * When true, the lower level drivers have indicated potential presence of
538 	 * contaminant in the connector pins based on the tcpm state machine
539 	 * transitions.
540 	 */
541 	bool potential_contaminant;
542 
543 	/* SOP* Related Fields */
544 	/*
545 	 * Flag to determine if SOP' Discover Identity is available. The flag
546 	 * is set if Discover Identity on SOP' does not immediately follow
547 	 * Discover Identity on SOP.
548 	 */
549 	bool send_discover_prime;
550 	/*
551 	 * tx_sop_type determines which SOP* a message is being sent on.
552 	 * For messages that are queued and not sent immediately such as in
553 	 * tcpm_queue_message or messages that are sent after state changes,
554 	 * the tx_sop_type is set accordingly.
555 	 */
556 	enum tcpm_transmit_type tx_sop_type;
557 	/*
558 	 * Prior to discovering the port partner's Specification Revision, the
559 	 * Vconn source and cable plug will use the lower of their two revisions.
560 	 *
561 	 * When the port partner's Specification Revision is discovered, the following
562 	 * rules are put in place.
563 	 *	1. If the cable revision (1) is lower than the revision negotiated
564 	 * between the port and partner (2), the port and partner will communicate
565 	 * on revision (2), but the port and cable will communicate on revision (1).
566 	 *	2. If the cable revision (1) is higher than the revision negotiated
567 	 * between the port and partner (2), the port and partner will communicate
568 	 * on revision (2), and the port and cable will communicate on revision (2)
569 	 * as well.
570 	 */
571 	unsigned int negotiated_rev_prime;
572 	/*
573 	 * Each SOP* type must maintain their own tx and rx message IDs
574 	 */
575 	unsigned int message_id_prime;
576 	unsigned int rx_msgid_prime;
577 
578 	/* Timer deadline values configured at runtime */
579 	struct pd_timings timings;
580 
581 	/* Indicates maximum (revision, version) supported */
582 	struct pd_revision_info pd_rev;
583 #ifdef CONFIG_DEBUG_FS
584 	struct dentry *dentry;
585 	struct mutex logbuffer_lock;	/* log buffer access lock */
586 	int logbuffer_head;
587 	int logbuffer_tail;
588 	u8 *logbuffer[LOG_BUFFER_ENTRIES];
589 #endif
590 };
591 
592 struct pd_rx_event {
593 	struct kthread_work work;
594 	struct tcpm_port *port;
595 	struct pd_message msg;
596 	enum tcpm_transmit_type rx_sop_type;
597 };
598 
599 struct altmode_vdm_event {
600 	struct kthread_work work;
601 	struct tcpm_port *port;
602 	u32 header;
603 	u32 *data;
604 	int cnt;
605 	enum tcpm_transmit_type tx_sop_type;
606 };
607 
608 static const char * const pd_rev[] = {
609 	[PD_REV10]		= "rev1",
610 	[PD_REV20]		= "rev2",
611 	[PD_REV30]		= "rev3",
612 };
613 
614 #define tcpm_cc_is_sink(cc) \
615 	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
616 	 (cc) == TYPEC_CC_RP_3_0)
617 
618 /* As long as cc is pulled up, we can consider the port a sink. */
619 #define tcpm_port_is_sink(port) \
620 	(tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
621 
622 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
623 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
624 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
625 
626 #define tcpm_port_is_source(port) \
627 	((tcpm_cc_is_source((port)->cc1) && \
628 	 !tcpm_cc_is_source((port)->cc2)) || \
629 	 (tcpm_cc_is_source((port)->cc2) && \
630 	  !tcpm_cc_is_source((port)->cc1)))
631 
632 #define tcpm_port_is_debug(port) \
633 	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
634 
635 #define tcpm_port_is_audio(port) \
636 	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
637 
638 #define tcpm_port_is_audio_detached(port) \
639 	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
640 	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
641 
642 #define tcpm_try_snk(port) \
643 	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
644 	(port)->port_type == TYPEC_PORT_DRP)
645 
646 #define tcpm_try_src(port) \
647 	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
648 	(port)->port_type == TYPEC_PORT_DRP)
649 
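/* Initial data role to adopt when attaching as source/sink, honouring UFP-only and DFP-only ports. */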
650 #define tcpm_data_role_for_source(port) \
651 	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
652 	TYPEC_DEVICE : TYPEC_HOST)
653 
654 #define tcpm_data_role_for_sink(port) \
655 	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
656 	TYPEC_HOST : TYPEC_DEVICE)
657 
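/* PD3.0 collision avoidance: the source signals SinkTxOk by presenting Rp 3.0 A. */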
658 #define tcpm_sink_tx_ok(port) \
659 	(tcpm_port_is_sink(port) && \
660 	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
661 
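/* Extra state-change delay to let VBUS reach vSafe0V when auto discharge is enabled. */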
662 #define tcpm_wait_for_discharge(port) \
663 	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
664 
665 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
666 {
667 	if (port->port_type == TYPEC_PORT_DRP) {
668 		if (port->try_role == TYPEC_SINK)
669 			return SNK_UNATTACHED;
670 		else if (port->try_role == TYPEC_SOURCE)
671 			return SRC_UNATTACHED;
672 		/* Fall through to return SRC_UNATTACHED */
673 	} else if (port->port_type == TYPEC_PORT_SNK) {
674 		return SNK_UNATTACHED;
675 	}
676 	return SRC_UNATTACHED;
677 }
678 
679 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
680 {
681 	return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
682 		port->cc2 == TYPEC_CC_OPEN) ||
683 	       (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
684 				    port->cc1 == TYPEC_CC_OPEN) ||
685 				   (port->polarity == TYPEC_POLARITY_CC2 &&
686 				    port->cc2 == TYPEC_CC_OPEN)));
687 }
688 
689 /*
690  * Logging
691  */
692 
693 #ifdef CONFIG_DEBUG_FS
694 
695 static bool tcpm_log_full(struct tcpm_port *port)
696 {
697 	return port->logbuffer_tail ==
698 		(port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
699 }
700 
701 __printf(2, 0)
702 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
703 {
704 	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
705 	u64 ts_nsec = local_clock();
706 	unsigned long rem_nsec;
707 
708 	mutex_lock(&port->logbuffer_lock);
709 	if (!port->logbuffer[port->logbuffer_head]) {
710 		port->logbuffer[port->logbuffer_head] =
711 				kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
712 		if (!port->logbuffer[port->logbuffer_head]) {
713 			mutex_unlock(&port->logbuffer_lock);
714 			return;
715 		}
716 	}
717 
718 	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
719 
720 	if (tcpm_log_full(port)) {
721 		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
722 		strcpy(tmpbuffer, "overflow");
723 	}
724 
725 	if (port->logbuffer_head < 0 ||
726 	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
727 		dev_warn(port->dev,
728 			 "Bad log buffer index %d\n", port->logbuffer_head);
729 		goto abort;
730 	}
731 
732 	if (!port->logbuffer[port->logbuffer_head]) {
733 		dev_warn(port->dev,
734 			 "Log buffer index %d is NULL\n", port->logbuffer_head);
735 		goto abort;
736 	}
737 
738 	rem_nsec = do_div(ts_nsec, 1000000000);
739 	scnprintf(port->logbuffer[port->logbuffer_head],
740 		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
741 		  (unsigned long)ts_nsec, rem_nsec / 1000,
742 		  tmpbuffer);
743 	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
744 
745 abort:
746 	mutex_unlock(&port->logbuffer_lock);
747 }
748 
749 __printf(2, 3)
750 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
751 {
752 	va_list args;
753 
754 	/* Do not log while disconnected and unattached */
755 	if (tcpm_port_is_disconnected(port) &&
756 	    (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
757 	     port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
758 		return;
759 
760 	va_start(args, fmt);
761 	_tcpm_log(port, fmt, args);
762 	va_end(args);
763 }
764 
765 __printf(2, 3)
766 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
767 {
768 	va_list args;
769 
770 	va_start(args, fmt);
771 	_tcpm_log(port, fmt, args);
772 	va_end(args);
773 }
774 
775 static void tcpm_log_source_caps(struct tcpm_port *port)
776 {
777 	int i;
778 
779 	for (i = 0; i < port->nr_source_caps; i++) {
780 		u32 pdo = port->source_caps[i];
781 		enum pd_pdo_type type = pdo_type(pdo);
782 		char msg[64];
783 
784 		switch (type) {
785 		case PDO_TYPE_FIXED:
786 			scnprintf(msg, sizeof(msg),
787 				  "%u mV, %u mA [%s%s%s%s%s%s]",
788 				  pdo_fixed_voltage(pdo),
789 				  pdo_max_current(pdo),
790 				  (pdo & PDO_FIXED_DUAL_ROLE) ?
791 							"R" : "",
792 				  (pdo & PDO_FIXED_SUSPEND) ?
793 							"S" : "",
794 				  (pdo & PDO_FIXED_HIGHER_CAP) ?
795 							"H" : "",
796 				  (pdo & PDO_FIXED_USB_COMM) ?
797 							"U" : "",
798 				  (pdo & PDO_FIXED_DATA_SWAP) ?
799 							"D" : "",
800 				  (pdo & PDO_FIXED_EXTPOWER) ?
801 							"E" : "");
802 			break;
803 		case PDO_TYPE_VAR:
804 			scnprintf(msg, sizeof(msg),
805 				  "%u-%u mV, %u mA",
806 				  pdo_min_voltage(pdo),
807 				  pdo_max_voltage(pdo),
808 				  pdo_max_current(pdo));
809 			break;
810 		case PDO_TYPE_BATT:
811 			scnprintf(msg, sizeof(msg),
812 				  "%u-%u mV, %u mW",
813 				  pdo_min_voltage(pdo),
814 				  pdo_max_voltage(pdo),
815 				  pdo_max_power(pdo));
816 			break;
817 		case PDO_TYPE_APDO:
818 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
819 				scnprintf(msg, sizeof(msg),
820 					  "%u-%u mV, %u mA",
821 					  pdo_pps_apdo_min_voltage(pdo),
822 					  pdo_pps_apdo_max_voltage(pdo),
823 					  pdo_pps_apdo_max_current(pdo));
824 			else
825 				strcpy(msg, "undefined APDO");
826 			break;
827 		default:
828 			strcpy(msg, "undefined");
829 			break;
830 		}
831 		tcpm_log(port, " PDO %d: type %d, %s",
832 			 i, type, msg);
833 	}
834 }
835 
836 static int tcpm_debug_show(struct seq_file *s, void *v)
837 {
838 	struct tcpm_port *port = s->private;
839 	int tail;
840 
841 	mutex_lock(&port->logbuffer_lock);
842 	tail = port->logbuffer_tail;
843 	while (tail != port->logbuffer_head) {
844 		seq_printf(s, "%s\n", port->logbuffer[tail]);
845 		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
846 	}
847 	if (!seq_has_overflowed(s))
848 		port->logbuffer_tail = tail;
849 	mutex_unlock(&port->logbuffer_lock);
850 
851 	return 0;
852 }
853 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
854 
855 static void tcpm_debugfs_init(struct tcpm_port *port)
856 {
857 	char name[NAME_MAX];
858 
859 	mutex_init(&port->logbuffer_lock);
860 	snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
861 	port->dentry = debugfs_create_dir(name, usb_debug_root);
862 	debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
863 			    &tcpm_debug_fops);
864 }
865 
866 static void tcpm_debugfs_exit(struct tcpm_port *port)
867 {
868 	int i;
869 
870 	mutex_lock(&port->logbuffer_lock);
871 	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
872 		kfree(port->logbuffer[i]);
873 		port->logbuffer[i] = NULL;
874 	}
875 	mutex_unlock(&port->logbuffer_lock);
876 
877 	debugfs_remove(port->dentry);
878 }
879 
880 #else
881 
882 __printf(2, 3)
883 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
884 __printf(2, 3)
885 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
886 static void tcpm_log_source_caps(struct tcpm_port *port) { }
887 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
888 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
889 
890 #endif
891 
892 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
893 {
894 	tcpm_log(port, "cc:=%d", cc);
895 	port->cc_req = cc;
896 	port->tcpc->set_cc(port->tcpc, cc);
897 }
898 
899 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
900 {
901 	int ret = 0;
902 
903 	if (port->tcpc->enable_auto_vbus_discharge) {
904 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
905 		tcpm_log_force(port, "%s vbus discharge ret:%d",
906 			       str_enable_disable(enable), ret);
907 		if (!ret)
908 			port->auto_vbus_discharge_enabled = enable;
909 	}
910 
911 	return ret;
912 }
913 
914 static void tcpm_apply_rc(struct tcpm_port *port)
915 {
916 	/*
917 	 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
918 	 * when Vbus auto discharge on disconnect is enabled.
919 	 */
920 	if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
921 		tcpm_log(port, "Apply_RC");
922 		port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
923 		tcpm_enable_auto_vbus_discharge(port, false);
924 	}
925 }
926 
927 /*
928  * Determine RP value to set based on maximum current supported
929  * by a port if configured as source.
930  * Returns CC value to report to link partner.
931  */
932 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
933 {
934 	const u32 *src_pdo = port->src_pdo;
935 	int nr_pdo = port->nr_src_pdo;
936 	int i;
937 
938 	if (!port->pd_supported)
939 		return port->src_rp;
940 
941 	/*
942 	 * Search for first entry with matching voltage.
943 	 * It should report the maximum supported current.
944 	 */
945 	for (i = 0; i < nr_pdo; i++) {
946 		const u32 pdo = src_pdo[i];
947 
948 		if (pdo_type(pdo) == PDO_TYPE_FIXED &&
949 		    pdo_fixed_voltage(pdo) == 5000) {
950 			unsigned int curr = pdo_max_current(pdo);
951 
952 			if (curr >= 3000)
953 				return TYPEC_CC_RP_3_0;
954 			else if (curr >= 1500)
955 				return TYPEC_CC_RP_1_5;
956 			return TYPEC_CC_RP_DEF;
957 		}
958 	}
959 
960 	return TYPEC_CC_RP_DEF;
961 }
962 
963 static void tcpm_ams_finish(struct tcpm_port *port)
964 {
965 	tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
966 
967 	if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
968 		if (port->negotiated_rev >= PD_REV30)
969 			tcpm_set_cc(port, SINK_TX_OK);
970 		else
971 			tcpm_set_cc(port, SINK_TX_NG);
972 	} else if (port->pwr_role == TYPEC_SOURCE) {
973 		tcpm_set_cc(port, tcpm_rp_cc(port));
974 	}
975 
976 	port->in_ams = false;
977 	port->ams = NONE_AMS;
978 }
979 
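/* Transmit a message (or a hard reset) on the given SOP* and wait for the TCPC to report the transmit status. */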
980 static int tcpm_pd_transmit(struct tcpm_port *port,
981 			    enum tcpm_transmit_type tx_sop_type,
982 			    const struct pd_message *msg)
983 {
984 	unsigned long time_left;
985 	int ret;
986 	unsigned int negotiated_rev;
987 
988 	switch (tx_sop_type) {
989 	case TCPC_TX_SOP_PRIME:
990 		negotiated_rev = port->negotiated_rev_prime;
991 		break;
992 	case TCPC_TX_SOP:
993 	default:
994 		negotiated_rev = port->negotiated_rev;
995 		break;
996 	}
997 
998 	if (msg)
999 		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
1000 	else
1001 		tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
1002 
1003 	reinit_completion(&port->tx_complete);
1004 	ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
1005 	if (ret < 0)
1006 		return ret;
1007 
1008 	mutex_unlock(&port->lock);
1009 	time_left = wait_for_completion_timeout(&port->tx_complete,
1010 						msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
1011 	mutex_lock(&port->lock);
1012 	if (!time_left)
1013 		return -ETIMEDOUT;
1014 
1015 	switch (port->tx_status) {
1016 	case TCPC_TX_SUCCESS:
1017 		switch (tx_sop_type) {
1018 		case TCPC_TX_SOP_PRIME:
1019 			port->message_id_prime = (port->message_id_prime + 1) &
1020 						 PD_HEADER_ID_MASK;
1021 			break;
1022 		case TCPC_TX_SOP:
1023 		default:
1024 			port->message_id = (port->message_id + 1) &
1025 					   PD_HEADER_ID_MASK;
1026 			break;
1027 		}
1028 		/*
1029 		 * USB PD rev 2.0, 8.3.2.2.1:
1030 		 * USB PD rev 3.0, 8.3.2.1.3:
1031 		 * "... Note that every AMS is Interruptible until the first
1032 		 * Message in the sequence has been successfully sent (GoodCRC
1033 		 * Message received)."
1034 		 */
1035 		if (port->ams != NONE_AMS)
1036 			port->in_ams = true;
1037 		break;
1038 	case TCPC_TX_DISCARDED:
1039 		ret = -EAGAIN;
1040 		break;
1041 	case TCPC_TX_FAILED:
1042 	default:
1043 		ret = -EIO;
1044 		break;
1045 	}
1046 
1047 	/* Some AMS don't expect responses. Finish them here. */
1048 	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
1049 		tcpm_ams_finish(port);
1050 
1051 	return ret;
1052 }
1053 
1054 void tcpm_pd_transmit_complete(struct tcpm_port *port,
1055 			       enum tcpm_transmit_status status)
1056 {
1057 	tcpm_log(port, "PD TX complete, status: %u", status);
1058 	port->tx_status = status;
1059 	complete(&port->tx_complete);
1060 }
1061 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
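/*
 * Low level TCPC drivers call tcpm_pd_transmit_complete() once the hardware
 * reports the outcome of a transmission, typically from their interrupt
 * handler, e.g.:
 *
 *	tcpm_pd_transmit_complete(port, TCPC_TX_SUCCESS);
 */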
1062 
1063 static int tcpm_mux_set(struct tcpm_port *port, int state,
1064 			enum usb_role usb_role,
1065 			enum typec_orientation orientation)
1066 {
1067 	int ret;
1068 
1069 	tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
1070 		 state, usb_role, orientation);
1071 
1072 	ret = typec_set_orientation(port->typec_port, orientation);
1073 	if (ret)
1074 		return ret;
1075 
1076 	if (port->role_sw) {
1077 		ret = usb_role_switch_set_role(port->role_sw, usb_role);
1078 		if (ret)
1079 			return ret;
1080 	}
1081 
1082 	return typec_set_mode(port->typec_port, state);
1083 }
1084 
1085 static int tcpm_set_polarity(struct tcpm_port *port,
1086 			     enum typec_cc_polarity polarity)
1087 {
1088 	int ret;
1089 
1090 	tcpm_log(port, "polarity %d", polarity);
1091 
1092 	ret = port->tcpc->set_polarity(port->tcpc, polarity);
1093 	if (ret < 0)
1094 		return ret;
1095 
1096 	port->polarity = polarity;
1097 
1098 	return 0;
1099 }
1100 
1101 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
1102 {
1103 	int ret;
1104 
1105 	tcpm_log(port, "vconn:=%d", enable);
1106 
1107 	ret = port->tcpc->set_vconn(port->tcpc, enable);
1108 	if (!ret) {
1109 		port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1110 		typec_set_vconn_role(port->typec_port, port->vconn_role);
1111 	}
1112 
1113 	return ret;
1114 }
1115 
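/* Derive the sink current limit from the Rp advertised on the active CC line. */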
1116 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1117 {
1118 	enum typec_cc_status cc;
1119 	u32 limit;
1120 
1121 	cc = port->polarity ? port->cc2 : port->cc1;
1122 	switch (cc) {
1123 	case TYPEC_CC_RP_1_5:
1124 		limit = 1500;
1125 		break;
1126 	case TYPEC_CC_RP_3_0:
1127 		limit = 3000;
1128 		break;
1129 	case TYPEC_CC_RP_DEF:
1130 	default:
1131 		if (port->tcpc->get_current_limit)
1132 			limit = port->tcpc->get_current_limit(port->tcpc);
1133 		else
1134 			limit = 0;
1135 		break;
1136 	}
1137 
1138 	return limit;
1139 }
1140 
1141 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1142 {
1143 	int ret = -EOPNOTSUPP;
1144 
1145 	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1146 
1147 	port->supply_voltage = mv;
1148 	port->current_limit = max_ma;
1149 	power_supply_changed(port->psy);
1150 
1151 	if (port->tcpc->set_current_limit)
1152 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
1153 
1154 	return ret;
1155 }
1156 
1157 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1158 {
1159 	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
1160 				     port->data_role);
1161 }
1162 
1163 static int tcpm_set_roles(struct tcpm_port *port, bool attached, int state,
1164 			  enum typec_role role, enum typec_data_role data)
1165 {
1166 	enum typec_orientation orientation;
1167 	enum usb_role usb_role;
1168 	int ret;
1169 
1170 	if (port->polarity == TYPEC_POLARITY_CC1)
1171 		orientation = TYPEC_ORIENTATION_NORMAL;
1172 	else
1173 		orientation = TYPEC_ORIENTATION_REVERSE;
1174 
1175 	if (port->typec_caps.data == TYPEC_PORT_DRD) {
1176 		if (data == TYPEC_HOST)
1177 			usb_role = USB_ROLE_HOST;
1178 		else
1179 			usb_role = USB_ROLE_DEVICE;
1180 	} else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1181 		if (data == TYPEC_HOST) {
1182 			if (role == TYPEC_SOURCE)
1183 				usb_role = USB_ROLE_HOST;
1184 			else
1185 				usb_role = USB_ROLE_NONE;
1186 		} else {
1187 			return -ENOTSUPP;
1188 		}
1189 	} else {
1190 		if (data == TYPEC_DEVICE) {
1191 			if (role == TYPEC_SINK)
1192 				usb_role = USB_ROLE_DEVICE;
1193 			else
1194 				usb_role = USB_ROLE_NONE;
1195 		} else {
1196 			return -ENOTSUPP;
1197 		}
1198 	}
1199 
1200 	ret = tcpm_mux_set(port, state, usb_role, orientation);
1201 	if (ret < 0)
1202 		return ret;
1203 
1204 	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1205 	if (ret < 0)
1206 		return ret;
1207 
1208 	if (port->tcpc->set_orientation) {
1209 		ret = port->tcpc->set_orientation(port->tcpc, orientation);
1210 		if (ret < 0)
1211 			return ret;
1212 	}
1213 
1214 	port->pwr_role = role;
1215 	port->data_role = data;
1216 	typec_set_data_role(port->typec_port, data);
1217 	typec_set_pwr_role(port->typec_port, role);
1218 
1219 	return 0;
1220 }
1221 
1222 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1223 {
1224 	int ret;
1225 
1226 	ret = port->tcpc->set_roles(port->tcpc, true, role,
1227 				    port->data_role);
1228 	if (ret < 0)
1229 		return ret;
1230 
1231 	port->pwr_role = role;
1232 	typec_set_pwr_role(port->typec_port, role);
1233 
1234 	return 0;
1235 }
1236 
1237 /*
1238  * Transform the PDO to be compliant to PD rev2.0.
1239  * Return 0 if the PDO type is not defined in PD rev2.0.
1240  * Otherwise, return the converted PDO.
1241  */
1242 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1243 {
1244 	switch (pdo_type(pdo)) {
1245 	case PDO_TYPE_FIXED:
1246 		if (role == TYPEC_SINK)
1247 			return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1248 		else
1249 			return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1250 	case PDO_TYPE_VAR:
1251 	case PDO_TYPE_BATT:
1252 		return pdo;
1253 	case PDO_TYPE_APDO:
1254 	default:
1255 		return 0;
1256 	}
1257 }
1258 
1259 static int tcpm_pd_send_revision(struct tcpm_port *port)
1260 {
1261 	struct pd_message msg;
1262 	u32 rmdo;
1263 
1264 	memset(&msg, 0, sizeof(msg));
1265 	rmdo = RMDO(port->pd_rev.rev_major, port->pd_rev.rev_minor,
1266 		    port->pd_rev.ver_major, port->pd_rev.ver_minor);
1267 	msg.payload[0] = cpu_to_le32(rmdo);
1268 	msg.header = PD_HEADER_LE(PD_DATA_REVISION,
1269 				  port->pwr_role,
1270 				  port->data_role,
1271 				  port->negotiated_rev,
1272 				  port->message_id,
1273 				  1);
1274 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1275 }
1276 
1277 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1278 {
1279 	struct pd_message msg;
1280 	u32 pdo;
1281 	unsigned int i, nr_pdo = 0;
1282 
1283 	memset(&msg, 0, sizeof(msg));
1284 
1285 	for (i = 0; i < port->nr_src_pdo; i++) {
1286 		if (port->negotiated_rev >= PD_REV30) {
1287 			msg.payload[nr_pdo++] =	cpu_to_le32(port->src_pdo[i]);
1288 		} else {
1289 			pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1290 			if (pdo)
1291 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1292 		}
1293 	}
1294 
1295 	if (!nr_pdo) {
1296 		/* No source capabilities defined, sink only */
1297 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1298 					  port->pwr_role,
1299 					  port->data_role,
1300 					  port->negotiated_rev,
1301 					  port->message_id, 0);
1302 	} else {
1303 		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1304 					  port->pwr_role,
1305 					  port->data_role,
1306 					  port->negotiated_rev,
1307 					  port->message_id,
1308 					  nr_pdo);
1309 	}
1310 
1311 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1312 }
1313 
1314 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1315 {
1316 	struct pd_message msg;
1317 	u32 pdo;
1318 	unsigned int i, nr_pdo = 0;
1319 
1320 	memset(&msg, 0, sizeof(msg));
1321 
1322 	for (i = 0; i < port->nr_snk_pdo; i++) {
1323 		if (port->negotiated_rev >= PD_REV30) {
1324 			msg.payload[nr_pdo++] =	cpu_to_le32(port->snk_pdo[i]);
1325 		} else {
1326 			pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1327 			if (pdo)
1328 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1329 		}
1330 	}
1331 
1332 	if (!nr_pdo) {
1333 		/* No sink capabilities defined, source only */
1334 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1335 					  port->pwr_role,
1336 					  port->data_role,
1337 					  port->negotiated_rev,
1338 					  port->message_id, 0);
1339 	} else {
1340 		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1341 					  port->pwr_role,
1342 					  port->data_role,
1343 					  port->negotiated_rev,
1344 					  port->message_id,
1345 					  nr_pdo);
1346 	}
1347 
1348 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1349 }
1350 
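/* The mod_*_delayed_work() helpers below (re)arm the corresponding hrtimer; a zero delay cancels the timer and queues the work immediately. */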
1351 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1352 {
1353 	if (delay_ms) {
1354 		hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1355 	} else {
1356 		hrtimer_cancel(&port->state_machine_timer);
1357 		kthread_queue_work(port->wq, &port->state_machine);
1358 	}
1359 }
1360 
1361 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1362 {
1363 	if (delay_ms) {
1364 		hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1365 			      HRTIMER_MODE_REL);
1366 	} else {
1367 		hrtimer_cancel(&port->vdm_state_machine_timer);
1368 		kthread_queue_work(port->wq, &port->vdm_state_machine);
1369 	}
1370 }
1371 
1372 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1373 {
1374 	if (delay_ms) {
1375 		hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1376 	} else {
1377 		hrtimer_cancel(&port->enable_frs_timer);
1378 		kthread_queue_work(port->wq, &port->enable_frs);
1379 	}
1380 }
1381 
1382 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1383 {
1384 	if (delay_ms) {
1385 		hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1386 	} else {
1387 		hrtimer_cancel(&port->send_discover_timer);
1388 		kthread_queue_work(port->wq, &port->send_discover_work);
1389 	}
1390 }
1391 
1392 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1393 			   unsigned int delay_ms)
1394 {
1395 	if (delay_ms) {
1396 		tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1397 			 tcpm_states[port->state], tcpm_states[state], delay_ms,
1398 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1399 		port->delayed_state = state;
1400 		mod_tcpm_delayed_work(port, delay_ms);
1401 		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1402 		port->delay_ms = delay_ms;
1403 	} else {
1404 		tcpm_log(port, "state change %s -> %s [%s %s]",
1405 			 tcpm_states[port->state], tcpm_states[state],
1406 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1407 		port->delayed_state = INVALID_STATE;
1408 		port->prev_state = port->state;
1409 		port->state = state;
1410 		/*
1411 		 * Don't re-queue the state machine work item if we're currently
1412 		 * in the state machine and we're immediately changing states.
1413 		 * tcpm_state_machine_work() will continue running the state
1414 		 * machine.
1415 		 */
1416 		if (!port->state_machine_running)
1417 			mod_tcpm_delayed_work(port, 0);
1418 	}
1419 }
1420 
1421 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1422 				unsigned int delay_ms)
1423 {
1424 	if (port->enter_state == port->state)
1425 		tcpm_set_state(port, state, delay_ms);
1426 	else
1427 		tcpm_log(port,
1428 			 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1429 			 delay_ms ? "delayed " : "",
1430 			 tcpm_states[port->state], tcpm_states[state],
1431 			 delay_ms, tcpm_states[port->enter_state],
1432 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1433 }
1434 
1435 static void tcpm_queue_message(struct tcpm_port *port,
1436 			       enum pd_msg_request message)
1437 {
1438 	port->queued_message = message;
1439 	mod_tcpm_delayed_work(port, 0);
1440 }
1441 
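/* True if the current AMS is one that is carried out with VDMs. */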
1442 static bool tcpm_vdm_ams(struct tcpm_port *port)
1443 {
1444 	switch (port->ams) {
1445 	case DISCOVER_IDENTITY:
1446 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1447 	case DISCOVER_SVIDS:
1448 	case DISCOVER_MODES:
1449 	case DFP_TO_UFP_ENTER_MODE:
1450 	case DFP_TO_UFP_EXIT_MODE:
1451 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1452 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1453 	case ATTENTION:
1454 	case UNSTRUCTURED_VDMS:
1455 	case STRUCTURED_VDMS:
1456 		break;
1457 	default:
1458 		return false;
1459 	}
1460 
1461 	return true;
1462 }
1463 
1464 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1465 {
1466 	switch (port->ams) {
1467 	/* Interruptible AMS */
1468 	case NONE_AMS:
1469 	case SECURITY:
1470 	case FIRMWARE_UPDATE:
1471 	case DISCOVER_IDENTITY:
1472 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1473 	case DISCOVER_SVIDS:
1474 	case DISCOVER_MODES:
1475 	case DFP_TO_UFP_ENTER_MODE:
1476 	case DFP_TO_UFP_EXIT_MODE:
1477 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1478 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1479 	case UNSTRUCTURED_VDMS:
1480 	case STRUCTURED_VDMS:
1481 	case COUNTRY_INFO:
1482 	case COUNTRY_CODES:
1483 		break;
1484 	/* Non-Interruptible AMS */
1485 	default:
1486 		if (port->in_ams)
1487 			return false;
1488 		break;
1489 	}
1490 
1491 	return true;
1492 }
1493 
1494 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1495 {
1496 	int ret = 0;
1497 
1498 	tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
1499 
1500 	if (!tcpm_ams_interruptible(port) &&
1501 	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1502 		port->upcoming_state = INVALID_STATE;
1503 		tcpm_log(port, "AMS %s not interruptible, aborting",
1504 			 tcpm_ams_str[port->ams]);
1505 		return -EAGAIN;
1506 	}
1507 
1508 	if (port->pwr_role == TYPEC_SOURCE) {
1509 		enum typec_cc_status cc_req = port->cc_req;
1510 
1511 		port->ams = ams;
1512 
1513 		if (ams == HARD_RESET) {
1514 			tcpm_set_cc(port, tcpm_rp_cc(port));
1515 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1516 			tcpm_set_state(port, HARD_RESET_START, 0);
1517 			return ret;
1518 		} else if (ams == SOFT_RESET_AMS) {
1519 			if (!port->explicit_contract)
1520 				tcpm_set_cc(port, tcpm_rp_cc(port));
1521 			tcpm_set_state(port, SOFT_RESET_SEND, 0);
1522 			return ret;
1523 		} else if (tcpm_vdm_ams(port)) {
1524 			/* tSinkTx is enforced in vdm_run_state_machine */
1525 			if (port->negotiated_rev >= PD_REV30)
1526 				tcpm_set_cc(port, SINK_TX_NG);
1527 			return ret;
1528 		}
1529 
1530 		if (port->negotiated_rev >= PD_REV30)
1531 			tcpm_set_cc(port, SINK_TX_NG);
1532 
1533 		switch (port->state) {
1534 		case SRC_READY:
1535 		case SRC_STARTUP:
1536 		case SRC_SOFT_RESET_WAIT_SNK_TX:
1537 		case SOFT_RESET:
1538 		case SOFT_RESET_SEND:
1539 			if (port->negotiated_rev >= PD_REV30)
1540 				tcpm_set_state(port, AMS_START,
1541 					       cc_req == SINK_TX_OK ?
1542 					       PD_T_SINK_TX : 0);
1543 			else
1544 				tcpm_set_state(port, AMS_START, 0);
1545 			break;
1546 		default:
1547 			if (port->negotiated_rev >= PD_REV30)
1548 				tcpm_set_state(port, SRC_READY,
1549 					       cc_req == SINK_TX_OK ?
1550 					       PD_T_SINK_TX : 0);
1551 			else
1552 				tcpm_set_state(port, SRC_READY, 0);
1553 			break;
1554 		}
1555 	} else {
1556 		if (port->negotiated_rev >= PD_REV30 &&
1557 		    !tcpm_sink_tx_ok(port) &&
1558 		    ams != SOFT_RESET_AMS &&
1559 		    ams != HARD_RESET) {
1560 			port->upcoming_state = INVALID_STATE;
1561 			tcpm_log(port, "Sink TX No Go");
1562 			return -EAGAIN;
1563 		}
1564 
1565 		port->ams = ams;
1566 
1567 		if (ams == HARD_RESET) {
1568 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1569 			tcpm_set_state(port, HARD_RESET_START, 0);
1570 			return ret;
1571 		} else if (tcpm_vdm_ams(port)) {
1572 			return ret;
1573 		}
1574 
1575 		if (port->state == SNK_READY ||
1576 		    port->state == SNK_SOFT_RESET)
1577 			tcpm_set_state(port, AMS_START, 0);
1578 		else
1579 			tcpm_set_state(port, SNK_READY, 0);
1580 	}
1581 
1582 	return ret;
1583 }
1584 
1585 /*
1586  * VDM/VDO handling functions
1587  */
1588 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1589 			   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1590 {
1591 	u32 vdo_hdr = port->vdo_data[0];
1592 
1593 	WARN_ON(!mutex_is_locked(&port->lock));
1594 
1595 	/* If a discover_identity is being sent, handle the received message first */
1596 	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
1597 		if (tx_sop_type == TCPC_TX_SOP_PRIME)
1598 			port->send_discover_prime = true;
1599 		else
1600 			port->send_discover = true;
1601 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1602 	} else {
1603 		/* Make sure we are not still processing a previous VDM packet */
1604 		WARN_ON(port->vdm_state > VDM_STATE_DONE);
1605 	}
1606 
1607 	port->vdo_count = cnt + 1;
1608 	port->vdo_data[0] = header;
1609 	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1610 	/* Set ready, vdm state machine will actually send */
1611 	port->vdm_retries = 0;
1612 	port->vdm_state = VDM_STATE_READY;
1613 	port->vdm_sm_running = true;
1614 
1615 	port->tx_sop_type = tx_sop_type;
1616 
1617 	mod_vdm_delayed_work(port, 0);
1618 }
1619 
1620 static void tcpm_queue_vdm_work(struct kthread_work *work)
1621 {
1622 	struct altmode_vdm_event *event = container_of(work,
1623 						       struct altmode_vdm_event,
1624 						       work);
1625 	struct tcpm_port *port = event->port;
1626 
1627 	mutex_lock(&port->lock);
1628 	if (port->state != SRC_READY && port->state != SNK_READY &&
1629 	    port->state != SRC_VDM_IDENTITY_REQUEST) {
1630 		tcpm_log_force(port, "dropping altmode_vdm_event");
1631 		goto port_unlock;
1632 	}
1633 
1634 	tcpm_queue_vdm(port, event->header, event->data, event->cnt, event->tx_sop_type);
1635 
1636 port_unlock:
1637 	kfree(event->data);
1638 	kfree(event);
1639 	mutex_unlock(&port->lock);
1640 }
1641 
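/* Queue a VDM from a context that does not hold port->lock; the work item re-checks the port state under the lock before queueing the VDM. */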
1642 static int tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1643 				   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1644 {
1645 	struct altmode_vdm_event *event;
1646 	u32 *data_cpy;
1647 	int ret = -ENOMEM;
1648 
1649 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1650 	if (!event)
1651 		goto err_event;
1652 
1653 	data_cpy = kcalloc(cnt, sizeof(u32), GFP_KERNEL);
1654 	if (!data_cpy)
1655 		goto err_data;
1656 
1657 	kthread_init_work(&event->work, tcpm_queue_vdm_work);
1658 	event->port = port;
1659 	event->header = header;
1660 	memcpy(data_cpy, data, sizeof(u32) * cnt);
1661 	event->data = data_cpy;
1662 	event->cnt = cnt;
1663 	event->tx_sop_type = tx_sop_type;
1664 
1665 	ret = kthread_queue_work(port->wq, &event->work);
1666 	if (!ret) {
1667 		ret = -EBUSY;
1668 		goto err_queue;
1669 	}
1670 
1671 	return 0;
1672 
1673 err_queue:
1674 	kfree(data_cpy);
1675 err_data:
1676 	kfree(event);
1677 err_event:
1678 	tcpm_log_force(port, "failed to queue altmode vdm, err:%d", ret);
1679 	return ret;
1680 }
1681 
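/* Record the partner's Discover Identity response and update the registered partner identity. */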
1682 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1683 {
1684 	u32 vdo = p[VDO_INDEX_IDH];
1685 	u32 product = p[VDO_INDEX_PRODUCT];
1686 
1687 	memset(&port->mode_data, 0, sizeof(port->mode_data));
1688 
1689 	port->partner_ident.id_header = vdo;
1690 	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1691 	port->partner_ident.product = product;
1692 
1693 	if (port->partner)
1694 		typec_partner_set_identity(port->partner);
1695 
1696 	tcpm_log(port, "Identity: %04x:%04x.%04x",
1697 		 PD_IDH_VID(vdo),
1698 		 PD_PRODUCT_PID(product), product & 0xffff);
1699 }
1700 
1701 static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
1702 {
1703 	u32 idh = p[VDO_INDEX_IDH];
1704 	u32 product = p[VDO_INDEX_PRODUCT];
1705 	int svdm_version;
1706 
1707 	/*
1708 	 * Attempt to consume identity only if the cable is not yet registered
1709 	 */
1710 	if (!IS_ERR_OR_NULL(port->cable))
1711 		goto register_plug;
1712 
1713 	/* Reset cable identity */
1714 	memset(&port->cable_ident, 0, sizeof(port->cable_ident));
1715 
1716 	/* Fill out id header, cert, product, cable VDO 1 */
1717 	port->cable_ident.id_header = idh;
1718 	port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
1719 	port->cable_ident.product = product;
1720 	port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
1721 
1722 	/* Fill out cable desc, infer svdm_version from pd revision */
1723 	port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
1724 							USB_PLUG_TYPE_A);
1725 	port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
1726 	/* Log PD Revision and additional cable VDO from negotiated revision */
1727 	switch (port->negotiated_rev_prime) {
1728 	case PD_REV30:
1729 		port->cable_desc.pd_revision = 0x0300;
1730 		if (port->cable_desc.active)
1731 			port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
1732 		break;
1733 	case PD_REV20:
1734 		port->cable_desc.pd_revision = 0x0200;
1735 		break;
1736 	default:
1737 		port->cable_desc.pd_revision = 0x0200;
1738 		break;
1739 	}
1740 	port->cable_desc.identity = &port->cable_ident;
1741 	/* Register Cable, set identity and svdm_version */
1742 	port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
1743 	if (IS_ERR_OR_NULL(port->cable))
1744 		return;
1745 	typec_cable_set_identity(port->cable);
1746 	/* Get SVDM version */
1747 	svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
1748 	typec_cable_set_svdm_version(port->cable, svdm_version);
1749 
1750 register_plug:
1751 	if (IS_ERR_OR_NULL(port->plug_prime)) {
1752 		port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
1753 		port->plug_prime = typec_register_plug(port->cable,
1754 						       &port->plug_prime_desc);
1755 	}
1756 }
1757 
1758 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
1759 			       enum tcpm_transmit_type rx_sop_type)
1760 {
1761 	struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
1762 				      &port->mode_data_prime : &port->mode_data;
1763 	int i;
1764 
1765 	for (i = 1; i < cnt; i++) {
1766 		u16 svid;
1767 
1768 		svid = (p[i] >> 16) & 0xffff;
1769 		if (!svid)
1770 			return false;
1771 
1772 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1773 			goto abort;
1774 
1775 		pmdata->svids[pmdata->nsvids++] = svid;
1776 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1777 
1778 		svid = p[i] & 0xffff;
1779 		if (!svid)
1780 			return false;
1781 
1782 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1783 			goto abort;
1784 
1785 		pmdata->svids[pmdata->nsvids++] = svid;
1786 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1787 	}
1788 
1789 	/*
1790 	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1791 	 * 6-43), and a maximum of 6 VDOs can be returned per response (see
1792 	 * Figure 6-19). If the Responder supports 12 or more SVIDs, the
1793 	 * Discover SVIDs Command Shall be executed multiple times until a
1794 	 * Discover SVIDs VDO is returned ending either with a SVID value of
1795 	 * 0x0000 in the last part of the last VDO or with a VDO containing two
1796 	 * SVIDs with values of 0x0000.
1797 	 *
1798 	 * However, some odd docks support fewer than 12 SVIDs but do not send
1799 	 * 0x0000 in the last VDO, so we need to stop the Discover SVIDs
1800 	 * sequence and return false here.
1801 	 */
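	/* cnt includes the VDM header, so a full response is 7 objects (header + 6 VDOs). */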
1802 	return cnt == 7;
1803 abort:
1804 	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1805 	return false;
1806 }
1807 
1808 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
1809 			       enum tcpm_transmit_type rx_sop_type)
1810 {
1811 	struct pd_mode_data *pmdata = &port->mode_data;
1812 	struct typec_altmode_desc *paltmode;
1813 	int i;
1814 
1815 	switch (rx_sop_type) {
1816 	case TCPC_TX_SOP_PRIME:
1817 		pmdata = &port->mode_data_prime;
1818 		if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
1819 			/* Already logged in svdm_consume_svids() */
1820 			return;
1821 		}
1822 		break;
1823 	case TCPC_TX_SOP:
1824 		pmdata = &port->mode_data;
1825 		if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1826 			/* Already logged in svdm_consume_svids() */
1827 			return;
1828 		}
1829 		break;
1830 	default:
1831 		return;
1832 	}
1833 
1834 	for (i = 1; i < cnt; i++) {
1835 		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1836 		memset(paltmode, 0, sizeof(*paltmode));
1837 
1838 		paltmode->svid = pmdata->svids[pmdata->svid_index];
1839 		paltmode->mode = i;
1840 		paltmode->vdo = p[i];
1841 
1842 		tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1843 			 pmdata->altmodes, paltmode->svid,
1844 			 paltmode->mode, paltmode->vdo);
1845 
1846 		pmdata->altmodes++;
1847 	}
1848 }
1849 
1850 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1851 {
1852 	struct pd_mode_data *modep = &port->mode_data;
1853 	struct typec_altmode *altmode;
1854 	int i;
1855 
1856 	if (!port->partner)
1857 		return;
1858 
1859 	for (i = 0; i < modep->altmodes; i++) {
1860 		altmode = typec_partner_register_altmode(port->partner,
1861 						&modep->altmode_desc[i]);
1862 		if (IS_ERR(altmode)) {
1863 			tcpm_log(port, "Failed to register partner SVID 0x%04x",
1864 				 modep->altmode_desc[i].svid);
1865 			altmode = NULL;
1866 		}
1867 		port->partner_altmode[i] = altmode;
1868 	}
1869 }
1870 
1871 static void tcpm_register_plug_altmodes(struct tcpm_port *port)
1872 {
1873 	struct pd_mode_data *modep = &port->mode_data_prime;
1874 	struct typec_altmode *altmode;
1875 	int i;
1876 
1877 	typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
1878 
1879 	for (i = 0; i < modep->altmodes; i++) {
1880 		altmode = typec_plug_register_altmode(port->plug_prime,
1881 						&modep->altmode_desc[i]);
1882 		if (IS_ERR(altmode)) {
1883 			tcpm_log(port, "Failed to register plug SVID 0x%04x",
1884 				 modep->altmode_desc[i].svid);
1885 			altmode = NULL;
1886 		}
1887 		port->plug_prime_altmode[i] = altmode;
1888 	}
1889 }
1890 
1891 #define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1892 #define supports_modal_cable(port)     PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
1893 #define supports_host(port)    PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
1894 
1895 /*
1896  * Helper to determine whether the port is capable of SOP' communication at the
1897  * current point in time.
1898  */
1899 static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
1900 {
1901 	/* Check to see if tcpc supports SOP' communication */
1902 	if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
1903 		return false;
1904 	/*
1905 	 * Power Delivery 2.0 Section 6.3.11
1906 	 * Before communicating with a Cable Plug a Port Should ensure that it
1907 	 * is the Vconn Source and that the Cable Plugs are powered by
1908 	 * performing a Vconn swap if necessary. Since it cannot be guaranteed
1909 	 * that the present Vconn Source is supplying Vconn, the only means to
1910 	 * ensure that the Cable Plugs are powered is for a Port wishing to
1911 	 * communicate with a Cable Plug is to become the Vconn Source.
1912 	 *
1913 	 * Power Delivery 3.0 Section 6.3.11
1914 	 * Before communicating with a Cable Plug a Port Shall ensure that it
1915 	 * is the Vconn source.
1916 	 */
1917 	if (port->vconn_role != TYPEC_SOURCE)
1918 		return false;
1919 	/*
1920 	 * Power Delivery 2.0 Section 2.4.4
1921 	 * When no Contract or an Implicit Contract is in place the Source can
1922 	 * communicate with a Cable Plug using SOP' packets in order to discover
1923 	 * its characteristics.
1924 	 *
1925 	 * Power Delivery 3.0 Section 2.4.4
1926 	 * When no Contract or an Implicit Contract is in place only the Source
1927 	 * port that is supplying Vconn is allowed to send packets to a Cable
1928 	 * Plug and is allowed to respond to packets from the Cable Plug.
1929 	 */
1930 	if (!port->explicit_contract)
1931 		return port->pwr_role == TYPEC_SOURCE;
1932 	if (port->negotiated_rev == PD_REV30)
1933 		return true;
1934 	/*
1935 	 * Power Delivery 2.0 Section 2.4.4
1936 	 *
1937 	 * When an Explicit Contract is in place the DFP (either the Source or
1938 	 * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
1939 	 * Packets (see Figure 2-3).
1940 	 */
1941 	if (port->negotiated_rev == PD_REV20)
1942 		return port->data_role == TYPEC_HOST;
1943 	return false;
1944 }
1945 
1946 static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
1947 {
1948 	if (!port->tcpc->attempt_vconn_swap_discovery)
1949 		return false;
1950 
1951 	/* Port is already source, no need to perform swap */
1952 	if (port->vconn_role == TYPEC_SOURCE)
1953 		return false;
1954 
1955 	/*
1956 	 * The partner must support Alternate Modes (modal operation). If the
1957 	 * partner is also capable of acting as a USB Host, it may be a device
1958 	 * that drives Alternate Modes itself as the DFP, so do not swap.
1959 	 */
1960 	if (!supports_modal(port) || supports_host(port))
1961 		return false;
1962 
1963 	if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
1964 	    port->negotiated_rev == PD_REV30)
1965 		return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
1966 
1967 	return false;
1968 }
1969 
1970 
1971 static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
1972 {
1973 	return !IS_ERR_OR_NULL(port->cable) &&
1974 	       typec_cable_is_active(port->cable) &&
1975 	       supports_modal_cable(port) &&
1976 	       tcpm_can_communicate_sop_prime(port);
1977 }
1978 
1979 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1980 			const u32 *p, int cnt, u32 *response,
1981 			enum adev_actions *adev_action,
1982 			enum tcpm_transmit_type rx_sop_type,
1983 			enum tcpm_transmit_type *response_tx_sop_type)
1984 {
1985 	struct typec_port *typec = port->typec_port;
1986 	struct typec_altmode *pdev, *pdev_prime;
1987 	struct pd_mode_data *modep, *modep_prime;
1988 	int svdm_version;
1989 	int rlen = 0;
1990 	int cmd_type;
1991 	int cmd;
1992 	int i;
1993 	int ret;
1994 
1995 	cmd_type = PD_VDO_CMDT(p[0]);
1996 	cmd = PD_VDO_CMD(p[0]);
1997 
1998 	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1999 		 p[0], cmd_type, cmd, cnt);
2000 
2001 	switch (rx_sop_type) {
2002 	case TCPC_TX_SOP_PRIME:
2003 		modep_prime = &port->mode_data_prime;
2004 		pdev_prime = typec_match_altmode(port->plug_prime_altmode,
2005 						 ALTMODE_DISCOVERY_MAX,
2006 						 PD_VDO_VID(p[0]),
2007 						 PD_VDO_OPOS(p[0]));
2008 		svdm_version = typec_get_cable_svdm_version(typec);
2009 		/*
2010 		 * Update SVDM version if cable was discovered before port partner.
2011 		 */
2012 		if (!IS_ERR_OR_NULL(port->cable) &&
2013 		    PD_VDO_SVDM_VER(p[0]) < svdm_version)
2014 			typec_cable_set_svdm_version(port->cable, svdm_version);
2015 		break;
2016 	case TCPC_TX_SOP:
2017 		modep = &port->mode_data;
2018 		pdev = typec_match_altmode(port->partner_altmode,
2019 					   ALTMODE_DISCOVERY_MAX,
2020 					   PD_VDO_VID(p[0]),
2021 					   PD_VDO_OPOS(p[0]));
2022 		svdm_version = typec_get_negotiated_svdm_version(typec);
2023 		if (svdm_version < 0)
2024 			return 0;
2025 		break;
2026 	default:
2027 		modep = &port->mode_data;
2028 		pdev = typec_match_altmode(port->partner_altmode,
2029 					   ALTMODE_DISCOVERY_MAX,
2030 					   PD_VDO_VID(p[0]),
2031 					   PD_VDO_OPOS(p[0]));
2032 		svdm_version = typec_get_negotiated_svdm_version(typec);
2033 		if (svdm_version < 0)
2034 			return 0;
2035 		break;
2036 	}
2037 
2038 	switch (cmd_type) {
2039 	case CMDT_INIT:
2040 		/*
2041 		 * Only the port or the port partner is allowed to initiate SVDM
2042 		 * commands over SOP'. If the port partner initiates a sequence
2043 		 * when it is not allowed to send SOP' messages, drop the message
2044 		 * rather than have the TCPM port try to process it.
2045 		 */
2046 		if (rx_sop_type == TCPC_TX_SOP_PRIME)
2047 			return 0;
2048 
2049 		switch (cmd) {
2050 		case CMD_DISCOVER_IDENT:
2051 			if (PD_VDO_VID(p[0]) != USB_SID_PD)
2052 				break;
2053 
2054 			if (IS_ERR_OR_NULL(port->partner))
2055 				break;
2056 
2057 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2058 				typec_partner_set_svdm_version(port->partner,
2059 							       PD_VDO_SVDM_VER(p[0]));
2060 				svdm_version = PD_VDO_SVDM_VER(p[0]);
2061 			}
2062 
2063 			port->ams = DISCOVER_IDENTITY;
2064 			/*
2065 			 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
2066 			 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
2067 			 * "wrong configuation" or "Unrecognized"
2068 			 */
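			/*
			 * Reply with the SVDM 1.0 identity VDOs when the partner
			 * negotiated SVDM 1.0, otherwise with the full set.
			 */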
2069 			if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
2070 			    port->nr_snk_vdo) {
2071 				if (svdm_version < SVDM_VER_2_0) {
2072 					for (i = 0; i < port->nr_snk_vdo_v1; i++)
2073 						response[i + 1] = port->snk_vdo_v1[i];
2074 					rlen = port->nr_snk_vdo_v1 + 1;
2075 
2076 				} else {
2077 					for (i = 0; i < port->nr_snk_vdo; i++)
2078 						response[i + 1] = port->snk_vdo[i];
2079 					rlen = port->nr_snk_vdo + 1;
2080 				}
2081 			}
2082 			break;
2083 		case CMD_DISCOVER_SVID:
2084 			port->ams = DISCOVER_SVIDS;
2085 			break;
2086 		case CMD_DISCOVER_MODES:
2087 			port->ams = DISCOVER_MODES;
2088 			break;
2089 		case CMD_ENTER_MODE:
2090 			port->ams = DFP_TO_UFP_ENTER_MODE;
2091 			break;
2092 		case CMD_EXIT_MODE:
2093 			port->ams = DFP_TO_UFP_EXIT_MODE;
2094 			break;
2095 		case CMD_ATTENTION:
2096 			/* Attention command does not have response */
2097 			*adev_action = ADEV_ATTENTION;
2098 			return 0;
2099 		default:
2100 			break;
2101 		}
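		/* ACK if a command handler above filled in response VDOs, NAK if nothing was prepared. */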
2102 		if (rlen >= 1) {
2103 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
2104 		} else if (rlen == 0) {
2105 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2106 			rlen = 1;
2107 		} else {
2108 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
2109 			rlen = 1;
2110 		}
2111 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2112 			      (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
2113 		break;
2114 	case CMDT_RSP_ACK:
2115 		/*
2116 		 * Silently drop the message if we are not connected, except for a
2117 		 * SOP' Discover Identity response received prior to an explicit contract.
2118 		 */
2119 		if (IS_ERR_OR_NULL(port->partner) &&
2120 		    !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
2121 			break;
2122 
2123 		tcpm_ams_finish(port);
2124 
2125 		switch (cmd) {
2126 		/*
2127 		 * SVDM Command Flow for SOP and SOP':
2128 		 * SOP		Discover Identity
2129 		 * SOP'		Discover Identity
2130 		 * SOP		Discover SVIDs
2131 		 *		Discover Modes
2132 		 * (Active Cables)
2133 		 * SOP'		Discover SVIDs
2134 		 *		Discover Modes
2135 		 *
2136 		 * Perform Discover SOP' if the port can communicate with cable
2137 		 * plug.
2138 		 */
2139 		case CMD_DISCOVER_IDENT:
2140 			switch (rx_sop_type) {
2141 			case TCPC_TX_SOP:
2142 				if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2143 					typec_partner_set_svdm_version(port->partner,
2144 								       PD_VDO_SVDM_VER(p[0]));
2145 					/* If cable is discovered before partner, downgrade svdm */
2146 					if (!IS_ERR_OR_NULL(port->cable) &&
2147 					    (typec_get_cable_svdm_version(port->typec_port) >
2148 					    svdm_version))
2149 						typec_cable_set_svdm_version(port->cable,
2150 									     svdm_version);
2151 				}
2152 				/* 6.4.4.3.1 */
2153 				svdm_consume_identity(port, p, cnt);
2154 				/* Attempt Vconn swap, delay SOP' discovery if necessary */
2155 				if (tcpm_attempt_vconn_swap_discovery(port)) {
2156 					port->send_discover_prime = true;
2157 					port->upcoming_state = VCONN_SWAP_SEND;
2158 					ret = tcpm_ams_start(port, VCONN_SWAP);
2159 					if (!ret)
2160 						return 0;
2161 					/* Cannot perform Vconn swap */
2162 					port->upcoming_state = INVALID_STATE;
2163 					port->send_discover_prime = false;
2164 				}
2165 
2166 				/*
2167 				 * Attempt Discover Identity on SOP' if the
2168 				 * cable was not discovered previously, and use
2169 				 * the SVDM version of the partner to probe.
2170 				 */
2171 				if (IS_ERR_OR_NULL(port->cable) &&
2172 				    tcpm_can_communicate_sop_prime(port)) {
2173 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2174 					port->send_discover_prime = true;
2175 					response[0] = VDO(USB_SID_PD, 1,
2176 							  typec_get_negotiated_svdm_version(typec),
2177 							  CMD_DISCOVER_IDENT);
2178 					rlen = 1;
2179 				} else {
2180 					*response_tx_sop_type = TCPC_TX_SOP;
2181 					response[0] = VDO(USB_SID_PD, 1,
2182 							  typec_get_negotiated_svdm_version(typec),
2183 							  CMD_DISCOVER_SVID);
2184 					rlen = 1;
2185 				}
2186 				break;
2187 			case TCPC_TX_SOP_PRIME:
2188 				/*
2189 				 * svdm_consume_identity_sop_prime will determine
2190 				 * the svdm_version for the cable moving forward.
2191 				 */
2192 				svdm_consume_identity_sop_prime(port, p, cnt);
2193 
2194 				/*
2195 				 * If received in SRC_VDM_IDENTITY_REQUEST, continue
2196 				 * to SRC_SEND_CAPABILITIES
2197 				 */
2198 				if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2199 					tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2200 					return 0;
2201 				}
2202 
2203 				*response_tx_sop_type = TCPC_TX_SOP;
2204 				response[0] = VDO(USB_SID_PD, 1,
2205 						  typec_get_negotiated_svdm_version(typec),
2206 						  CMD_DISCOVER_SVID);
2207 				rlen = 1;
2208 				break;
2209 			default:
2210 				return 0;
2211 			}
2212 			break;
2213 		case CMD_DISCOVER_SVID:
2214 			*response_tx_sop_type = rx_sop_type;
2215 			/* 6.4.4.3.2 */
2216 			if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
2217 				response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
2218 				rlen = 1;
2219 			} else {
2220 				if (rx_sop_type == TCPC_TX_SOP) {
2221 					if (modep->nsvids && supports_modal(port)) {
2222 						response[0] = VDO(modep->svids[0], 1, svdm_version,
2223 								CMD_DISCOVER_MODES);
2224 						rlen = 1;
2225 					}
2226 				} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2227 					if (modep_prime->nsvids) {
2228 						response[0] = VDO(modep_prime->svids[0], 1,
2229 								  svdm_version, CMD_DISCOVER_MODES);
2230 						rlen = 1;
2231 					}
2232 				}
2233 			}
2234 			break;
2235 		case CMD_DISCOVER_MODES:
2236 			if (rx_sop_type == TCPC_TX_SOP) {
2237 				/* 6.4.4.3.3 */
2238 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2239 				modep->svid_index++;
2240 				if (modep->svid_index < modep->nsvids) {
2241 					u16 svid = modep->svids[modep->svid_index];
2242 					*response_tx_sop_type = TCPC_TX_SOP;
2243 					response[0] = VDO(svid, 1, svdm_version,
2244 							  CMD_DISCOVER_MODES);
2245 					rlen = 1;
2246 				} else if (tcpm_cable_vdm_supported(port)) {
2247 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2248 					response[0] = VDO(USB_SID_PD, 1,
2249 							  typec_get_cable_svdm_version(typec),
2250 							  CMD_DISCOVER_SVID);
2251 					rlen = 1;
2252 				} else {
2253 					tcpm_register_partner_altmodes(port);
2254 				}
2255 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2256 				/* 6.4.4.3.3 */
2257 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2258 				modep_prime->svid_index++;
2259 				if (modep_prime->svid_index < modep_prime->nsvids) {
2260 					u16 svid = modep_prime->svids[modep_prime->svid_index];
2261 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2262 					response[0] = VDO(svid, 1,
2263 							  typec_get_cable_svdm_version(typec),
2264 							  CMD_DISCOVER_MODES);
2265 					rlen = 1;
2266 				} else {
2267 					tcpm_register_plug_altmodes(port);
2268 					tcpm_register_partner_altmodes(port);
2269 				}
2270 			}
2271 			break;
2272 		case CMD_ENTER_MODE:
2273 			*response_tx_sop_type = rx_sop_type;
2274 			if (rx_sop_type == TCPC_TX_SOP) {
2275 				if (adev && pdev) {
2276 					typec_altmode_update_active(pdev, true);
2277 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2278 				}
2279 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2280 				if (adev && pdev_prime) {
2281 					typec_altmode_update_active(pdev_prime, true);
2282 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2283 				}
2284 			}
2285 			return 0;
2286 		case CMD_EXIT_MODE:
2287 			*response_tx_sop_type = rx_sop_type;
2288 			if (rx_sop_type == TCPC_TX_SOP) {
2289 				if (adev && pdev) {
2290 					typec_altmode_update_active(pdev, false);
2291 					/* Back to USB Operation */
2292 					*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2293 					return 0;
2294 				}
2295 			}
2296 			break;
2297 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2298 			break;
2299 		default:
2300 			/* Unrecognized SVDM */
2301 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2302 			rlen = 1;
2303 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2304 				      (VDO_SVDM_VERS(svdm_version));
2305 			break;
2306 		}
2307 		break;
2308 	case CMDT_RSP_NAK:
2309 		tcpm_ams_finish(port);
2310 		switch (cmd) {
2311 		case CMD_DISCOVER_IDENT:
2312 		case CMD_DISCOVER_SVID:
2313 		case CMD_DISCOVER_MODES:
2314 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2315 			break;
2316 		case CMD_ENTER_MODE:
2317 			/* Back to USB Operation */
2318 			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2319 			return 0;
2320 		default:
2321 			/* Unrecognized SVDM */
2322 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2323 			rlen = 1;
2324 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2325 				      (VDO_SVDM_VERS(svdm_version));
2326 			break;
2327 		}
2328 		break;
2329 	default:
2330 		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2331 		rlen = 1;
2332 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2333 			      (VDO_SVDM_VERS(svdm_version));
2334 		break;
2335 	}
2336 
2337 	/* Inform the alternate mode drivers about everything */
2338 	*adev_action = ADEV_QUEUE_VDM;
2339 	return rlen;
2340 }
2341 
2342 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2343 			       enum pd_msg_request message,
2344 			       enum tcpm_ams ams);
2345 
2346 static void tcpm_handle_vdm_request(struct tcpm_port *port,
2347 				    const __le32 *payload, int cnt,
2348 				    enum tcpm_transmit_type rx_sop_type)
2349 {
2350 	enum adev_actions adev_action = ADEV_NONE;
2351 	struct typec_altmode *adev;
2352 	u32 p[PD_MAX_PAYLOAD];
2353 	u32 response[8] = { };
2354 	int i, rlen = 0;
2355 	enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
2356 
2357 	for (i = 0; i < cnt; i++)
2358 		p[i] = le32_to_cpu(payload[i]);
2359 
2360 	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
2361 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
2362 
2363 	if (port->vdm_state == VDM_STATE_BUSY) {
2364 		/* If UFP responded busy retry after timeout */
2365 		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
2366 			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
2367 			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
2368 				CMDT_INIT;
2369 			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
2370 			return;
2371 		}
2372 		port->vdm_state = VDM_STATE_DONE;
2373 	}
2374 
2375 	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
2376 		/*
2377 		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
2378 		 * advance because we are dropping the lock but may send VDMs soon.
2379 		 * For the cases of INIT received:
2380 		 *  - If no response to send, it will be cleared later in this function.
2381 		 *  - If there are responses to send, it will be cleared in the state machine.
2382 		 * For the cases of RSP received:
2383 		 *  - If no further INIT to send, it will be cleared later in this function.
2384 		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
2385 		 *    back here until no further INIT to send.
2386 		 * For the cases of unknown type received:
2387 		 *  - We will send NAK and the flag will be cleared in the state machine.
2388 		 */
2389 		port->vdm_sm_running = true;
2390 		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
2391 				    rx_sop_type, &response_tx_sop_type);
2392 	} else {
2393 		if (port->negotiated_rev >= PD_REV30)
2394 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2395 	}
2396 
2397 	/*
2398 	 * We are done with any state stored in the port struct now, except
2399 	 * for any port struct changes done by the tcpm_queue_vdm() call
2400 	 * below, which is a separate operation.
2401 	 *
2402 	 * So we can safely release the lock here; and we MUST release the
2403 	 * lock here to avoid an AB BA lock inversion:
2404 	 *
2405 	 * If we keep the lock here then the lock ordering in this path is:
2406 	 * 1. tcpm_pd_rx_handler takes the tcpm port lock
2407 	 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
2408 	 *
2409 	 * And we also have this ordering:
2410 	 * 1. alt-mode driver takes the alt-mode's lock
2411 	 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
2412 	 *    tcpm port lock
2413 	 *
2414 	 * Dropping our lock here avoids this.
2415 	 */
2416 	mutex_unlock(&port->lock);
2417 
2418 	if (adev) {
2419 		switch (adev_action) {
2420 		case ADEV_NONE:
2421 			break;
2422 		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
2423 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2424 				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
2425 			} else {
2426 				WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
2427 				typec_altmode_vdm(adev, p[0], &p[1], cnt);
2428 			}
2429 			break;
2430 		case ADEV_QUEUE_VDM:
2431 			if (rx_sop_type == TCPC_TX_SOP_PRIME)
2432 				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
2433 			else
2434 				typec_altmode_vdm(adev, p[0], &p[1], cnt);
2435 			break;
2436 		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
2437 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2438 				if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
2439 							    p[0], &p[1], cnt)) {
2440 					int svdm_version = typec_get_cable_svdm_version(
2441 										port->typec_port);
2442 					if (svdm_version < 0)
2443 						break;
2444 
2445 					response[0] = VDO(adev->svid, 1, svdm_version,
2446 							CMD_EXIT_MODE);
2447 					response[0] |= VDO_OPOS(adev->mode);
2448 					rlen = 1;
2449 				}
2450 			} else {
2451 				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
2452 					int svdm_version = typec_get_negotiated_svdm_version(
2453 										port->typec_port);
2454 					if (svdm_version < 0)
2455 						break;
2456 
2457 					response[0] = VDO(adev->svid, 1, svdm_version,
2458 							CMD_EXIT_MODE);
2459 					response[0] |= VDO_OPOS(adev->mode);
2460 					rlen = 1;
2461 				}
2462 			}
2463 			break;
2464 		case ADEV_ATTENTION:
2465 			if (typec_altmode_attention(adev, p[1]))
2466 				tcpm_log(port, "typec_altmode_attention no port partner altmode");
2467 			break;
2468 		}
2469 	}
2470 
2471 	/*
2472 	 * We must re-take the lock here to balance the unlock in
2473 	 * tcpm_pd_rx_handler; note that no changes, other than the
2474 	 * tcpm_queue_vdm call, are made while the lock is held again.
2475 	 * All that is done after the call is unwinding the call stack until
2476 	 * we return to tcpm_pd_rx_handler and do the unlock there.
2477 	 */
2478 	mutex_lock(&port->lock);
2479 
2480 	if (rlen > 0)
2481 		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
2482 	else
2483 		port->vdm_sm_running = false;
2484 }
2485 
2486 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
2487 			  const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
2488 {
2489 	int svdm_version;
2490 	u32 header;
2491 
2492 	switch (tx_sop_type) {
2493 	case TCPC_TX_SOP_PRIME:
2494 		/*
2495 		 * If the port partner is discovered, then the port partner's
2496 		 * SVDM Version will be returned
2497 		 */
2498 		svdm_version = typec_get_cable_svdm_version(port->typec_port);
2499 		if (svdm_version < 0)
2500 			svdm_version = SVDM_VER_MAX;
2501 		break;
2502 	case TCPC_TX_SOP:
2503 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2504 		if (svdm_version < 0)
2505 			return;
2506 		break;
2507 	default:
2508 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2509 		if (svdm_version < 0)
2510 			return;
2511 		break;
2512 	}
2513 
2514 	if (WARN_ON(count > VDO_MAX_SIZE - 1))
2515 		count = VDO_MAX_SIZE - 1;
2516 
2517 	/* set VDM header with VID & CMD */
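	/*
	 * The VDM Type bit is set (Structured) for the PD SID; for other SVIDs
	 * only when the command is one of the standard commands up to Attention.
	 */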
2518 	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
2519 			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
2520 			svdm_version, cmd);
2521 	tcpm_queue_vdm(port, header, data, count, tx_sop_type);
2522 }
2523 
2524 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
2525 {
2526 	unsigned int timeout;
2527 	int cmd = PD_VDO_CMD(vdm_hdr);
2528 
2529 	/* it's not a structured VDM command */
2530 	if (!PD_VDO_SVDM(vdm_hdr))
2531 		return PD_T_VDM_UNSTRUCTURED;
2532 
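	/*
	 * Commands we initiate wait up to the sender response (or mode
	 * entry/exit) time for a reply; anything else uses the shorter
	 * receiver-side timers.
	 */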
2533 	switch (PD_VDO_CMDT(vdm_hdr)) {
2534 	case CMDT_INIT:
2535 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2536 			timeout = PD_T_VDM_WAIT_MODE_E;
2537 		else
2538 			timeout = PD_T_VDM_SNDR_RSP;
2539 		break;
2540 	default:
2541 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2542 			timeout = PD_T_VDM_E_MODE;
2543 		else
2544 			timeout = PD_T_VDM_RCVR_RSP;
2545 		break;
2546 	}
2547 	return timeout;
2548 }
2549 
2550 static void vdm_run_state_machine(struct tcpm_port *port)
2551 {
2552 	struct pd_message msg;
2553 	int i, res = 0;
2554 	u32 vdo_hdr = port->vdo_data[0];
2555 	u32 response[8] = { };
2556 
2557 	switch (port->vdm_state) {
2558 	case VDM_STATE_READY:
2559 		/* Only transmit VDM if attached */
2560 		if (!port->attached) {
2561 			port->vdm_state = VDM_STATE_ERR_BUSY;
2562 			break;
2563 		}
2564 
2565 		/*
2566 		 * If there's traffic, or we're not in a PDO-ready state, don't
2567 		 * send a VDM.
2568 		 */
2569 		if (port->state != SRC_READY && port->state != SNK_READY &&
2570 		    port->state != SRC_VDM_IDENTITY_REQUEST) {
2571 			port->vdm_sm_running = false;
2572 			break;
2573 		}
2574 
2575 		/* TODO: AMS operation for Unstructured VDM */
2576 		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
2577 			switch (PD_VDO_CMD(vdo_hdr)) {
2578 			case CMD_DISCOVER_IDENT:
2579 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
2580 				if (res == 0) {
2581 					switch (port->tx_sop_type) {
2582 					case TCPC_TX_SOP_PRIME:
2583 						port->send_discover_prime = false;
2584 						break;
2585 					case TCPC_TX_SOP:
2586 						port->send_discover = false;
2587 						break;
2588 					default:
2589 						port->send_discover = false;
2590 						break;
2591 					}
2592 				} else if (res == -EAGAIN) {
2593 					port->vdo_data[0] = 0;
2594 					mod_send_discover_delayed_work(port,
2595 								       SEND_DISCOVER_RETRY_MS);
2596 				}
2597 				break;
2598 			case CMD_DISCOVER_SVID:
2599 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
2600 				break;
2601 			case CMD_DISCOVER_MODES:
2602 				res = tcpm_ams_start(port, DISCOVER_MODES);
2603 				break;
2604 			case CMD_ENTER_MODE:
2605 				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2606 				break;
2607 			case CMD_EXIT_MODE:
2608 				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2609 				break;
2610 			case CMD_ATTENTION:
2611 				res = tcpm_ams_start(port, ATTENTION);
2612 				break;
2613 			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2614 				res = tcpm_ams_start(port, STRUCTURED_VDMS);
2615 				break;
2616 			default:
2617 				res = -EOPNOTSUPP;
2618 				break;
2619 			}
2620 
2621 			if (res < 0) {
2622 				port->vdm_state = VDM_STATE_ERR_BUSY;
2623 				return;
2624 			}
2625 		}
2626 
2627 		port->vdm_state = VDM_STATE_SEND_MESSAGE;
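		/*
		 * PD 3.0 sink-transmit collision avoidance: as the source
		 * initiating an SVDM, wait tSinkTx before sending the first
		 * message of the AMS.
		 */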
2628 		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2629 					    port->pwr_role == TYPEC_SOURCE &&
2630 					    PD_VDO_SVDM(vdo_hdr) &&
2631 					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2632 					   PD_T_SINK_TX : 0);
2633 		break;
2634 	case VDM_STATE_WAIT_RSP_BUSY:
2635 		port->vdo_data[0] = port->vdo_retry;
2636 		port->vdo_count = 1;
2637 		port->vdm_state = VDM_STATE_READY;
2638 		tcpm_ams_finish(port);
2639 		break;
2640 	case VDM_STATE_BUSY:
2641 		port->vdm_state = VDM_STATE_ERR_TMOUT;
2642 		if (port->ams != NONE_AMS)
2643 			tcpm_ams_finish(port);
2644 		break;
2645 	case VDM_STATE_ERR_SEND:
2646 		/*
2647 		 * When sending Discover Identity to SOP' before establishing an
2648 		 * explicit contract, do not retry. Instead, weave sending
2649 		 * Source_Capabilities over SOP and Discover Identity over SOP'.
2650 		 */
2651 		if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2652 			tcpm_ams_finish(port);
2653 			port->vdm_state = VDM_STATE_DONE;
2654 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2655 		/*
2656 		 * A partner which does not support USB PD will not reply,
2657 		 * so this is not a fatal error. At the same time, some
2658 		 * devices may not return GoodCRC under some circumstances,
2659 		 * so we need to retry.
2660 		 */
2661 		} else if (port->vdm_retries < 3) {
2662 			tcpm_log(port, "VDM Tx error, retry");
2663 			port->vdm_retries++;
2664 			port->vdm_state = VDM_STATE_READY;
2665 			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2666 				tcpm_ams_finish(port);
2667 		} else {
2668 			tcpm_ams_finish(port);
2669 			if (port->tx_sop_type == TCPC_TX_SOP)
2670 				break;
2671 			/* Handle SOP' Transmission Errors */
2672 			switch (PD_VDO_CMD(vdo_hdr)) {
2673 			/*
2674 			 * If Discover Identity fails on SOP', then resume
2675 			 * discovery process on SOP only.
2676 			 */
2677 			case CMD_DISCOVER_IDENT:
2678 				port->vdo_data[0] = 0;
2679 				response[0] = VDO(USB_SID_PD, 1,
2680 						  typec_get_negotiated_svdm_version(
2681 									port->typec_port),
2682 						  CMD_DISCOVER_SVID);
2683 				tcpm_queue_vdm(port, response[0], &response[1],
2684 					       0, TCPC_TX_SOP);
2685 				break;
2686 			/*
2687 			 * If Discover SVIDs or Discover Modes fail, then
2688 			 * proceed with Alt Mode discovery process on SOP.
2689 			 */
2690 			case CMD_DISCOVER_SVID:
2691 				tcpm_register_partner_altmodes(port);
2692 				break;
2693 			case CMD_DISCOVER_MODES:
2694 				tcpm_register_partner_altmodes(port);
2695 				break;
2696 			default:
2697 				break;
2698 			}
2699 		}
2700 		break;
2701 	case VDM_STATE_SEND_MESSAGE:
2702 		/* Prepare and send VDM */
2703 		memset(&msg, 0, sizeof(msg));
2704 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
2705 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2706 						  0,	/* Cable Plug Indicator for DFP/UFP */
2707 						  0,	/* Reserved */
2708 						  port->negotiated_rev_prime,
2709 						  port->message_id_prime,
2710 						  port->vdo_count);
2711 		} else {
2712 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2713 						  port->pwr_role,
2714 						  port->data_role,
2715 						  port->negotiated_rev,
2716 						  port->message_id,
2717 						  port->vdo_count);
2718 		}
2719 		for (i = 0; i < port->vdo_count; i++)
2720 			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2721 		res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
2722 		if (res < 0) {
2723 			port->vdm_state = VDM_STATE_ERR_SEND;
2724 		} else {
2725 			unsigned long timeout;
2726 
2727 			port->vdm_retries = 0;
2728 			port->vdo_data[0] = 0;
2729 			port->vdm_state = VDM_STATE_BUSY;
2730 			timeout = vdm_ready_timeout(vdo_hdr);
2731 			mod_vdm_delayed_work(port, timeout);
2732 		}
2733 		break;
2734 	default:
2735 		break;
2736 	}
2737 }
2738 
2739 static void vdm_state_machine_work(struct kthread_work *work)
2740 {
2741 	struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2742 	enum vdm_states prev_state;
2743 
2744 	mutex_lock(&port->lock);
2745 
2746 	/*
2747 	 * Continue running as long as the port is not busy and there was
2748 	 * a state change.
2749 	 */
2750 	do {
2751 		prev_state = port->vdm_state;
2752 		vdm_run_state_machine(port);
2753 	} while (port->vdm_state != prev_state &&
2754 		 port->vdm_state != VDM_STATE_BUSY &&
2755 		 port->vdm_state != VDM_STATE_SEND_MESSAGE);
2756 
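	/* DONE or an error state means the VDM state machine has stopped running. */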
2757 	if (port->vdm_state < VDM_STATE_READY)
2758 		port->vdm_sm_running = false;
2759 
2760 	mutex_unlock(&port->lock);
2761 }
2762 
2763 enum pdo_err {
2764 	PDO_NO_ERR,
2765 	PDO_ERR_NO_VSAFE5V,
2766 	PDO_ERR_VSAFE5V_NOT_FIRST,
2767 	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2768 	PDO_ERR_FIXED_NOT_SORTED,
2769 	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2770 	PDO_ERR_DUPE_PDO,
2771 	PDO_ERR_PPS_APDO_NOT_SORTED,
2772 	PDO_ERR_DUPE_PPS_APDO,
2773 };
2774 
2775 static const char * const pdo_err_msg[] = {
2776 	[PDO_ERR_NO_VSAFE5V] =
2777 	" err: source/sink caps should at least have vSafe5V",
2778 	[PDO_ERR_VSAFE5V_NOT_FIRST] =
2779 	" err: vSafe5V Fixed Supply Object Shall always be the first object",
2780 	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2781 	" err: PDOs should be in the following order: Fixed; Battery; Variable",
2782 	[PDO_ERR_FIXED_NOT_SORTED] =
2783 	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
2784 	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2785 	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2786 	[PDO_ERR_DUPE_PDO] =
2787 	" err: Variable/Batt supply pdos cannot have same min/max voltage",
2788 	[PDO_ERR_PPS_APDO_NOT_SORTED] =
2789 	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2790 	[PDO_ERR_DUPE_PPS_APDO] =
2791 	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
2792 };
2793 
2794 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2795 				  unsigned int nr_pdo)
2796 {
2797 	unsigned int i;
2798 
2799 	/* Should at least contain vSafe5v */
2800 	if (nr_pdo < 1)
2801 		return PDO_ERR_NO_VSAFE5V;
2802 
2803 	/* The vSafe5V Fixed Supply Object Shall always be the first object */
2804 	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2805 	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2806 		return PDO_ERR_VSAFE5V_NOT_FIRST;
2807 
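	/*
	 * The remaining PDOs must be grouped by type in ascending order
	 * (Fixed, Battery, Variable, then Augmented), with each group
	 * ordered as checked below.
	 */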
2808 	for (i = 1; i < nr_pdo; i++) {
2809 		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2810 			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2811 		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2812 			enum pd_pdo_type type = pdo_type(pdo[i]);
2813 
2814 			switch (type) {
2815 			/*
2816 			 * The remaining Fixed Supply Objects, if
2817 			 * present, shall be sent in voltage order;
2818 			 * lowest to highest.
2819 			 */
2820 			case PDO_TYPE_FIXED:
2821 				if (pdo_fixed_voltage(pdo[i]) <=
2822 				    pdo_fixed_voltage(pdo[i - 1]))
2823 					return PDO_ERR_FIXED_NOT_SORTED;
2824 				break;
2825 			/*
2826 			 * The Battery Supply Objects and Variable
2827 			 * Supply Objects, if present, shall be sent in Minimum
2828 			 * Voltage order; lowest to highest.
2829 			 */
2830 			case PDO_TYPE_VAR:
2831 			case PDO_TYPE_BATT:
2832 				if (pdo_min_voltage(pdo[i]) <
2833 				    pdo_min_voltage(pdo[i - 1]))
2834 					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2835 				else if ((pdo_min_voltage(pdo[i]) ==
2836 					  pdo_min_voltage(pdo[i - 1])) &&
2837 					 (pdo_max_voltage(pdo[i]) ==
2838 					  pdo_max_voltage(pdo[i - 1])))
2839 					return PDO_ERR_DUPE_PDO;
2840 				break;
2841 			/*
2842 			 * The Programmable Power Supply APDOs, if present,
2843 			 * shall be sent in Maximum Voltage order;
2844 			 * lowest to highest.
2845 			 */
2846 			case PDO_TYPE_APDO:
2847 				if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2848 					break;
2849 
2850 				if (pdo_pps_apdo_max_voltage(pdo[i]) <
2851 				    pdo_pps_apdo_max_voltage(pdo[i - 1]))
2852 					return PDO_ERR_PPS_APDO_NOT_SORTED;
2853 				else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2854 					  pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2855 					 pdo_pps_apdo_max_voltage(pdo[i]) ==
2856 					  pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2857 					 pdo_pps_apdo_max_current(pdo[i]) ==
2858 					  pdo_pps_apdo_max_current(pdo[i - 1]))
2859 					return PDO_ERR_DUPE_PPS_APDO;
2860 				break;
2861 			default:
2862 				tcpm_log_force(port, " Unknown pdo type");
2863 			}
2864 		}
2865 	}
2866 
2867 	return PDO_NO_ERR;
2868 }
2869 
2870 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2871 			      unsigned int nr_pdo)
2872 {
2873 	enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2874 
2875 	if (err_index != PDO_NO_ERR) {
2876 		tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2877 		return -EINVAL;
2878 	}
2879 
2880 	return 0;
2881 }
2882 
2883 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2884 {
2885 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2886 	int svdm_version;
2887 	u32 header;
2888 
2889 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2890 	if (svdm_version < 0)
2891 		return svdm_version;
2892 
2893 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2894 	header |= VDO_OPOS(altmode->mode);
2895 
2896 	return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
2897 }
2898 
2899 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2900 {
2901 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2902 	int svdm_version;
2903 	u32 header;
2904 
2905 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2906 	if (svdm_version < 0)
2907 		return svdm_version;
2908 
2909 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2910 	header |= VDO_OPOS(altmode->mode);
2911 
2912 	return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
2913 }
2914 
2915 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2916 			    u32 header, const u32 *data, int count)
2917 {
2918 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2919 
2920 	return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
2921 }
2922 
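/*
 * Ops backing typec_altmode_enter/exit/vdm() calls from alt mode drivers.
 * Each queues a VDM over SOP via tcpm_queue_vdm_unlocked() because the
 * caller does not hold the tcpm port lock.
 */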
2923 static const struct typec_altmode_ops tcpm_altmode_ops = {
2924 	.enter = tcpm_altmode_enter,
2925 	.exit = tcpm_altmode_exit,
2926 	.vdm = tcpm_altmode_vdm,
2927 };
2928 
2929 
2930 static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
2931 				    u32 *vdo)
2932 {
2933 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2934 	int svdm_version;
2935 	u32 header;
2936 
2937 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2938 	if (svdm_version < 0)
2939 		return svdm_version;
2940 
2941 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2942 	header |= VDO_OPOS(altmode->mode);
2943 
2944 	return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
2945 }
2946 
2947 static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
2948 {
2949 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2950 	int svdm_version;
2951 	u32 header;
2952 
2953 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2954 	if (svdm_version < 0)
2955 		return svdm_version;
2956 
2957 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2958 	header |= VDO_OPOS(altmode->mode);
2959 
2960 	return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
2961 }
2962 
2963 static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
2964 				  u32 header, const u32 *data, int count)
2965 {
2966 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2967 
2968 	return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
2969 }
2970 
2971 static const struct typec_cable_ops tcpm_cable_ops = {
2972 	.enter = tcpm_cable_altmode_enter,
2973 	.exit = tcpm_cable_altmode_exit,
2974 	.vdm = tcpm_cable_altmode_vdm,
2975 };
2976 
2977 /*
2978  * PD (data, control) command handling functions
2979  */
2980 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2981 {
2982 	if (port->pwr_role == TYPEC_SOURCE)
2983 		return SRC_READY;
2984 	else
2985 		return SNK_READY;
2986 }
2987 
2988 static int tcpm_pd_send_control(struct tcpm_port *port,
2989 				enum pd_ctrl_msg_type type,
2990 				enum tcpm_transmit_type tx_sop_type);
2991 
2992 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2993 			      int cnt)
2994 {
2995 	u32 p0 = le32_to_cpu(payload[0]);
2996 	unsigned int type = usb_pd_ado_type(p0);
2997 
2998 	if (!type) {
2999 		tcpm_log(port, "Alert message received with no type");
3000 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
3001 		return;
3002 	}
3003 
3004 	/* Just handling non-battery alerts for now */
3005 	if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
3006 		if (port->pwr_role == TYPEC_SOURCE) {
3007 			port->upcoming_state = GET_STATUS_SEND;
3008 			tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
3009 		} else {
3010 			/*
3011 			 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
3012 			 * SinkTxOk in time.
3013 			 */
3014 			port->ams = GETTING_SOURCE_SINK_STATUS;
3015 			tcpm_set_state(port, GET_STATUS_SEND, 0);
3016 		}
3017 	} else {
3018 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
3019 	}
3020 }
3021 
3022 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
3023 						  enum typec_pwr_opmode mode, bool pps_active,
3024 						  u32 requested_vbus_voltage)
3025 {
3026 	int ret;
3027 
3028 	if (!port->tcpc->set_auto_vbus_discharge_threshold)
3029 		return 0;
3030 
3031 	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
3032 							    requested_vbus_voltage,
3033 							    port->pps_data.min_volt);
3034 	tcpm_log_force(port,
3035 		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
3036 		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
3037 		       port->pps_data.min_volt, ret);
3038 
3039 	return ret;
3040 }
3041 
3042 static void tcpm_pd_handle_state(struct tcpm_port *port,
3043 				 enum tcpm_state state,
3044 				 enum tcpm_ams ams,
3045 				 unsigned int delay_ms)
3046 {
3047 	switch (port->state) {
3048 	case SRC_READY:
3049 	case SNK_READY:
3050 		port->ams = ams;
3051 		tcpm_set_state(port, state, delay_ms);
3052 		break;
3053 	/* 8.3.3.4.1.1 and 6.8.1 power transitioning */
3054 	case SNK_TRANSITION_SINK:
3055 	case SNK_TRANSITION_SINK_VBUS:
3056 	case SRC_TRANSITION_SUPPLY:
3057 		tcpm_set_state(port, HARD_RESET_SEND, 0);
3058 		break;
3059 	default:
3060 		if (!tcpm_ams_interruptible(port)) {
3061 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3062 				       SRC_SOFT_RESET_WAIT_SNK_TX :
3063 				       SNK_SOFT_RESET,
3064 				       0);
3065 		} else {
3066 			/* process the Message 6.8.1 */
3067 			port->upcoming_state = state;
3068 			port->next_ams = ams;
3069 			tcpm_set_state(port, ready_state(port), delay_ms);
3070 		}
3071 		break;
3072 	}
3073 }
3074 
3075 static void tcpm_pd_handle_msg(struct tcpm_port *port,
3076 			       enum pd_msg_request message,
3077 			       enum tcpm_ams ams)
3078 {
3079 	switch (port->state) {
3080 	case SRC_READY:
3081 	case SNK_READY:
3082 		port->ams = ams;
3083 		tcpm_queue_message(port, message);
3084 		break;
3085 	/* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
3086 	case SNK_TRANSITION_SINK:
3087 	case SNK_TRANSITION_SINK_VBUS:
3088 	case SRC_TRANSITION_SUPPLY:
3089 		tcpm_set_state(port, HARD_RESET_SEND, 0);
3090 		break;
3091 	default:
3092 		if (!tcpm_ams_interruptible(port)) {
3093 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3094 				       SRC_SOFT_RESET_WAIT_SNK_TX :
3095 				       SNK_SOFT_RESET,
3096 				       0);
3097 		} else {
3098 			port->next_ams = ams;
3099 			tcpm_set_state(port, ready_state(port), 0);
3100 			/* 6.8.1 process the Message */
3101 			tcpm_queue_message(port, message);
3102 		}
3103 		break;
3104 	}
3105 }
3106 
3107 static int tcpm_register_source_caps(struct tcpm_port *port)
3108 {
3109 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3110 	struct usb_power_delivery_capabilities_desc caps = { };
3111 	struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
3112 
3113 	if (!port->partner_pd)
3114 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3115 	if (IS_ERR(port->partner_pd))
3116 		return PTR_ERR(port->partner_pd);
3117 
3118 	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
3119 	caps.role = TYPEC_SOURCE;
3120 
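	/* Unregister any previously registered partner source caps before replacing them. */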
3121 	if (cap) {
3122 		usb_power_delivery_unregister_capabilities(cap);
3123 		port->partner_source_caps = NULL;
3124 	}
3125 
3126 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3127 	if (IS_ERR(cap))
3128 		return PTR_ERR(cap);
3129 
3130 	port->partner_source_caps = cap;
3131 
3132 	return 0;
3133 }
3134 
3135 static int tcpm_register_sink_caps(struct tcpm_port *port)
3136 {
3137 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3138 	struct usb_power_delivery_capabilities_desc caps = { };
3139 	struct usb_power_delivery_capabilities *cap;
3140 
3141 	if (!port->partner_pd)
3142 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3143 	if (IS_ERR(port->partner_pd))
3144 		return PTR_ERR(port->partner_pd);
3145 
3146 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
3147 	caps.role = TYPEC_SINK;
3148 
3149 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3150 	if (IS_ERR(cap))
3151 		return PTR_ERR(cap);
3152 
3153 	port->partner_sink_caps = cap;
3154 
3155 	return 0;
3156 }
3157 
3158 static void tcpm_pd_data_request(struct tcpm_port *port,
3159 				 const struct pd_message *msg,
3160 				 enum tcpm_transmit_type rx_sop_type)
3161 {
3162 	enum pd_data_msg_type type = pd_header_type_le(msg->header);
3163 	unsigned int cnt = pd_header_cnt_le(msg->header);
3164 	unsigned int rev = pd_header_rev_le(msg->header);
3165 	unsigned int i;
3166 	enum frs_typec_current partner_frs_current;
3167 	bool frs_enable;
3168 	int ret;
3169 
3170 	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
3171 		port->vdm_state = VDM_STATE_ERR_BUSY;
3172 		tcpm_ams_finish(port);
3173 		mod_vdm_delayed_work(port, 0);
3174 	}
3175 
3176 	switch (type) {
3177 	case PD_DATA_SOURCE_CAP:
3178 		for (i = 0; i < cnt; i++)
3179 			port->source_caps[i] = le32_to_cpu(msg->payload[i]);
3180 
3181 		port->nr_source_caps = cnt;
3182 
3183 		tcpm_log_source_caps(port);
3184 
3185 		tcpm_validate_caps(port, port->source_caps,
3186 				   port->nr_source_caps);
3187 
3188 		tcpm_register_source_caps(port);
3189 
3190 		/*
3191 		 * Adjust revision in subsequent message headers, as required,
3192 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3193 		 * support Rev 1.0 so just do nothing in that scenario.
3194 		 */
3195 		if (rev == PD_REV10) {
3196 			if (port->ams == GET_SOURCE_CAPABILITIES)
3197 				tcpm_ams_finish(port);
3198 			break;
3199 		}
3200 
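		/* Track the partner's revision and never let the SOP' revision exceed the SOP revision. */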
3201 		if (rev < PD_MAX_REV) {
3202 			port->negotiated_rev = rev;
3203 			if (port->negotiated_rev_prime > port->negotiated_rev)
3204 				port->negotiated_rev_prime = port->negotiated_rev;
3205 		}
3206 
3207 		if (port->pwr_role == TYPEC_SOURCE) {
3208 			if (port->ams == GET_SOURCE_CAPABILITIES)
3209 				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
3210 			/* Unexpected Source Capabilities */
3211 			else
3212 				tcpm_pd_handle_msg(port,
3213 						   port->negotiated_rev < PD_REV30 ?
3214 						   PD_MSG_CTRL_REJECT :
3215 						   PD_MSG_CTRL_NOT_SUPP,
3216 						   NONE_AMS);
3217 		} else if (port->state == SNK_WAIT_CAPABILITIES ||
3218 			   port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) {
3219 		/*
3220 		 * This message may be received even if VBUS is not
3221 		 * present. This is quite unexpected; see USB PD
3222 		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
3223 		 * However, at the same time, we must be ready to
3224 		 * receive this message and respond to it 15ms after
3225 		 * receiving PS_RDY during power swap operations, regardless of
3226 		 * whether VBUS is available (USB PD specification,
3227 		 * section 6.5.9.2).
3228 		 * So we need to accept the message either way,
3229 		 * but be prepared to keep waiting for VBUS after it was
3230 		 * handled.
3231 		 */
3232 			port->ams = POWER_NEGOTIATION;
3233 			port->in_ams = true;
3234 			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3235 		} else {
3236 			if (port->ams == GET_SOURCE_CAPABILITIES)
3237 				tcpm_ams_finish(port);
3238 			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
3239 					     POWER_NEGOTIATION, 0);
3240 		}
3241 		break;
3242 	case PD_DATA_REQUEST:
3243 		/*
3244 		 * Adjust revision in subsequent message headers, as required,
3245 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3246 		 * support Rev 1.0 so just reject in that scenario.
3247 		 */
3248 		if (rev == PD_REV10) {
3249 			tcpm_pd_handle_msg(port,
3250 					   port->negotiated_rev < PD_REV30 ?
3251 					   PD_MSG_CTRL_REJECT :
3252 					   PD_MSG_CTRL_NOT_SUPP,
3253 					   NONE_AMS);
3254 			break;
3255 		}
3256 
3257 		if (rev < PD_MAX_REV) {
3258 			port->negotiated_rev = rev;
3259 			if (port->negotiated_rev_prime > port->negotiated_rev)
3260 				port->negotiated_rev_prime = port->negotiated_rev;
3261 		}
3262 
3263 		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
3264 			tcpm_pd_handle_msg(port,
3265 					   port->negotiated_rev < PD_REV30 ?
3266 					   PD_MSG_CTRL_REJECT :
3267 					   PD_MSG_CTRL_NOT_SUPP,
3268 					   NONE_AMS);
3269 			break;
3270 		}
3271 
3272 		port->sink_request = le32_to_cpu(msg->payload[0]);
3273 
3274 		if (port->vdm_sm_running && port->explicit_contract) {
3275 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
3276 			break;
3277 		}
3278 
3279 		if (port->state == SRC_SEND_CAPABILITIES)
3280 			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
3281 		else
3282 			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
3283 					     POWER_NEGOTIATION, 0);
3284 		break;
3285 	case PD_DATA_SINK_CAP:
3286 		/* We don't do anything with this at the moment... */
3287 		for (i = 0; i < cnt; i++)
3288 			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
3289 
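		/*
		 * Enable Fast Role Swap only when the partner advertises an FRS
		 * current requirement that this port can supply.
		 */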
3290 		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
3291 			PDO_FIXED_FRS_CURR_SHIFT;
3292 		frs_enable = partner_frs_current && (partner_frs_current <=
3293 						     port->new_source_frs_current);
3294 		tcpm_log(port,
3295 			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
3296 			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
3297 		if (frs_enable) {
3298 			ret  = port->tcpc->enable_frs(port->tcpc, true);
3299 			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
3300 		}
3301 
3302 		port->nr_sink_caps = cnt;
3303 		port->sink_cap_done = true;
3304 		tcpm_register_sink_caps(port);
3305 
3306 		if (port->ams == GET_SINK_CAPABILITIES)
3307 			tcpm_set_state(port, ready_state(port), 0);
3308 		/* Unexpected Sink Capabilities */
3309 		else
3310 			tcpm_pd_handle_msg(port,
3311 					   port->negotiated_rev < PD_REV30 ?
3312 					   PD_MSG_CTRL_REJECT :
3313 					   PD_MSG_CTRL_NOT_SUPP,
3314 					   NONE_AMS);
3315 		break;
3316 	case PD_DATA_VENDOR_DEF:
3317 		tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
3318 		break;
3319 	case PD_DATA_BIST:
3320 		port->bist_request = le32_to_cpu(msg->payload[0]);
3321 		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
3322 		break;
3323 	case PD_DATA_ALERT:
3324 		if (port->state != SRC_READY && port->state != SNK_READY)
3325 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3326 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3327 					     NONE_AMS, 0);
3328 		else
3329 			tcpm_handle_alert(port, msg->payload, cnt);
3330 		break;
3331 	case PD_DATA_BATT_STATUS:
3332 	case PD_DATA_GET_COUNTRY_INFO:
3333 		/* Currently unsupported */
3334 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3335 				   PD_MSG_CTRL_REJECT :
3336 				   PD_MSG_CTRL_NOT_SUPP,
3337 				   NONE_AMS);
3338 		break;
3339 	default:
3340 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3341 				   PD_MSG_CTRL_REJECT :
3342 				   PD_MSG_CTRL_NOT_SUPP,
3343 				   NONE_AMS);
3344 		tcpm_log(port, "Unrecognized data message type %#x", type);
3345 		break;
3346 	}
3347 }
3348 
3349 static void tcpm_pps_complete(struct tcpm_port *port, int result)
3350 {
3351 	if (port->pps_pending) {
3352 		port->pps_status = result;
3353 		port->pps_pending = false;
3354 		complete(&port->pps_complete);
3355 	}
3356 }
3357 
3358 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
3359 				 const struct pd_message *msg,
3360 				 enum tcpm_transmit_type rx_sop_type)
3361 {
3362 	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3363 	enum tcpm_state next_state;
3364 	unsigned int rev = pd_header_rev_le(msg->header);
3365 
3366 	/*
3367 	 * Stop the VDM state machine if it is interrupted by other Messages. NOT_SUPP is allowed
3368 	 * in a VDM AMS while waiting for VDM responses and will be handled later.
3369 	 */
3370 	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
3371 		port->vdm_state = VDM_STATE_ERR_BUSY;
3372 		tcpm_ams_finish(port);
3373 		mod_vdm_delayed_work(port, 0);
3374 	}
3375 
3376 	switch (type) {
3377 	case PD_CTRL_GOOD_CRC:
3378 	case PD_CTRL_PING:
3379 		break;
3380 	case PD_CTRL_GET_SOURCE_CAP:
3381 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
3382 		break;
3383 	case PD_CTRL_GET_SINK_CAP:
3384 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
3385 		break;
3386 	case PD_CTRL_GOTO_MIN:
3387 		break;
3388 	case PD_CTRL_PS_RDY:
3389 		switch (port->state) {
3390 		case SNK_TRANSITION_SINK:
3391 			if (port->vbus_present) {
3392 				tcpm_set_current_limit(port,
3393 						       port->req_current_limit,
3394 						       port->req_supply_voltage);
3395 				port->explicit_contract = true;
3396 				tcpm_set_auto_vbus_discharge_threshold(port,
3397 								       TYPEC_PWR_MODE_PD,
3398 								       port->pps_data.active,
3399 								       port->supply_voltage);
3400 				tcpm_set_state(port, SNK_READY, 0);
3401 			} else {
3402 				/*
3403 				 * Seen after power swap. Keep waiting for VBUS
3404 				 * in a transitional state.
3405 				 */
3406 				tcpm_set_state(port,
3407 					       SNK_TRANSITION_SINK_VBUS, 0);
3408 			}
3409 			break;
3410 		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3411 			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
3412 			break;
3413 		case PR_SWAP_SNK_SRC_SINK_OFF:
3414 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
3415 			break;
3416 		case VCONN_SWAP_WAIT_FOR_VCONN:
3417 			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
3418 			break;
3419 		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
3420 			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
3421 			break;
3422 		default:
3423 			tcpm_pd_handle_state(port,
3424 					     port->pwr_role == TYPEC_SOURCE ?
3425 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3426 					     SNK_SOFT_RESET,
3427 					     NONE_AMS, 0);
3428 			break;
3429 		}
3430 		break;
3431 	case PD_CTRL_REJECT:
3432 	case PD_CTRL_WAIT:
3433 	case PD_CTRL_NOT_SUPP:
3434 		switch (port->state) {
3435 		case SNK_NEGOTIATE_CAPABILITIES:
3436 			/* USB PD specification, Figure 8-43 */
3437 			if (port->explicit_contract)
3438 				next_state = SNK_READY;
3439 			else
3440 				next_state = SNK_WAIT_CAPABILITIES;
3441 
3442 			/* Threshold was relaxed before sending the Request. Restore it. */
3443 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3444 							       port->pps_data.active,
3445 							       port->supply_voltage);
3446 			tcpm_set_state(port, next_state, 0);
3447 			break;
3448 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3449 			/* Revert data back from any requested PPS updates */
3450 			port->pps_data.req_out_volt = port->supply_voltage;
3451 			port->pps_data.req_op_curr = port->current_limit;
3452 			port->pps_status = (type == PD_CTRL_WAIT ?
3453 					    -EAGAIN : -EOPNOTSUPP);
3454 
3455 			/* Threshold was relaxed before sending the Request. Restore it. */
3456 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3457 							       port->pps_data.active,
3458 							       port->supply_voltage);
3459 
3460 			tcpm_set_state(port, SNK_READY, 0);
3461 			break;
3462 		case DR_SWAP_SEND:
3463 			port->swap_status = (type == PD_CTRL_WAIT ?
3464 					     -EAGAIN : -EOPNOTSUPP);
3465 			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
3466 			break;
3467 		case PR_SWAP_SEND:
3468 			port->swap_status = (type == PD_CTRL_WAIT ?
3469 					     -EAGAIN : -EOPNOTSUPP);
3470 			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
3471 			break;
3472 		case VCONN_SWAP_SEND:
3473 			port->swap_status = (type == PD_CTRL_WAIT ?
3474 					     -EAGAIN : -EOPNOTSUPP);
3475 			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
3476 			break;
3477 		case FR_SWAP_SEND:
3478 			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
3479 			break;
3480 		case GET_SINK_CAP:
3481 			port->sink_cap_done = true;
3482 			tcpm_set_state(port, ready_state(port), 0);
3483 			break;
3484 		/*
3485 		 * Some port partners do not support GET_STATUS; avoid soft resetting the link
3486 		 * to prevent redundant power re-negotiation.
3487 		 */
3488 		case GET_STATUS_SEND:
3489 			tcpm_set_state(port, ready_state(port), 0);
3490 			break;
3491 		case SRC_READY:
3492 		case SNK_READY:
3493 			if (port->vdm_state > VDM_STATE_READY) {
3494 				port->vdm_state = VDM_STATE_DONE;
3495 				if (tcpm_vdm_ams(port))
3496 					tcpm_ams_finish(port);
3497 				mod_vdm_delayed_work(port, 0);
3498 				break;
3499 			}
3500 			fallthrough;
3501 		default:
3502 			tcpm_pd_handle_state(port,
3503 					     port->pwr_role == TYPEC_SOURCE ?
3504 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3505 					     SNK_SOFT_RESET,
3506 					     NONE_AMS, 0);
3507 			break;
3508 		}
3509 		break;
3510 	case PD_CTRL_ACCEPT:
3511 		switch (port->state) {
3512 		case SNK_NEGOTIATE_CAPABILITIES:
3513 			port->pps_data.active = false;
3514 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3515 			break;
3516 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3517 			port->pps_data.active = true;
3518 			port->pps_data.min_volt = port->pps_data.req_min_volt;
3519 			port->pps_data.max_volt = port->pps_data.req_max_volt;
3520 			port->pps_data.max_curr = port->pps_data.req_max_curr;
3521 			port->req_supply_voltage = port->pps_data.req_out_volt;
3522 			port->req_current_limit = port->pps_data.req_op_curr;
3523 			power_supply_changed(port->psy);
3524 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3525 			break;
3526 		case SOFT_RESET_SEND:
3527 			if (port->ams == SOFT_RESET_AMS)
3528 				tcpm_ams_finish(port);
3529 			/*
3530 			 * A SOP' Soft Reset is performed after a Vconn Swap;
3531 			 * on completion, return to the ready state.
3532 			 */
3533 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
3534 				if (rev < port->negotiated_rev_prime)
3535 					port->negotiated_rev_prime = rev;
3536 				tcpm_set_state(port, ready_state(port), 0);
3537 				break;
3538 			}
3539 			if (port->pwr_role == TYPEC_SOURCE) {
3540 				port->upcoming_state = SRC_SEND_CAPABILITIES;
3541 				tcpm_ams_start(port, POWER_NEGOTIATION);
3542 			} else {
3543 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3544 			}
3545 			break;
3546 		case DR_SWAP_SEND:
3547 			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
3548 			break;
3549 		case PR_SWAP_SEND:
3550 			tcpm_set_state(port, PR_SWAP_START, 0);
3551 			break;
3552 		case VCONN_SWAP_SEND:
3553 			tcpm_set_state(port, VCONN_SWAP_START, 0);
3554 			break;
3555 		case FR_SWAP_SEND:
3556 			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
3557 			break;
3558 		default:
3559 			tcpm_pd_handle_state(port,
3560 					     port->pwr_role == TYPEC_SOURCE ?
3561 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3562 					     SNK_SOFT_RESET,
3563 					     NONE_AMS, 0);
3564 			break;
3565 		}
3566 		break;
3567 	case PD_CTRL_SOFT_RESET:
3568 		port->ams = SOFT_RESET_AMS;
3569 		tcpm_set_state(port, SOFT_RESET, 0);
3570 		break;
3571 	case PD_CTRL_DR_SWAP:
3572 		/*
3573 		 * XXX
3574 		 * 6.3.9: If an alternate mode is active, a request to swap
3575 		 * alternate modes shall trigger a port reset.
3576 		 */
3577 		if (port->typec_caps.data != TYPEC_PORT_DRD) {
3578 			tcpm_pd_handle_msg(port,
3579 					   port->negotiated_rev < PD_REV30 ?
3580 					   PD_MSG_CTRL_REJECT :
3581 					   PD_MSG_CTRL_NOT_SUPP,
3582 					   NONE_AMS);
3583 		} else {
3584 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3585 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3586 				break;
3587 			}
3588 
3589 			tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
3590 		}
3591 		break;
3592 	case PD_CTRL_PR_SWAP:
3593 		if (port->port_type != TYPEC_PORT_DRP) {
3594 			tcpm_pd_handle_msg(port,
3595 					   port->negotiated_rev < PD_REV30 ?
3596 					   PD_MSG_CTRL_REJECT :
3597 					   PD_MSG_CTRL_NOT_SUPP,
3598 					   NONE_AMS);
3599 		} else {
3600 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3601 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3602 				break;
3603 			}
3604 
3605 			tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
3606 		}
3607 		break;
3608 	case PD_CTRL_VCONN_SWAP:
3609 		if (port->send_discover && port->negotiated_rev < PD_REV30) {
3610 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3611 			break;
3612 		}
3613 
3614 		tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
3615 		break;
3616 	case PD_CTRL_GET_SOURCE_CAP_EXT:
3617 	case PD_CTRL_GET_STATUS:
3618 	case PD_CTRL_FR_SWAP:
3619 	case PD_CTRL_GET_PPS_STATUS:
3620 	case PD_CTRL_GET_COUNTRY_CODES:
3621 		/* Currently not supported */
3622 		tcpm_pd_handle_msg(port,
3623 				   port->negotiated_rev < PD_REV30 ?
3624 				   PD_MSG_CTRL_REJECT :
3625 				   PD_MSG_CTRL_NOT_SUPP,
3626 				   NONE_AMS);
3627 		break;
3628 	case PD_CTRL_GET_REVISION:
3629 		if (port->negotiated_rev >= PD_REV30 && port->pd_rev.rev_major)
3630 			tcpm_pd_handle_msg(port, PD_MSG_DATA_REV,
3631 					   REVISION_INFORMATION);
3632 		else
3633 			tcpm_pd_handle_msg(port,
3634 					   port->negotiated_rev < PD_REV30 ?
3635 					   PD_MSG_CTRL_REJECT :
3636 					   PD_MSG_CTRL_NOT_SUPP,
3637 					   NONE_AMS);
3638 		break;
3639 	default:
3640 		tcpm_pd_handle_msg(port,
3641 				   port->negotiated_rev < PD_REV30 ?
3642 				   PD_MSG_CTRL_REJECT :
3643 				   PD_MSG_CTRL_NOT_SUPP,
3644 				   NONE_AMS);
3645 		tcpm_log(port, "Unrecognized ctrl message type %#x", type);
3646 		break;
3647 	}
3648 }
3649 
3650 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
3651 				    const struct pd_message *msg)
3652 {
3653 	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
3654 	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
3655 
3656 	/* Stop the VDM state machine if interrupted by other Messages */
3657 	if (tcpm_vdm_ams(port)) {
3658 		port->vdm_state = VDM_STATE_ERR_BUSY;
3659 		tcpm_ams_finish(port);
3660 		mod_vdm_delayed_work(port, 0);
3661 	}
3662 
3663 	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
3664 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3665 		tcpm_log(port, "Unchunked extended messages unsupported");
3666 		return;
3667 	}
3668 
3669 	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
3670 		tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
3671 		tcpm_log(port, "Chunk handling not yet supported");
3672 		return;
3673 	}
3674 
3675 	switch (type) {
3676 	case PD_EXT_STATUS:
3677 	case PD_EXT_PPS_STATUS:
3678 		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
3679 			tcpm_ams_finish(port);
3680 			tcpm_set_state(port, ready_state(port), 0);
3681 		} else {
3682 			/* unexpected Status or PPS_Status Message */
3683 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3684 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3685 					     NONE_AMS, 0);
3686 		}
3687 		break;
3688 	case PD_EXT_SOURCE_CAP_EXT:
3689 	case PD_EXT_GET_BATT_CAP:
3690 	case PD_EXT_GET_BATT_STATUS:
3691 	case PD_EXT_BATT_CAP:
3692 	case PD_EXT_GET_MANUFACTURER_INFO:
3693 	case PD_EXT_MANUFACTURER_INFO:
3694 	case PD_EXT_SECURITY_REQUEST:
3695 	case PD_EXT_SECURITY_RESPONSE:
3696 	case PD_EXT_FW_UPDATE_REQUEST:
3697 	case PD_EXT_FW_UPDATE_RESPONSE:
3698 	case PD_EXT_COUNTRY_INFO:
3699 	case PD_EXT_COUNTRY_CODES:
3700 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3701 		break;
3702 	default:
3703 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3704 		tcpm_log(port, "Unrecognized extended message type %#x", type);
3705 		break;
3706 	}
3707 }
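
/*
 * Chunking note (illustrative): only extended messages that fit in a single
 * chunk are handled above. A chunk carries at most PD_EXT_MAX_CHUNK_DATA
 * bytes, so e.g. a Status or PPS_Status message fits and is processed, while
 * larger transfers are answered with "chunk not supported" after the
 * tChunkingNotSupported delay.
 */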
3708 
3709 static void tcpm_pd_rx_handler(struct kthread_work *work)
3710 {
3711 	struct pd_rx_event *event = container_of(work,
3712 						 struct pd_rx_event, work);
3713 	const struct pd_message *msg = &event->msg;
3714 	unsigned int cnt = pd_header_cnt_le(msg->header);
3715 	struct tcpm_port *port = event->port;
3716 	enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
3717 
3718 	mutex_lock(&port->lock);
3719 
3720 	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
3721 		 port->attached);
3722 
3723 	if (port->attached) {
3724 		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3725 		unsigned int msgid = pd_header_msgid_le(msg->header);
3726 
3727 		/*
3728 		 * Drop SOP' messages if they cannot be received, as reported
3729 		 * by tcpm_can_communicate_sop_prime().
3730 		 */
3731 		if (rx_sop_type == TCPC_TX_SOP_PRIME &&
3732 		    !tcpm_can_communicate_sop_prime(port))
3733 			goto done;
3734 
3735 		/*
3736 		 * USB PD standard, 6.6.1.2:
3737 		 * "... if MessageID value in a received Message is the
3738 		 * same as the stored value, the receiver shall return a
3739 		 * GoodCRC Message with that MessageID value and drop
3740 		 * the Message (this is a retry of an already received
3741 		 * Message). Note: this shall not apply to the Soft_Reset
3742 		 * Message which always has a MessageID value of zero."
3743 		 */
3744 		switch (rx_sop_type) {
3745 		case TCPC_TX_SOP_PRIME:
3746 			if (msgid == port->rx_msgid_prime)
3747 				goto done;
3748 			port->rx_msgid_prime = msgid;
3749 			break;
3750 		case TCPC_TX_SOP:
3751 		default:
3752 			if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3753 				goto done;
3754 			port->rx_msgid = msgid;
3755 			break;
3756 		}
3757 
3758 		/*
3759 		 * If both ends believe to be DFP/host, we have a data role
3760 		 * mismatch.
3761 		 */
3762 		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3763 		    (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
3764 			tcpm_log(port,
3765 				 "Data role mismatch, initiating error recovery");
3766 			tcpm_set_state(port, ERROR_RECOVERY, 0);
3767 		} else {
3768 			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3769 				tcpm_pd_ext_msg_request(port, msg);
3770 			else if (cnt)
3771 				tcpm_pd_data_request(port, msg, rx_sop_type);
3772 			else
3773 				tcpm_pd_ctrl_request(port, msg, rx_sop_type);
3774 		}
3775 	}
3776 
3777 done:
3778 	mutex_unlock(&port->lock);
3779 	kfree(event);
3780 }
3781 
3782 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
3783 		     enum tcpm_transmit_type rx_sop_type)
3784 {
3785 	struct pd_rx_event *event;
3786 
3787 	event = kzalloc(sizeof(*event), GFP_ATOMIC);
3788 	if (!event)
3789 		return;
3790 
3791 	kthread_init_work(&event->work, tcpm_pd_rx_handler);
3792 	event->port = port;
3793 	event->rx_sop_type = rx_sop_type;
3794 	memcpy(&event->msg, msg, sizeof(*msg));
3795 	kthread_queue_work(port->wq, &event->work);
3796 }
3797 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
3798 
3799 static int tcpm_pd_send_control(struct tcpm_port *port,
3800 				enum pd_ctrl_msg_type type,
3801 				enum tcpm_transmit_type tx_sop_type)
3802 {
3803 	struct pd_message msg;
3804 
3805 	memset(&msg, 0, sizeof(msg));
3806 	switch (tx_sop_type) {
3807 	case TCPC_TX_SOP_PRIME:
3808 		msg.header = PD_HEADER_LE(type,
3809 					  0,	/* Cable Plug Indicator for DFP/UFP */
3810 					  0,	/* Reserved */
3811 					  port->negotiated_rev,
3812 					  port->message_id_prime,
3813 					  0);
3814 		break;
3815 	case TCPC_TX_SOP:
3816 		msg.header = PD_HEADER_LE(type,
3817 					  port->pwr_role,
3818 					  port->data_role,
3819 					  port->negotiated_rev,
3820 					  port->message_id,
3821 					  0);
3822 		break;
3823 	default:
3824 		msg.header = PD_HEADER_LE(type,
3825 					  port->pwr_role,
3826 					  port->data_role,
3827 					  port->negotiated_rev,
3828 					  port->message_id,
3829 					  0);
3830 		break;
3831 	}
3832 
3833 	return tcpm_pd_transmit(port, tx_sop_type, &msg);
3834 }
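
/*
 * Usage sketch (illustrative): queued replies are drained through this same
 * helper, e.g. tcpm_send_queued_message() sends a queued PD_MSG_CTRL_NOT_SUPP
 * on SOP with:
 *
 *	tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
 *
 * Control messages carry no data objects, so the header is built with an
 * object count of zero.
 */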
3835 
3836 /*
3837  * Send queued message without affecting state.
3838  * Return true if state machine should go back to sleep,
3839  * false otherwise.
3840  */
3841 static bool tcpm_send_queued_message(struct tcpm_port *port)
3842 {
3843 	enum pd_msg_request queued_message;
3844 	int ret;
3845 
3846 	do {
3847 		queued_message = port->queued_message;
3848 		port->queued_message = PD_MSG_NONE;
3849 
3850 		switch (queued_message) {
3851 		case PD_MSG_CTRL_WAIT:
3852 			tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
3853 			break;
3854 		case PD_MSG_CTRL_REJECT:
3855 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
3856 			break;
3857 		case PD_MSG_CTRL_NOT_SUPP:
3858 			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
3859 			break;
3860 		case PD_MSG_DATA_SINK_CAP:
3861 			ret = tcpm_pd_send_sink_caps(port);
3862 			if (ret < 0) {
3863 				tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3864 				tcpm_set_state(port, SNK_SOFT_RESET, 0);
3865 			}
3866 			tcpm_ams_finish(port);
3867 			break;
3868 		case PD_MSG_DATA_SOURCE_CAP:
3869 			ret = tcpm_pd_send_source_caps(port);
3870 			if (ret < 0) {
3871 				tcpm_log(port,
3872 					 "Unable to send src caps, ret=%d",
3873 					 ret);
3874 				tcpm_set_state(port, SOFT_RESET_SEND, 0);
3875 			} else if (port->pwr_role == TYPEC_SOURCE) {
3876 				tcpm_ams_finish(port);
3877 				tcpm_set_state(port, HARD_RESET_SEND,
3878 					       PD_T_SENDER_RESPONSE);
3879 			} else {
3880 				tcpm_ams_finish(port);
3881 			}
3882 			break;
3883 		case PD_MSG_DATA_REV:
3884 			ret = tcpm_pd_send_revision(port);
3885 			if (ret)
3886 				tcpm_log(port,
3887 					 "Unable to send revision msg, ret=%d",
3888 					 ret);
3889 			tcpm_ams_finish(port);
3890 			break;
3891 		default:
3892 			break;
3893 		}
3894 	} while (port->queued_message != PD_MSG_NONE);
3895 
3896 	if (port->delayed_state != INVALID_STATE) {
3897 		if (ktime_after(port->delayed_runtime, ktime_get())) {
3898 			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3899 									  ktime_get())));
3900 			return true;
3901 		}
3902 		port->delayed_state = INVALID_STATE;
3903 	}
3904 	return false;
3905 }
3906 
3907 static int tcpm_pd_check_request(struct tcpm_port *port)
3908 {
3909 	u32 pdo, rdo = port->sink_request;
3910 	unsigned int max, op, pdo_max, index;
3911 	enum pd_pdo_type type;
3912 
3913 	index = rdo_index(rdo);
3914 	if (!index || index > port->nr_src_pdo)
3915 		return -EINVAL;
3916 
3917 	pdo = port->src_pdo[index - 1];
3918 	type = pdo_type(pdo);
3919 	switch (type) {
3920 	case PDO_TYPE_FIXED:
3921 	case PDO_TYPE_VAR:
3922 		max = rdo_max_current(rdo);
3923 		op = rdo_op_current(rdo);
3924 		pdo_max = pdo_max_current(pdo);
3925 
3926 		if (op > pdo_max)
3927 			return -EINVAL;
3928 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3929 			return -EINVAL;
3930 
3931 		if (type == PDO_TYPE_FIXED)
3932 			tcpm_log(port,
3933 				 "Requested %u mV, %u mA for %u / %u mA",
3934 				 pdo_fixed_voltage(pdo), pdo_max, op, max);
3935 		else
3936 			tcpm_log(port,
3937 				 "Requested %u -> %u mV, %u mA for %u / %u mA",
3938 				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3939 				 pdo_max, op, max);
3940 		break;
3941 	case PDO_TYPE_BATT:
3942 		max = rdo_max_power(rdo);
3943 		op = rdo_op_power(rdo);
3944 		pdo_max = pdo_max_power(pdo);
3945 
3946 		if (op > pdo_max)
3947 			return -EINVAL;
3948 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3949 			return -EINVAL;
3950 		tcpm_log(port,
3951 			 "Requested %u -> %u mV, %u mW for %u / %u mW",
3952 			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3953 			 pdo_max, op, max);
3954 		break;
3955 	default:
3956 		return -EINVAL;
3957 	}
3958 
3959 	port->op_vsafe5v = index == 1;
3960 
3961 	return 0;
3962 }
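
/*
 * Worked example (illustrative, assuming a 5 V / 3 A fixed PDO at index 1):
 * an RDO requesting 2 A operating / 3 A maximum current is accepted, since
 * both values are within the 3 A PDO limit. Requesting a 4 A maximum without
 * RDO_CAP_MISMATCH set fails with -EINVAL. With index 1 selected,
 * op_vsafe5v is set because the contract stays on the vSafe5V PDO.
 */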
3963 
3964 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3965 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3966 
3967 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3968 			      int *src_pdo)
3969 {
3970 	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3971 		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3972 		     min_snk_mv = 0;
3973 	int ret = -EINVAL;
3974 
3975 	port->pps_data.supported = false;
3976 	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3977 	power_supply_changed(port->psy);
3978 
3979 	/*
3980 	 * Select the source PDO providing the most power which has a
3981 	 * matching sink cap.
3982 	 */
3983 	for (i = 0; i < port->nr_source_caps; i++) {
3984 		u32 pdo = port->source_caps[i];
3985 		enum pd_pdo_type type = pdo_type(pdo);
3986 
3987 		switch (type) {
3988 		case PDO_TYPE_FIXED:
3989 			max_src_mv = pdo_fixed_voltage(pdo);
3990 			min_src_mv = max_src_mv;
3991 			break;
3992 		case PDO_TYPE_BATT:
3993 		case PDO_TYPE_VAR:
3994 			max_src_mv = pdo_max_voltage(pdo);
3995 			min_src_mv = pdo_min_voltage(pdo);
3996 			break;
3997 		case PDO_TYPE_APDO:
3998 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
3999 				port->pps_data.supported = true;
4000 				port->usb_type =
4001 					POWER_SUPPLY_USB_TYPE_PD_PPS;
4002 				power_supply_changed(port->psy);
4003 			}
4004 			continue;
4005 		default:
4006 			tcpm_log(port, "Invalid source PDO type, ignoring");
4007 			continue;
4008 		}
4009 
4010 		switch (type) {
4011 		case PDO_TYPE_FIXED:
4012 		case PDO_TYPE_VAR:
4013 			src_ma = pdo_max_current(pdo);
4014 			src_mw = src_ma * min_src_mv / 1000;
4015 			break;
4016 		case PDO_TYPE_BATT:
4017 			src_mw = pdo_max_power(pdo);
4018 			break;
4019 		case PDO_TYPE_APDO:
4020 			continue;
4021 		default:
4022 			tcpm_log(port, "Invalid source PDO type, ignoring");
4023 			continue;
4024 		}
4025 
4026 		for (j = 0; j < port->nr_snk_pdo; j++) {
4027 			pdo = port->snk_pdo[j];
4028 
4029 			switch (pdo_type(pdo)) {
4030 			case PDO_TYPE_FIXED:
4031 				max_snk_mv = pdo_fixed_voltage(pdo);
4032 				min_snk_mv = max_snk_mv;
4033 				break;
4034 			case PDO_TYPE_BATT:
4035 			case PDO_TYPE_VAR:
4036 				max_snk_mv = pdo_max_voltage(pdo);
4037 				min_snk_mv = pdo_min_voltage(pdo);
4038 				break;
4039 			case PDO_TYPE_APDO:
4040 				continue;
4041 			default:
4042 				tcpm_log(port, "Invalid sink PDO type, ignoring");
4043 				continue;
4044 			}
4045 
4046 			if (max_src_mv <= max_snk_mv &&
4047 				min_src_mv >= min_snk_mv) {
4048 				/* Prefer higher voltages if available */
4049 				if ((src_mw == max_mw && min_src_mv > max_mv) ||
4050 							src_mw > max_mw) {
4051 					*src_pdo = i;
4052 					*sink_pdo = j;
4053 					max_mw = src_mw;
4054 					max_mv = min_src_mv;
4055 					ret = 0;
4056 				}
4057 			}
4058 		}
4059 	}
4060 
4061 	return ret;
4062 }
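
/*
 * Selection example (illustrative): if the source offers fixed 5 V / 3 A
 * (15 W) and 9 V / 3 A (27 W) PDOs and the sink lists fixed 5 V and 9 V PDOs,
 * the 9 V offer wins on power. Offers of equal power are tie-broken towards
 * the higher voltage by the "Prefer higher voltages" branch above. PPS APDOs
 * only set pps_data.supported here; they are chosen separately by
 * tcpm_pd_select_pps_apdo().
 */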
4063 
4064 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
4065 {
4066 	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
4067 	unsigned int src_pdo = 0;
4068 	u32 pdo, src;
4069 
4070 	for (i = 1; i < port->nr_source_caps; ++i) {
4071 		pdo = port->source_caps[i];
4072 
4073 		switch (pdo_type(pdo)) {
4074 		case PDO_TYPE_APDO:
4075 			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
4076 				tcpm_log(port, "Not PPS APDO (source), ignoring");
4077 				continue;
4078 			}
4079 
4080 			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
4081 			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
4082 				continue;
4083 
4084 			src_ma = pdo_pps_apdo_max_current(pdo);
4085 			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
4086 			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
4087 			if (op_mw > max_temp_mw) {
4088 				src_pdo = i;
4089 				max_temp_mw = op_mw;
4090 			}
4091 			break;
4092 		default:
4093 			tcpm_log(port, "Not APDO type (source), ignoring");
4094 			continue;
4095 		}
4096 	}
4097 
4098 	if (src_pdo) {
4099 		src = port->source_caps[src_pdo];
4100 
4101 		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
4102 		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
4103 		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
4104 		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
4105 						 port->pps_data.req_op_curr);
4106 	}
4107 
4108 	return src_pdo;
4109 }
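
/*
 * Example (illustrative): with req_out_volt = 9000 mV and req_op_curr =
 * 2000 mA, a 3.3-11 V / 3 A PPS APDO is in range and yields
 * op_mw = min(3000, 2000) * 9000 / 1000 = 18000 mW; an APDO limited to
 * 3.3-5.9 V is skipped since 9 V is out of its range. Index 0 is never
 * considered, as the first source PDO is the vSafe5V fixed PDO.
 */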
4110 
4111 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
4112 {
4113 	unsigned int mv, ma, mw, flags;
4114 	unsigned int max_ma, max_mw;
4115 	enum pd_pdo_type type;
4116 	u32 pdo, matching_snk_pdo;
4117 	int src_pdo_index = 0;
4118 	int snk_pdo_index = 0;
4119 	int ret;
4120 
4121 	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
4122 	if (ret < 0)
4123 		return ret;
4124 
4125 	pdo = port->source_caps[src_pdo_index];
4126 	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
4127 	type = pdo_type(pdo);
4128 
4129 	switch (type) {
4130 	case PDO_TYPE_FIXED:
4131 		mv = pdo_fixed_voltage(pdo);
4132 		break;
4133 	case PDO_TYPE_BATT:
4134 	case PDO_TYPE_VAR:
4135 		mv = pdo_min_voltage(pdo);
4136 		break;
4137 	default:
4138 		tcpm_log(port, "Invalid PDO selected!");
4139 		return -EINVAL;
4140 	}
4141 
4142 	/* Select maximum available current within the sink pdo's limit */
4143 	if (type == PDO_TYPE_BATT) {
4144 		mw = min_power(pdo, matching_snk_pdo);
4145 		ma = 1000 * mw / mv;
4146 	} else {
4147 		ma = min_current(pdo, matching_snk_pdo);
4148 		mw = ma * mv / 1000;
4149 	}
4150 
4151 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4152 
4153 	/* Set mismatch bit if offered power is less than operating power */
4154 	max_ma = ma;
4155 	max_mw = mw;
4156 	if (mw < port->operating_snk_mw) {
4157 		flags |= RDO_CAP_MISMATCH;
4158 		if (type == PDO_TYPE_BATT &&
4159 		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
4160 			max_mw = pdo_max_power(matching_snk_pdo);
4161 		else if (pdo_max_current(matching_snk_pdo) >
4162 			 pdo_max_current(pdo))
4163 			max_ma = pdo_max_current(matching_snk_pdo);
4164 	}
4165 
4166 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4167 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4168 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4169 		 port->polarity);
4170 
4171 	if (type == PDO_TYPE_BATT) {
4172 		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
4173 
4174 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
4175 			 src_pdo_index, mv, mw,
4176 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4177 	} else {
4178 		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
4179 
4180 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
4181 			 src_pdo_index, mv, ma,
4182 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4183 	}
4184 
4185 	port->req_current_limit = ma;
4186 	port->req_supply_voltage = mv;
4187 
4188 	return 0;
4189 }
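
/*
 * Request-building example (illustrative): a 9 V / 3 A fixed source PDO
 * matched with a 9 V / 2 A sink PDO gives ma = min(3000, 2000) = 2000 and
 * mw = 2000 * 9000 / 1000 = 18000 mW. If operating_snk_mw were 27000, the
 * mismatch bit would be set while max_ma stays at 2000 mA (the sink PDO does
 * not advertise more current than the source), yielding roughly
 * RDO_FIXED(index, 2000, 2000, RDO_USB_COMM | RDO_NO_SUSPEND |
 * RDO_CAP_MISMATCH).
 */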
4190 
4191 static int tcpm_pd_send_request(struct tcpm_port *port)
4192 {
4193 	struct pd_message msg;
4194 	int ret;
4195 	u32 rdo;
4196 
4197 	ret = tcpm_pd_build_request(port, &rdo);
4198 	if (ret < 0)
4199 		return ret;
4200 
4201 	/*
4202 	 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
4203 	 * It is safer to modify the threshold here.
4204 	 */
4205 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4206 
4207 	memset(&msg, 0, sizeof(msg));
4208 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4209 				  port->pwr_role,
4210 				  port->data_role,
4211 				  port->negotiated_rev,
4212 				  port->message_id, 1);
4213 	msg.payload[0] = cpu_to_le32(rdo);
4214 
4215 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4216 }
4217 
4218 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
4219 {
4220 	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
4221 	unsigned int src_pdo_index;
4222 
4223 	src_pdo_index = tcpm_pd_select_pps_apdo(port);
4224 	if (!src_pdo_index)
4225 		return -EOPNOTSUPP;
4226 
4227 	max_mv = port->pps_data.req_max_volt;
4228 	max_ma = port->pps_data.req_max_curr;
4229 	out_mv = port->pps_data.req_out_volt;
4230 	op_ma = port->pps_data.req_op_curr;
4231 
4232 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4233 
4234 	op_mw = (op_ma * out_mv) / 1000;
4235 	if (op_mw < port->operating_snk_mw) {
4236 		/*
4237 		 * Try raising current to meet power needs. If that's not enough
4238 		 * then try upping the voltage. If that's still not enough
4239 		 * then we've obviously chosen a PPS APDO which really isn't
4240 		 * suitable so abandon ship.
4241 		 */
4242 		op_ma = (port->operating_snk_mw * 1000) / out_mv;
4243 		if ((port->operating_snk_mw * 1000) % out_mv)
4244 			++op_ma;
4245 		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
4246 
4247 		if (op_ma > max_ma) {
4248 			op_ma = max_ma;
4249 			out_mv = (port->operating_snk_mw * 1000) / op_ma;
4250 			if ((port->operating_snk_mw * 1000) % op_ma)
4251 				++out_mv;
4252 			out_mv += RDO_PROG_VOLT_MV_STEP -
4253 				  (out_mv % RDO_PROG_VOLT_MV_STEP);
4254 
4255 			if (out_mv > max_mv) {
4256 				tcpm_log(port, "Invalid PPS APDO selected!");
4257 				return -EINVAL;
4258 			}
4259 		}
4260 	}
4261 
4262 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4263 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4264 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4265 		 port->polarity);
4266 
4267 	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
4268 
4269 	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
4270 		 src_pdo_index, out_mv, op_ma);
4271 
4272 	port->pps_data.req_op_curr = op_ma;
4273 	port->pps_data.req_out_volt = out_mv;
4274 
4275 	return 0;
4276 }
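
/*
 * Adjustment example (illustrative): with out_mv = 5000, op_ma = 3000 and
 * operating_snk_mw = 25000, op_mw = 15000 mW falls short, so the current is
 * raised to 25000 * 1000 / 5000 = 5000 mA and stepped up to the next
 * RDO_PROG_CURR_MA_STEP boundary. Only if that exceeds req_max_curr is the
 * voltage raised instead, and -EINVAL is returned when even req_max_volt
 * cannot cover the operating power.
 */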
4277 
4278 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
4279 {
4280 	struct pd_message msg;
4281 	int ret;
4282 	u32 rdo;
4283 
4284 	ret = tcpm_pd_build_pps_request(port, &rdo);
4285 	if (ret < 0)
4286 		return ret;
4287 
4288 	/* Relax the threshold as voltage will be adjusted right after Accept Message. */
4289 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4290 
4291 	memset(&msg, 0, sizeof(msg));
4292 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4293 				  port->pwr_role,
4294 				  port->data_role,
4295 				  port->negotiated_rev,
4296 				  port->message_id, 1);
4297 	msg.payload[0] = cpu_to_le32(rdo);
4298 
4299 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4300 }
4301 
4302 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
4303 {
4304 	int ret;
4305 
4306 	if (enable && port->vbus_charge)
4307 		return -EINVAL;
4308 
4309 	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
4310 
4311 	ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
4312 	if (ret < 0)
4313 		return ret;
4314 
4315 	port->vbus_source = enable;
4316 	return 0;
4317 }
4318 
4319 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
4320 {
4321 	int ret;
4322 
4323 	if (charge && port->vbus_source)
4324 		return -EINVAL;
4325 
4326 	if (charge != port->vbus_charge) {
4327 		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
4328 		ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
4329 					   charge);
4330 		if (ret < 0)
4331 			return ret;
4332 	}
4333 	port->vbus_charge = charge;
4334 	power_supply_changed(port->psy);
4335 	return 0;
4336 }
4337 
4338 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
4339 {
4340 	int ret;
4341 
4342 	if (!port->tcpc->start_toggling)
4343 		return false;
4344 
4345 	tcpm_log_force(port, "Start toggling");
4346 	ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
4347 	return ret == 0;
4348 }
4349 
4350 static int tcpm_init_vbus(struct tcpm_port *port)
4351 {
4352 	int ret;
4353 
4354 	ret = port->tcpc->set_vbus(port->tcpc, false, false);
4355 	port->vbus_source = false;
4356 	port->vbus_charge = false;
4357 	return ret;
4358 }
4359 
4360 static int tcpm_init_vconn(struct tcpm_port *port)
4361 {
4362 	int ret;
4363 
4364 	ret = port->tcpc->set_vconn(port->tcpc, false);
4365 	port->vconn_role = TYPEC_SINK;
4366 	return ret;
4367 }
4368 
4369 static void tcpm_typec_connect(struct tcpm_port *port)
4370 {
4371 	struct typec_partner *partner;
4372 
4373 	if (!port->connected) {
4374 		port->connected = true;
4375 		/* Make sure we don't report stale identity information */
4376 		memset(&port->partner_ident, 0, sizeof(port->partner_ident));
4377 		port->partner_desc.usb_pd = port->pd_capable;
4378 		if (tcpm_port_is_debug(port))
4379 			port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
4380 		else if (tcpm_port_is_audio(port))
4381 			port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
4382 		else
4383 			port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
4384 		partner = typec_register_partner(port->typec_port, &port->partner_desc);
4385 		if (IS_ERR(partner)) {
4386 			dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
4387 			return;
4388 		}
4389 
4390 		port->partner = partner;
4391 		typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
4392 	}
4393 }
4394 
4395 static int tcpm_src_attach(struct tcpm_port *port)
4396 {
4397 	enum typec_cc_polarity polarity =
4398 				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
4399 							 : TYPEC_POLARITY_CC1;
4400 	int ret;
4401 
4402 	if (port->attached)
4403 		return 0;
4404 
4405 	ret = tcpm_set_polarity(port, polarity);
4406 	if (ret < 0)
4407 		return ret;
4408 
4409 	tcpm_enable_auto_vbus_discharge(port, true);
4410 
4411 	/*
4412 	 * USB Type-C specification, version 1.2,
4413 	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
4414 	 * Enable VCONN only if the non-RD port is set to RA.
4415 	 */
4416 	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
4417 	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
4418 		ret = tcpm_set_vconn(port, true);
4419 		if (ret < 0)
4420 			return ret;
4421 	}
4422 
4423 	ret = tcpm_set_vbus(port, true);
4424 	if (ret < 0)
4425 		goto out_disable_vconn;
4426 
4427 	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB, TYPEC_SOURCE,
4428 			     tcpm_data_role_for_source(port));
4429 	if (ret < 0)
4430 		goto out_disable_vbus;
4431 
4432 	if (port->pd_supported) {
4433 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
4434 		if (ret < 0)
4435 			goto out_disable_mux;
4436 	}
4437 
4438 	port->pd_capable = false;
4439 
4440 	port->partner = NULL;
4441 
4442 	port->attached = true;
4443 	port->send_discover = true;
4444 	port->send_discover_prime = false;
4445 
4446 	return 0;
4447 
4448 out_disable_mux:
4449 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4450 		     TYPEC_ORIENTATION_NONE);
4451 out_disable_vbus:
4452 	tcpm_set_vbus(port, false);
4453 out_disable_vconn:
4454 	tcpm_set_vconn(port, false);
4455 
4456 	return ret;
4457 }
4458 
4459 static void tcpm_typec_disconnect(struct tcpm_port *port)
4460 {
4461 	/*
4462 	 * Unregister the plug/cable outside of the port->connected check because
4463 	 * the cable can be discovered before the SRC_READY/SNK_READY states where
4464 	 * port->connected is set.
4465 	 */
4466 	typec_unregister_plug(port->plug_prime);
4467 	typec_unregister_cable(port->cable);
4468 	port->plug_prime = NULL;
4469 	port->cable = NULL;
4470 	if (port->connected) {
4471 		if (port->partner) {
4472 			typec_partner_set_usb_power_delivery(port->partner, NULL);
4473 			typec_unregister_partner(port->partner);
4474 			port->partner = NULL;
4475 		}
4476 		port->connected = false;
4477 	}
4478 }
4479 
4480 static void tcpm_unregister_altmodes(struct tcpm_port *port)
4481 {
4482 	struct pd_mode_data *modep = &port->mode_data;
4483 	struct pd_mode_data *modep_prime = &port->mode_data_prime;
4484 	int i;
4485 
4486 	for (i = 0; i < modep->altmodes; i++) {
4487 		typec_unregister_altmode(port->partner_altmode[i]);
4488 		port->partner_altmode[i] = NULL;
4489 	}
4490 	for (i = 0; i < modep_prime->altmodes; i++) {
4491 		typec_unregister_altmode(port->plug_prime_altmode[i]);
4492 		port->plug_prime_altmode[i] = NULL;
4493 	}
4494 
4495 	memset(modep, 0, sizeof(*modep));
4496 	memset(modep_prime, 0, sizeof(*modep_prime));
4497 }
4498 
4499 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
4500 {
4501 	tcpm_log(port, "Setting usb_comm capable %s", str_true_false(capable));
4502 
4503 	if (port->tcpc->set_partner_usb_comm_capable)
4504 		port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
4505 }
4506 
4507 static void tcpm_reset_port(struct tcpm_port *port)
4508 {
4509 	tcpm_enable_auto_vbus_discharge(port, false);
4510 	port->in_ams = false;
4511 	port->ams = NONE_AMS;
4512 	port->vdm_sm_running = false;
4513 	tcpm_unregister_altmodes(port);
4514 	tcpm_typec_disconnect(port);
4515 	port->attached = false;
4516 	port->pd_capable = false;
4517 	port->pps_data.supported = false;
4518 	tcpm_set_partner_usb_comm_capable(port, false);
4519 
4520 	/*
4521 	 * First Rx ID should be 0; set this to a sentinel of -1 so that
4522 	 * tcpm_pd_rx_handler() can tell whether it has been seen before.
4523 	 */
4524 	port->rx_msgid = -1;
4525 	port->rx_msgid_prime = -1;
4526 
4527 	port->tcpc->set_pd_rx(port->tcpc, false);
4528 	tcpm_init_vbus(port);	/* also disables charging */
4529 	tcpm_init_vconn(port);
4530 	tcpm_set_current_limit(port, 0, 0);
4531 	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
4532 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4533 		     TYPEC_ORIENTATION_NONE);
4534 	tcpm_set_attached_state(port, false);
4535 	port->try_src_count = 0;
4536 	port->try_snk_count = 0;
4537 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
4538 	power_supply_changed(port->psy);
4539 	port->nr_sink_caps = 0;
4540 	port->sink_cap_done = false;
4541 	if (port->tcpc->enable_frs)
4542 		port->tcpc->enable_frs(port->tcpc, false);
4543 
4544 	usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
4545 	port->partner_sink_caps = NULL;
4546 	usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4547 	port->partner_source_caps = NULL;
4548 	usb_power_delivery_unregister(port->partner_pd);
4549 	port->partner_pd = NULL;
4550 }
4551 
4552 static void tcpm_detach(struct tcpm_port *port)
4553 {
4554 	if (tcpm_port_is_disconnected(port))
4555 		port->hard_reset_count = 0;
4556 
4557 	if (!port->attached)
4558 		return;
4559 
4560 	if (port->tcpc->set_bist_data) {
4561 		tcpm_log(port, "disable BIST MODE TESTDATA");
4562 		port->tcpc->set_bist_data(port->tcpc, false);
4563 	}
4564 
4565 	tcpm_reset_port(port);
4566 }
4567 
4568 static void tcpm_src_detach(struct tcpm_port *port)
4569 {
4570 	tcpm_detach(port);
4571 }
4572 
4573 static int tcpm_snk_attach(struct tcpm_port *port)
4574 {
4575 	int ret;
4576 
4577 	if (port->attached)
4578 		return 0;
4579 
4580 	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
4581 				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
4582 	if (ret < 0)
4583 		return ret;
4584 
4585 	tcpm_enable_auto_vbus_discharge(port, true);
4586 
4587 	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
4588 			     TYPEC_SINK, tcpm_data_role_for_sink(port));
4589 	if (ret < 0)
4590 		return ret;
4591 
4592 	port->pd_capable = false;
4593 
4594 	port->partner = NULL;
4595 
4596 	port->attached = true;
4597 	port->send_discover = true;
4598 	port->send_discover_prime = false;
4599 
4600 	return 0;
4601 }
4602 
4603 static void tcpm_snk_detach(struct tcpm_port *port)
4604 {
4605 	tcpm_detach(port);
4606 }
4607 
4608 static int tcpm_acc_attach(struct tcpm_port *port)
4609 {
4610 	int ret;
4611 	enum typec_role role;
4612 	enum typec_data_role data;
4613 	int state = TYPEC_STATE_USB;
4614 
4615 	if (port->attached)
4616 		return 0;
4617 
4618 	role = tcpm_port_is_sink(port) ? TYPEC_SINK : TYPEC_SOURCE;
4619 	data = tcpm_port_is_sink(port) ? tcpm_data_role_for_sink(port)
4620 				       : tcpm_data_role_for_source(port);
4621 
4622 	if (tcpm_port_is_audio(port))
4623 		state = TYPEC_MODE_AUDIO;
4624 
4625 	if (tcpm_port_is_debug(port))
4626 		state = TYPEC_MODE_DEBUG;
4627 
4628 	ret = tcpm_set_roles(port, true, state, role, data);
4629 	if (ret < 0)
4630 		return ret;
4631 
4632 	port->partner = NULL;
4633 
4634 	tcpm_typec_connect(port);
4635 
4636 	port->attached = true;
4637 
4638 	return 0;
4639 }
4640 
4641 static void tcpm_acc_detach(struct tcpm_port *port)
4642 {
4643 	tcpm_detach(port);
4644 }
4645 
4646 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
4647 {
4648 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
4649 		return HARD_RESET_SEND;
4650 	if (port->pd_capable)
4651 		return ERROR_RECOVERY;
4652 	if (port->pwr_role == TYPEC_SOURCE)
4653 		return SRC_UNATTACHED;
4654 	if (port->state == SNK_WAIT_CAPABILITIES ||
4655 	    port->state == SNK_WAIT_CAPABILITIES_TIMEOUT)
4656 		return SNK_READY;
4657 	return SNK_UNATTACHED;
4658 }
4659 
4660 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
4661 {
4662 	if (port->port_type == TYPEC_PORT_DRP) {
4663 		if (port->pwr_role == TYPEC_SOURCE)
4664 			return SRC_UNATTACHED;
4665 		else
4666 			return SNK_UNATTACHED;
4667 	} else if (port->port_type == TYPEC_PORT_SRC) {
4668 		return SRC_UNATTACHED;
4669 	}
4670 
4671 	return SNK_UNATTACHED;
4672 }
4673 
4674 static void tcpm_swap_complete(struct tcpm_port *port, int result)
4675 {
4676 	if (port->swap_pending) {
4677 		port->swap_status = result;
4678 		port->swap_pending = false;
4679 		port->non_pd_role_swap = false;
4680 		complete(&port->swap_complete);
4681 	}
4682 }
4683 
4684 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
4685 {
4686 	switch (cc) {
4687 	case TYPEC_CC_RP_1_5:
4688 		return TYPEC_PWR_MODE_1_5A;
4689 	case TYPEC_CC_RP_3_0:
4690 		return TYPEC_PWR_MODE_3_0A;
4691 	case TYPEC_CC_RP_DEF:
4692 	default:
4693 		return TYPEC_PWR_MODE_USB;
4694 	}
4695 }
4696 
4697 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
4698 {
4699 	switch (opmode) {
4700 	case TYPEC_PWR_MODE_USB:
4701 		return TYPEC_CC_RP_DEF;
4702 	case TYPEC_PWR_MODE_1_5A:
4703 		return TYPEC_CC_RP_1_5;
4704 	case TYPEC_PWR_MODE_3_0A:
4705 	case TYPEC_PWR_MODE_PD:
4706 	default:
4707 		return TYPEC_CC_RP_3_0;
4708 	}
4709 }
4710 
4711 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
4712 {
4713 	if (!port->partner)
4714 		return;
4715 
4716 	switch (port->negotiated_rev) {
4717 	case PD_REV30:
4718 		break;
4719 	/*
4720 	 * 6.4.4.2.3 Structured VDM Version
4721 	 * 2.0 states "At this time, there is only one version (1.0) defined.
4722 	 * This field Shall be set to zero to indicate Version 1.0."
4723 	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
4724 	 * To ensure that we follow the Power Delivery revision we are currently
4725 	 * operating on, downgrade the SVDM version to the highest one supported
4726 	 * by the Power Delivery revision.
4727 	 */
4728 	case PD_REV20:
4729 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4730 		break;
4731 	default:
4732 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4733 		break;
4734 	}
4735 }
4736 
4737 static void run_state_machine(struct tcpm_port *port)
4738 {
4739 	int ret;
4740 	enum typec_pwr_opmode opmode;
4741 	unsigned int msecs;
4742 	enum tcpm_state upcoming_state;
4743 
4744 	if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
4745 		port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
4746 						port->state == SRC_UNATTACHED) ||
4747 					       (port->enter_state == SNK_ATTACH_WAIT &&
4748 						port->state == SNK_UNATTACHED) ||
4749 					       (port->enter_state == SNK_DEBOUNCED &&
4750 						port->state == SNK_UNATTACHED));
4751 
4752 	port->enter_state = port->state;
4753 	switch (port->state) {
4754 	case TOGGLING:
4755 		break;
4756 	case CHECK_CONTAMINANT:
4757 		port->tcpc->check_contaminant(port->tcpc);
4758 		break;
4759 	/* SRC states */
4760 	case SRC_UNATTACHED:
4761 		if (!port->non_pd_role_swap)
4762 			tcpm_swap_complete(port, -ENOTCONN);
4763 		tcpm_src_detach(port);
4764 		if (port->potential_contaminant) {
4765 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4766 			break;
4767 		}
4768 		if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
4769 			tcpm_set_state(port, TOGGLING, 0);
4770 			break;
4771 		}
4772 		tcpm_set_cc(port, tcpm_rp_cc(port));
4773 		if (port->port_type == TYPEC_PORT_DRP)
4774 			tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
4775 		break;
4776 	case SRC_ATTACH_WAIT:
4777 		if (tcpm_port_is_debug(port))
4778 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
4779 				       port->timings.cc_debounce_time);
4780 		else if (tcpm_port_is_audio(port))
4781 			tcpm_set_state(port, AUDIO_ACC_ATTACHED,
4782 				       port->timings.cc_debounce_time);
4783 		else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
4784 			tcpm_set_state(port,
4785 				       tcpm_try_snk(port) ? SNK_TRY
4786 							  : SRC_ATTACHED,
4787 				       port->timings.cc_debounce_time);
4788 		break;
4789 
4790 	case SNK_TRY:
4791 		port->try_snk_count++;
4792 		/*
4793 		 * Requirements:
4794 		 * - Do not drive vconn or vbus
4795 		 * - Terminate CC pins (both) to Rd
4796 		 * Action:
4797 		 * - Wait for tDRPTry (PD_T_DRP_TRY).
4798 		 *   Until then, ignore any state changes.
4799 		 */
4800 		tcpm_set_cc(port, TYPEC_CC_RD);
4801 		tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
4802 		break;
4803 	case SNK_TRY_WAIT:
4804 		if (tcpm_port_is_sink(port)) {
4805 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
4806 		} else {
4807 			tcpm_set_state(port, SRC_TRYWAIT, 0);
4808 			port->max_wait = 0;
4809 		}
4810 		break;
4811 	case SNK_TRY_WAIT_DEBOUNCE:
4812 		tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
4813 			       PD_T_TRY_CC_DEBOUNCE);
4814 		break;
4815 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
4816 		if (port->vbus_present && tcpm_port_is_sink(port))
4817 			tcpm_set_state(port, SNK_ATTACHED, 0);
4818 		else
4819 			port->max_wait = 0;
4820 		break;
4821 	case SRC_TRYWAIT:
4822 		tcpm_set_cc(port, tcpm_rp_cc(port));
4823 		if (port->max_wait == 0) {
4824 			port->max_wait = jiffies +
4825 					 msecs_to_jiffies(PD_T_DRP_TRY);
4826 			tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4827 				       PD_T_DRP_TRY);
4828 		} else {
4829 			if (time_is_after_jiffies(port->max_wait))
4830 				tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4831 					       jiffies_to_msecs(port->max_wait -
4832 								jiffies));
4833 			else
4834 				tcpm_set_state(port, SNK_UNATTACHED, 0);
4835 		}
4836 		break;
4837 	case SRC_TRYWAIT_DEBOUNCE:
4838 		tcpm_set_state(port, SRC_ATTACHED, port->timings.cc_debounce_time);
4839 		break;
4840 	case SRC_TRYWAIT_UNATTACHED:
4841 		tcpm_set_state(port, SNK_UNATTACHED, 0);
4842 		break;
4843 
4844 	case SRC_ATTACHED:
4845 		ret = tcpm_src_attach(port);
4846 		tcpm_set_state(port, SRC_UNATTACHED,
4847 			       ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4848 		break;
4849 	case SRC_STARTUP:
4850 		opmode =  tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4851 		typec_set_pwr_opmode(port->typec_port, opmode);
4852 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
4853 		port->caps_count = 0;
4854 		port->negotiated_rev = PD_MAX_REV;
4855 		port->negotiated_rev_prime = PD_MAX_REV;
4856 		port->message_id = 0;
4857 		port->message_id_prime = 0;
4858 		port->rx_msgid = -1;
4859 		port->rx_msgid_prime = -1;
4860 		port->explicit_contract = false;
4861 		/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4862 		if (port->ams == POWER_ROLE_SWAP ||
4863 		    port->ams == FAST_ROLE_SWAP)
4864 			tcpm_ams_finish(port);
4865 		if (!port->pd_supported) {
4866 			tcpm_set_state(port, SRC_READY, 0);
4867 			break;
4868 		}
4869 		port->upcoming_state = SRC_SEND_CAPABILITIES;
4870 		tcpm_ams_start(port, POWER_NEGOTIATION);
4871 		break;
4872 	case SRC_SEND_CAPABILITIES:
4873 		port->caps_count++;
4874 		if (port->caps_count > PD_N_CAPS_COUNT) {
4875 			tcpm_set_state(port, SRC_READY, 0);
4876 			break;
4877 		}
4878 		ret = tcpm_pd_send_source_caps(port);
4879 		if (ret < 0) {
4880 			if (tcpm_can_communicate_sop_prime(port) &&
4881 			    IS_ERR_OR_NULL(port->cable))
4882 				tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
4883 			else
4884 				tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4885 					       PD_T_SEND_SOURCE_CAP);
4886 		} else {
4887 			/*
4888 			 * Per standard, we should clear the reset counter here.
4889 			 * However, that can result in state machine hang-ups.
4890 			 * Reset it only in READY state to improve stability.
4891 			 */
4892 			/* port->hard_reset_count = 0; */
4893 			port->caps_count = 0;
4894 			port->pd_capable = true;
4895 			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4896 					    PD_T_SENDER_RESPONSE);
4897 		}
4898 		break;
4899 	case SRC_SEND_CAPABILITIES_TIMEOUT:
4900 		/*
4901 		 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4902 		 *
4903 		 * PD 2.0 sinks are supposed to accept src-capabilities with a
4904 		 * 3.0 header and simply ignore any src PDOs which the sink does
4905 		 * not understand such as PPS but some 2.0 sinks instead ignore
4906 		 * the entire PD_DATA_SOURCE_CAP message, causing contract
4907 		 * negotiation to fail.
4908 		 *
4909 		 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4910 		 * sending src-capabilities with a lower PD revision to
4911 		 * make these broken sinks work.
4912 		 */
4913 		if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4914 			tcpm_set_state(port, HARD_RESET_SEND, 0);
4915 		} else if (port->negotiated_rev > PD_REV20) {
4916 			port->negotiated_rev--;
4917 			port->hard_reset_count = 0;
4918 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4919 		} else {
4920 			tcpm_set_state(port, hard_reset_state(port), 0);
4921 		}
4922 		break;
4923 	case SRC_NEGOTIATE_CAPABILITIES:
4924 		ret = tcpm_pd_check_request(port);
4925 		if (ret < 0) {
4926 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
4927 			if (!port->explicit_contract) {
4928 				tcpm_set_state(port,
4929 					       SRC_WAIT_NEW_CAPABILITIES, 0);
4930 			} else {
4931 				tcpm_set_state(port, SRC_READY, 0);
4932 			}
4933 		} else {
4934 			tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
4935 			tcpm_set_partner_usb_comm_capable(port,
4936 							  !!(port->sink_request & RDO_USB_COMM));
4937 			tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4938 				       PD_T_SRC_TRANSITION);
4939 		}
4940 		break;
4941 	case SRC_TRANSITION_SUPPLY:
4942 		/* XXX: regulator_set_voltage(vbus, ...) */
4943 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
4944 		port->explicit_contract = true;
4945 		typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4946 		port->pwr_opmode = TYPEC_PWR_MODE_PD;
4947 		tcpm_set_state_cond(port, SRC_READY, 0);
4948 		break;
4949 	case SRC_READY:
4950 #if 1
4951 		port->hard_reset_count = 0;
4952 #endif
4953 		port->try_src_count = 0;
4954 
4955 		tcpm_swap_complete(port, 0);
4956 		tcpm_typec_connect(port);
4957 
4958 		if (port->ams != NONE_AMS)
4959 			tcpm_ams_finish(port);
4960 		if (port->next_ams != NONE_AMS) {
4961 			port->ams = port->next_ams;
4962 			port->next_ams = NONE_AMS;
4963 		}
4964 
4965 		/*
4966 		 * If previous AMS is interrupted, switch to the upcoming
4967 		 * state.
4968 		 */
4969 		if (port->upcoming_state != INVALID_STATE) {
4970 			upcoming_state = port->upcoming_state;
4971 			port->upcoming_state = INVALID_STATE;
4972 			tcpm_set_state(port, upcoming_state, 0);
4973 			break;
4974 		}
4975 
4976 		/*
4977 		 * 6.4.4.3.1 Discover Identity
4978 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
4979 		 * Explicit Contract."
4980 		 *
4981 		 * Discover Identity on SOP' should be discovered prior to the
4982 		 * ready state, but if done after a Vconn Swap following Discover
4983 		 * Identity on SOP then the discovery process can be run here
4984 		 * as well.
4985 		 */
4986 		if (port->explicit_contract) {
4987 			if (port->send_discover_prime) {
4988 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
4989 			} else {
4990 				port->tx_sop_type = TCPC_TX_SOP;
4991 				tcpm_set_initial_svdm_version(port);
4992 			}
4993 			mod_send_discover_delayed_work(port, 0);
4994 		} else {
4995 			port->send_discover = false;
4996 			port->send_discover_prime = false;
4997 		}
4998 
4999 		/*
5000 		 * 6.3.5
5001 		 * Sending ping messages is not necessary if
5002 		 * - the source operates at vSafe5V
5003 		 * or
5004 		 * - The system is not operating in PD mode
5005 		 * or
5006 		 * - Both partners are connected using a Type-C connector
5007 		 *
5008 		 * There is no actual need to send PD Ping messages since the
5009 		 * local port is Type-C, and the spec does not clearly say whether
5010 		 * PD is possible when Type-C is connected to Type-A/B.
5011 		 */
5012 		break;
5013 	case SRC_WAIT_NEW_CAPABILITIES:
5014 		/* Nothing to do... */
5015 		break;
5016 
5017 	/* SNK states */
5018 	case SNK_UNATTACHED:
5019 		if (!port->non_pd_role_swap)
5020 			tcpm_swap_complete(port, -ENOTCONN);
5021 		tcpm_pps_complete(port, -ENOTCONN);
5022 		tcpm_snk_detach(port);
5023 		if (port->potential_contaminant) {
5024 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
5025 			break;
5026 		}
5027 		if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
5028 			tcpm_set_state(port, TOGGLING, 0);
5029 			break;
5030 		}
5031 		tcpm_set_cc(port, TYPEC_CC_RD);
5032 		if (port->port_type == TYPEC_PORT_DRP)
5033 			tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
5034 		break;
5035 	case SNK_ATTACH_WAIT:
5036 		if ((port->cc1 == TYPEC_CC_OPEN &&
5037 		     port->cc2 != TYPEC_CC_OPEN) ||
5038 		    (port->cc1 != TYPEC_CC_OPEN &&
5039 		     port->cc2 == TYPEC_CC_OPEN))
5040 			tcpm_set_state(port, SNK_DEBOUNCED,
5041 				       port->timings.cc_debounce_time);
5042 		else if (tcpm_port_is_disconnected(port))
5043 			tcpm_set_state(port, SNK_UNATTACHED,
5044 				       PD_T_PD_DEBOUNCE);
5045 		break;
5046 	case SNK_DEBOUNCED:
5047 		if (tcpm_port_is_disconnected(port))
5048 			tcpm_set_state(port, SNK_UNATTACHED,
5049 				       PD_T_PD_DEBOUNCE);
5050 		else if (port->vbus_present)
5051 			tcpm_set_state(port,
5052 				       tcpm_try_src(port) ? SRC_TRY
5053 							  : SNK_ATTACHED,
5054 				       0);
5055 		break;
5056 	case SRC_TRY:
5057 		port->try_src_count++;
5058 		tcpm_set_cc(port, tcpm_rp_cc(port));
5059 		port->max_wait = 0;
5060 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
5061 		break;
5062 	case SRC_TRY_WAIT:
5063 		if (port->max_wait == 0) {
5064 			port->max_wait = jiffies +
5065 					 msecs_to_jiffies(PD_T_DRP_TRY);
5066 			msecs = PD_T_DRP_TRY;
5067 		} else {
5068 			if (time_is_after_jiffies(port->max_wait))
5069 				msecs = jiffies_to_msecs(port->max_wait -
5070 							 jiffies);
5071 			else
5072 				msecs = 0;
5073 		}
5074 		tcpm_set_state(port, SNK_TRYWAIT, msecs);
5075 		break;
5076 	case SRC_TRY_DEBOUNCE:
5077 		tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
5078 		break;
5079 	case SNK_TRYWAIT:
5080 		tcpm_set_cc(port, TYPEC_CC_RD);
5081 		tcpm_set_state(port, SNK_TRYWAIT_VBUS, port->timings.cc_debounce_time);
5082 		break;
5083 	case SNK_TRYWAIT_VBUS:
5084 		/*
5085 		 * TCPM stays in this state indefinitely until VBUS is detected,
5086 		 * as long as Rp does not remain undetected for more than
5087 		 * tPDDebounce.
5088 		 */
5089 		if (port->vbus_present && tcpm_port_is_sink(port)) {
5090 			tcpm_set_state(port, SNK_ATTACHED, 0);
5091 			break;
5092 		}
5093 		if (!tcpm_port_is_sink(port))
5094 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5095 		break;
5096 	case SNK_TRYWAIT_DEBOUNCE:
5097 		tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
5098 		break;
5099 	case SNK_ATTACHED:
5100 		ret = tcpm_snk_attach(port);
5101 		if (ret < 0)
5102 			tcpm_set_state(port, SNK_UNATTACHED, 0);
5103 		else
5104 			/*
5105 			 * For Type C port controllers that use Battery Charging
5106 			 * Detection (based on BCv1.2 spec) to detect USB
5107 			 * charger type, add a delay of "snk_bc12_cmpletion_time"
5108 			 * before transitioning to SNK_STARTUP to allow BC1.2
5109 			 * detection to complete before PD is eventually enabled
5110 			 * in later states.
5111 			 */
5112 			tcpm_set_state(port, SNK_STARTUP,
5113 				       port->timings.snk_bc12_cmpletion_time);
5114 		break;
5115 	case SNK_STARTUP:
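		/*
		 * (Re)start the protocol state for this connection: advertise
		 * the highest PD revision we support and reset the message
		 * counters. The negotiated revision is lowered later, based on
		 * the revision field of the first messages received from the
		 * partner and the cable.
		 */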
5116 		opmode =  tcpm_get_pwr_opmode(port->polarity ?
5117 					      port->cc2 : port->cc1);
5118 		typec_set_pwr_opmode(port->typec_port, opmode);
5119 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
5120 		port->negotiated_rev = PD_MAX_REV;
5121 		port->negotiated_rev_prime = PD_MAX_REV;
5122 		port->message_id = 0;
5123 		port->message_id_prime = 0;
5124 		port->rx_msgid = -1;
5125 		port->rx_msgid_prime = -1;
5126 		port->explicit_contract = false;
5127 
5128 		if (port->ams == POWER_ROLE_SWAP ||
5129 		    port->ams == FAST_ROLE_SWAP)
5130 			/* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
5131 			tcpm_ams_finish(port);
5132 
5133 		tcpm_set_state(port, SNK_DISCOVERY, 0);
5134 		break;
5135 	case SNK_DISCOVERY:
5136 		if (port->vbus_present) {
5137 			u32 current_lim = tcpm_get_current_limit(port);
5138 
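			/*
			 * With the slow charger loop enabled, cap the initial
			 * current limit to pSnkStdby at 5V: dividing mW by 5V
			 * yields mA (e.g. 500 mA, assuming PD_P_SNK_STDBY_MW
			 * is 2500 mW).
			 */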
5139 			if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
5140 				current_lim = PD_P_SNK_STDBY_MW / 5;
5141 			tcpm_set_current_limit(port, current_lim, 5000);
5142 			/* Do not sink vbus if the operational current is 0mA */
5143 			tcpm_set_charge(port, !port->pd_supported ||
5144 					pdo_max_current(port->snk_pdo[0]));
5145 
5146 			if (!port->pd_supported)
5147 				tcpm_set_state(port, SNK_READY, 0);
5148 			else
5149 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5150 			break;
5151 		}
5152 		/*
5153 		 * For DRP, timeouts differ. Also, handling is supposed to be
5154 		 * different and much more complex (dead battery detection;
5155 		 * see USB power delivery specification, section 8.3.3.6.1.5.1).
5156 		 */
5157 		tcpm_set_state(port, hard_reset_state(port),
5158 			       port->port_type == TYPEC_PORT_DRP ?
5159 					PD_T_DB_DETECT : PD_T_NO_RESPONSE);
5160 		break;
5161 	case SNK_DISCOVERY_DEBOUNCE:
5162 		tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
5163 			       port->timings.cc_debounce_time);
5164 		break;
5165 	case SNK_DISCOVERY_DEBOUNCE_DONE:
5166 		if (!tcpm_port_is_disconnected(port) &&
5167 		    tcpm_port_is_sink(port) &&
5168 		    ktime_after(port->delayed_runtime, ktime_get())) {
5169 			tcpm_set_state(port, SNK_DISCOVERY,
5170 				       ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
5171 			break;
5172 		}
5173 		tcpm_set_state(port, unattached_state(port), 0);
5174 		break;
5175 	case SNK_WAIT_CAPABILITIES:
5176 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
5177 		if (ret < 0) {
5178 			tcpm_set_state(port, SNK_READY, 0);
5179 			break;
5180 		}
5181 		/*
5182 		 * If VBUS has never been low, and we time out waiting
5183 		 * for source cap, try a soft reset first, in case we
5184 		 * were already in a stable contract before this boot.
5185 		 * Do this only once.
5186 		 */
5187 		if (port->vbus_never_low) {
5188 			port->vbus_never_low = false;
5189 			tcpm_set_state(port, SNK_SOFT_RESET,
5190 				       port->timings.sink_wait_cap_time);
5191 		} else {
5192 			if (!port->self_powered)
5193 				upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
5194 			else
5195 				upcoming_state = hard_reset_state(port);
5196 			tcpm_set_state(port, upcoming_state, port->timings.sink_wait_cap_time);
5197 		}
5198 		break;
5199 	case SNK_WAIT_CAPABILITIES_TIMEOUT:
5200 		/*
5201 		 * There are some USB PD sources in the field, which do not
5202 		 * properly implement the specification and fail to start
5203 		 * sending Source Capability messages after a soft reset. The
5204 		 * specification suggests to do a hard reset when no Source
5205 		 * capability message is received within PD_T_SINK_WAIT_CAP,
5206 		 * but that might effectively kill the machine's power source.
5207 		 *
5208 		 * This slightly diverges from the specification and tries to
5209 		 * recover from this by explicitly asking for the capabilities
5210 		 * using the Get_Source_Cap control message before falling back
5211 		 * to a hard reset. The control message should also be supported
5212 		 * and handled by all USB PD source and dual role devices
5213 		 * according to the specification.
5214 		 */
5215 		if (tcpm_pd_send_control(port, PD_CTRL_GET_SOURCE_CAP, TCPC_TX_SOP))
5216 			tcpm_set_state_cond(port, hard_reset_state(port), 0);
5217 		else
5218 			tcpm_set_state(port, hard_reset_state(port),
5219 				       port->timings.sink_wait_cap_time);
5220 		break;
5221 	case SNK_NEGOTIATE_CAPABILITIES:
5222 		port->pd_capable = true;
5223 		tcpm_set_partner_usb_comm_capable(port,
5224 						  !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
5225 		port->hard_reset_count = 0;
5226 		ret = tcpm_pd_send_request(port);
5227 		if (ret < 0) {
5228 			/* Restore back to the original state */
5229 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5230 							       port->pps_data.active,
5231 							       port->supply_voltage);
5232 			/* Let the Source send capabilities again. */
5233 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5234 		} else {
5235 			tcpm_set_state_cond(port, hard_reset_state(port),
5236 					    PD_T_SENDER_RESPONSE);
5237 		}
5238 		break;
5239 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
5240 		ret = tcpm_pd_send_pps_request(port);
5241 		if (ret < 0) {
5242 			/* Restore back to the original state */
5243 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5244 							       port->pps_data.active,
5245 							       port->supply_voltage);
5246 			port->pps_status = ret;
5247 			/*
5248 			 * If this was called due to updates to sink
5249 			 * capabilities, and pps is no longer valid, we should
5250 			 * safely fall back to a standard PDO.
5251 			 */
5252 			if (port->update_sink_caps)
5253 				tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
5254 			else
5255 				tcpm_set_state(port, SNK_READY, 0);
5256 		} else {
5257 			tcpm_set_state_cond(port, hard_reset_state(port),
5258 					    PD_T_SENDER_RESPONSE);
5259 		}
5260 		break;
5261 	case SNK_TRANSITION_SINK:
5262 		/* From the USB PD spec:
5263 		 * "The Sink Shall transition to Sink Standby before a positive or
5264 		 * negative voltage transition of VBUS. During Sink Standby
5265 		 * the Sink Shall reduce its power draw to pSnkStdby."
5266 		 *
5267 		 * This is not applicable to PPS though as the port can continue
5268 		 * to draw negotiated power without switching to standby.
5269 		 */
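		/*
		 * Worked example (assuming PD_P_SNK_STDBY_MW is 2500 mW): at a
		 * present supply_voltage of 9000 mV, stdby_ma below works out
		 * to 2500 * 1000 / 9000 ~= 277 mA, i.e. roughly 2.5 W.
		 */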
5270 		if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
5271 		    port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
5272 			u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
5273 
5274 			tcpm_log(port, "Setting standby current %u mV @ %u mA",
5275 				 port->supply_voltage, stdby_ma);
5276 			tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
5277 		}
5278 		fallthrough;
5279 	case SNK_TRANSITION_SINK_VBUS:
5280 		tcpm_set_state(port, hard_reset_state(port),
5281 			       PD_T_PS_TRANSITION);
5282 		break;
5283 	case SNK_READY:
5284 		port->try_snk_count = 0;
5285 		port->update_sink_caps = false;
5286 		if (port->explicit_contract) {
5287 			typec_set_pwr_opmode(port->typec_port,
5288 					     TYPEC_PWR_MODE_PD);
5289 			port->pwr_opmode = TYPEC_PWR_MODE_PD;
5290 		}
5291 
5292 		if (!port->pd_capable && port->slow_charger_loop)
5293 			tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
5294 		tcpm_swap_complete(port, 0);
5295 		tcpm_typec_connect(port);
5296 		if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
5297 			mod_enable_frs_delayed_work(port, 0);
5298 		tcpm_pps_complete(port, port->pps_status);
5299 
5300 		if (port->ams != NONE_AMS)
5301 			tcpm_ams_finish(port);
5302 		if (port->next_ams != NONE_AMS) {
5303 			port->ams = port->next_ams;
5304 			port->next_ams = NONE_AMS;
5305 		}
5306 
5307 		/*
5308 		 * If previous AMS is interrupted, switch to the upcoming
5309 		 * state.
5310 		 */
5311 		if (port->upcoming_state != INVALID_STATE) {
5312 			upcoming_state = port->upcoming_state;
5313 			port->upcoming_state = INVALID_STATE;
5314 			tcpm_set_state(port, upcoming_state, 0);
5315 			break;
5316 		}
5317 
5318 		/*
5319 		 * 6.4.4.3.1 Discover Identity
5320 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
5321 		 * Explicit Contract."
5322 		 *
5323 		 * Discover Identity on SOP' should be discovered prior to the
5324 		 * ready state, but if done after a Vconn Swap following Discover
5325 		 * Identity on SOP then the discovery process can be run here
5326 		 * as well.
5327 		 */
5328 		if (port->explicit_contract) {
5329 			if (port->send_discover_prime) {
5330 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
5331 			} else {
5332 				port->tx_sop_type = TCPC_TX_SOP;
5333 				tcpm_set_initial_svdm_version(port);
5334 			}
5335 			mod_send_discover_delayed_work(port, 0);
5336 		} else {
5337 			port->send_discover = false;
5338 			port->send_discover_prime = false;
5339 		}
5340 
5341 		power_supply_changed(port->psy);
5342 		break;
5343 
5344 	/* Accessory states */
5345 	case ACC_UNATTACHED:
5346 		tcpm_acc_detach(port);
5347 		tcpm_set_state(port, SRC_UNATTACHED, 0);
5348 		break;
5349 	case DEBUG_ACC_ATTACHED:
5350 	case AUDIO_ACC_ATTACHED:
5351 		ret = tcpm_acc_attach(port);
5352 		if (ret < 0)
5353 			tcpm_set_state(port, ACC_UNATTACHED, 0);
5354 		break;
5355 	case AUDIO_ACC_DEBOUNCE:
5356 		tcpm_set_state(port, ACC_UNATTACHED, port->timings.cc_debounce_time);
5357 		break;
5358 
5359 	/* Hard_Reset states */
5360 	case HARD_RESET_SEND:
5361 		if (port->ams != NONE_AMS)
5362 			tcpm_ams_finish(port);
5363 		if (!port->self_powered && port->port_type == TYPEC_PORT_SNK)
5364 			dev_err(port->dev, "Initiating hard-reset, which might result in machine power-loss.\n");
5365 		/*
5366 		 * State machine will be directed to HARD_RESET_START,
5367 		 * thus set upcoming_state to INVALID_STATE.
5368 		 */
5369 		port->upcoming_state = INVALID_STATE;
5370 		tcpm_ams_start(port, HARD_RESET);
5371 		break;
5372 	case HARD_RESET_START:
5373 		port->sink_cap_done = false;
5374 		if (port->tcpc->enable_frs)
5375 			port->tcpc->enable_frs(port->tcpc, false);
5376 		port->hard_reset_count++;
5377 		port->tcpc->set_pd_rx(port->tcpc, false);
5378 		tcpm_unregister_altmodes(port);
5379 		port->nr_sink_caps = 0;
5380 		port->send_discover = true;
5381 		port->send_discover_prime = false;
5382 		if (port->pwr_role == TYPEC_SOURCE)
5383 			tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
5384 				       PD_T_PS_HARD_RESET);
5385 		else
5386 			tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
5387 		break;
5388 	case SRC_HARD_RESET_VBUS_OFF:
5389 		/*
5390 		 * 7.1.5 Response to Hard Resets
5391 		 * Hard Reset Signaling indicates a communication failure has occurred and the
5392 		 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
5393 		 * drive VBUS to vSafe0V as shown in Figure 7-9.
5394 		 */
5395 		tcpm_set_vconn(port, false);
5396 		tcpm_set_vbus(port, false);
5397 		tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SOURCE,
5398 			       tcpm_data_role_for_source(port));
5399 		/*
5400 		 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
5401 		 * PD_T_SRC_RECOVER before turning vbus back on.
5402 		 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
5403 		 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
5404 		 * tells the Device Policy Manager to instruct the power supply to perform a
5405 		 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
5406 		 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
5407 		 * re-establish communication with the Sink and resume USB Default Operation.
5408 		 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
5409 		 */
5410 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
5411 		break;
5412 	case SRC_HARD_RESET_VBUS_ON:
5413 		tcpm_set_vconn(port, true);
5414 		tcpm_set_vbus(port, true);
5415 		if (port->ams == HARD_RESET)
5416 			tcpm_ams_finish(port);
5417 		if (port->pd_supported)
5418 			port->tcpc->set_pd_rx(port->tcpc, true);
5419 		tcpm_set_attached_state(port, true);
5420 		tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
5421 		break;
5422 	case SNK_HARD_RESET_SINK_OFF:
5423 		/* Do not discharge/disconnect during hard reset */
5424 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
5425 		memset(&port->pps_data, 0, sizeof(port->pps_data));
5426 		tcpm_set_vconn(port, false);
5427 		if (port->pd_capable)
5428 			tcpm_set_charge(port, false);
5429 		tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SINK,
5430 			       tcpm_data_role_for_sink(port));
5431 		/*
5432 		 * VBUS may or may not toggle, depending on the adapter.
5433 		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
5434 		 * directly after timeout.
5435 		 */
5436 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
5437 		break;
5438 	case SNK_HARD_RESET_WAIT_VBUS:
5439 		if (port->ams == HARD_RESET)
5440 			tcpm_ams_finish(port);
5441 		/* Assume we're disconnected if VBUS doesn't come back. */
5442 		tcpm_set_state(port, SNK_UNATTACHED,
5443 			       PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
5444 		break;
5445 	case SNK_HARD_RESET_SINK_ON:
5446 		/* Note: There is no guarantee that VBUS is on in this state */
5447 		/*
5448 		 * XXX:
5449 		 * The specification suggests that dual mode ports in sink
5450 		 * mode should transition to state PE_SRC_Transition_to_default.
5451 		 * See USB power delivery specification chapter 8.3.3.6.1.3.
5452 		 * This would mean to
5453 		 * - turn off VCONN, reset power supply
5454 		 * - request hardware reset
5455 		 * - turn on VCONN
5456 		 * - Transition to state PE_Src_Startup
5457 		 * SNK only ports shall transition to state Snk_Startup
5458 		 * (see chapter 8.3.3.3.8).
5459 		 * Similarly, dual-mode ports in source mode should transition
5460 		 * to PE_SNK_Transition_to_default.
5461 		 */
5462 		if (port->pd_capable) {
5463 			tcpm_set_current_limit(port,
5464 					       tcpm_get_current_limit(port),
5465 					       5000);
5466 			/* Do not sink vbus if the operational current is 0mA */
5467 			tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
5468 		}
5469 		if (port->ams == HARD_RESET)
5470 			tcpm_ams_finish(port);
5471 		tcpm_set_attached_state(port, true);
5472 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5473 		tcpm_set_state(port, SNK_STARTUP, 0);
5474 		break;
5475 
5476 	/* Soft_Reset states */
5477 	case SOFT_RESET:
5478 		port->message_id = 0;
5479 		port->rx_msgid = -1;
5480 		/* remove existing capabilities */
5481 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5482 		port->partner_source_caps = NULL;
5483 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5484 		tcpm_ams_finish(port);
5485 		if (port->pwr_role == TYPEC_SOURCE) {
5486 			port->upcoming_state = SRC_SEND_CAPABILITIES;
5487 			tcpm_ams_start(port, POWER_NEGOTIATION);
5488 		} else {
5489 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5490 		}
5491 		break;
5492 	case SRC_SOFT_RESET_WAIT_SNK_TX:
5493 	case SNK_SOFT_RESET:
5494 		if (port->ams != NONE_AMS)
5495 			tcpm_ams_finish(port);
5496 		port->upcoming_state = SOFT_RESET_SEND;
5497 		tcpm_ams_start(port, SOFT_RESET_AMS);
5498 		break;
5499 	case SOFT_RESET_SEND:
5500 		/*
5501 		 * Power Delivery 3.0 Section 6.3.13
5502 		 *
5503 		 * A Soft_Reset Message Shall be targeted at a specific entity
5504 		 * depending on the type of SOP* packet used.
5505 		 */
5506 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
5507 			port->message_id_prime = 0;
5508 			port->rx_msgid_prime = -1;
5509 			tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
5510 			tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
5511 		} else {
5512 			port->message_id = 0;
5513 			port->rx_msgid = -1;
5514 			/* remove existing capabilities */
5515 			usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5516 			port->partner_source_caps = NULL;
5517 			if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
5518 				tcpm_set_state_cond(port, hard_reset_state(port), 0);
5519 			else
5520 				tcpm_set_state_cond(port, hard_reset_state(port),
5521 						    PD_T_SENDER_RESPONSE);
5522 		}
5523 		break;
5524 
5525 	/* DR_Swap states */
5526 	case DR_SWAP_SEND:
5527 		tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
5528 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5529 			port->send_discover = true;
5530 			port->send_discover_prime = false;
5531 		}
5532 		tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
5533 				    PD_T_SENDER_RESPONSE);
5534 		break;
5535 	case DR_SWAP_ACCEPT:
5536 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5537 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5538 			port->send_discover = true;
5539 			port->send_discover_prime = false;
5540 		}
5541 		tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
5542 		break;
5543 	case DR_SWAP_SEND_TIMEOUT:
5544 		tcpm_swap_complete(port, -ETIMEDOUT);
5545 		port->send_discover = false;
5546 		port->send_discover_prime = false;
5547 		tcpm_ams_finish(port);
5548 		tcpm_set_state(port, ready_state(port), 0);
5549 		break;
5550 	case DR_SWAP_CHANGE_DR:
5551 		tcpm_unregister_altmodes(port);
5552 		if (port->data_role == TYPEC_HOST)
5553 			tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
5554 				       TYPEC_DEVICE);
5555 		else
5556 			tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
5557 				       TYPEC_HOST);
5558 		tcpm_ams_finish(port);
5559 		tcpm_set_state(port, ready_state(port), 0);
5560 		break;
5561 
5562 	case FR_SWAP_SEND:
5563 		if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
5564 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5565 			break;
5566 		}
5567 		tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
5568 		break;
5569 	case FR_SWAP_SEND_TIMEOUT:
5570 		tcpm_set_state(port, ERROR_RECOVERY, 0);
5571 		break;
5572 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5573 		tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5574 		break;
5575 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5576 		if (port->vbus_source)
5577 			tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5578 		else
5579 			tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
5580 		break;
5581 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5582 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5583 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5584 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5585 			break;
5586 		}
5587 		tcpm_set_cc(port, tcpm_rp_cc(port));
5588 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5589 		break;
5590 
5591 	/* PR_Swap states */
5592 	case PR_SWAP_ACCEPT:
5593 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5594 		tcpm_set_state(port, PR_SWAP_START, 0);
5595 		break;
5596 	case PR_SWAP_SEND:
5597 		tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
5598 		tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
5599 				    PD_T_SENDER_RESPONSE);
5600 		break;
5601 	case PR_SWAP_SEND_TIMEOUT:
5602 		tcpm_swap_complete(port, -ETIMEDOUT);
5603 		tcpm_set_state(port, ready_state(port), 0);
5604 		break;
5605 	case PR_SWAP_START:
5606 		tcpm_apply_rc(port);
5607 		if (port->pwr_role == TYPEC_SOURCE)
5608 			tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
5609 				       PD_T_SRC_TRANSITION);
5610 		else
5611 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
5612 		break;
5613 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5614 		/*
5615 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5616 		 * as this is not a disconnect.
5617 		 */
5618 		tcpm_set_vbus(port, false);
5619 		port->explicit_contract = false;
5620 		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
5621 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
5622 			       PD_T_SRCSWAPSTDBY);
5623 		break;
5624 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
5625 		tcpm_set_cc(port, TYPEC_CC_RD);
5626 		/* allow CC debounce */
5627 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
5628 			       port->timings.cc_debounce_time);
5629 		break;
5630 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5631 		/*
5632 		 * USB-PD standard, 6.2.1.4, Port Power Role:
5633 		 * "During the Power Role Swap Sequence, for the initial Source
5634 		 * Port, the Port Power Role field shall be set to Sink in the
5635 		 * PS_RDY Message indicating that the initial Source’s power
5636 		 * supply is turned off"
5637 		 */
5638 		tcpm_set_pwr_role(port, TYPEC_SINK);
5639 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5640 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5641 			break;
5642 		}
5643 		tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
5644 		break;
5645 	case PR_SWAP_SRC_SNK_SINK_ON:
5646 		tcpm_enable_auto_vbus_discharge(port, true);
5647 		/* Set the vbus disconnect threshold for implicit contract */
5648 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5649 		tcpm_set_state(port, SNK_STARTUP, 0);
5650 		break;
5651 	case PR_SWAP_SNK_SRC_SINK_OFF:
5652 		/* will be source, remove existing capabilities */
5653 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5654 		port->partner_source_caps = NULL;
5655 		/*
5656 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5657 		 * as this is not a disconnect.
5658 		 */
5659 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
5660 						       port->pps_data.active, 0);
5661 		tcpm_set_charge(port, false);
5662 		tcpm_set_state(port, ERROR_RECOVERY,
5663 			       port->timings.ps_src_off_time);
5664 		break;
5665 	case PR_SWAP_SNK_SRC_SOURCE_ON:
5666 		tcpm_enable_auto_vbus_discharge(port, true);
5667 		tcpm_set_cc(port, tcpm_rp_cc(port));
5668 		tcpm_set_vbus(port, true);
5669 		/*
5670 		 * Allow time for VBUS to ramp up; must be < tNewSrc.
5671 		 * This window also overlaps with CC debounce, so wait
5672 		 * for the longer of the two, which is PD_T_NEWSRC.
5673 		 */
5674 		tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
5675 			       PD_T_NEWSRC);
5676 		break;
5677 	case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
5678 		/*
5679 		 * USB PD standard, 6.2.1.4:
5680 		 * "Subsequent Messages initiated by the Policy Engine,
5681 		 * such as the PS_RDY Message sent to indicate that Vbus
5682 		 * is ready, will have the Port Power Role field set to
5683 		 * Source."
5684 		 */
5685 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5686 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5687 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5688 		break;
5689 
5690 	case VCONN_SWAP_ACCEPT:
5691 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5692 		tcpm_ams_finish(port);
5693 		tcpm_set_state(port, VCONN_SWAP_START, 0);
5694 		break;
5695 	case VCONN_SWAP_SEND:
5696 		tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
5697 		tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
5698 			       PD_T_SENDER_RESPONSE);
5699 		break;
5700 	case VCONN_SWAP_SEND_TIMEOUT:
5701 		tcpm_swap_complete(port, -ETIMEDOUT);
5702 		tcpm_set_state(port, ready_state(port), 0);
5703 		break;
5704 	case VCONN_SWAP_START:
5705 		if (port->vconn_role == TYPEC_SOURCE)
5706 			tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
5707 		else
5708 			tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
5709 		break;
5710 	case VCONN_SWAP_WAIT_FOR_VCONN:
5711 		tcpm_set_state(port, hard_reset_state(port),
5712 			       PD_T_VCONN_SOURCE_ON);
5713 		break;
5714 	case VCONN_SWAP_TURN_ON_VCONN:
5715 		ret = tcpm_set_vconn(port, true);
5716 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5717 		/*
5718 		 * USB PD 3.0 Section 6.4.4.3.1
5719 		 *
5720 		 * Note that a Cable Plug or VPD will not be ready for PD
5721 		 * Communication until tVCONNStable after VCONN has been applied
5722 		 */
5723 		if (!ret)
5724 			tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
5725 				       PD_T_VCONN_STABLE);
5726 		else
5727 			tcpm_set_state(port, ready_state(port), 0);
5728 		break;
5729 	case VCONN_SWAP_TURN_OFF_VCONN:
5730 		tcpm_set_vconn(port, false);
5731 		tcpm_set_state(port, ready_state(port), 0);
5732 		break;
5733 	case VCONN_SWAP_SEND_SOFT_RESET:
5734 		tcpm_swap_complete(port, port->swap_status);
5735 		if (tcpm_can_communicate_sop_prime(port)) {
5736 			port->tx_sop_type = TCPC_TX_SOP_PRIME;
5737 			port->upcoming_state = SOFT_RESET_SEND;
5738 			tcpm_ams_start(port, SOFT_RESET_AMS);
5739 		} else {
5740 			tcpm_set_state(port, ready_state(port), 0);
5741 		}
5742 		break;
5743 
5744 	case DR_SWAP_CANCEL:
5745 	case PR_SWAP_CANCEL:
5746 	case VCONN_SWAP_CANCEL:
5747 		tcpm_swap_complete(port, port->swap_status);
5748 		if (port->pwr_role == TYPEC_SOURCE)
5749 			tcpm_set_state(port, SRC_READY, 0);
5750 		else
5751 			tcpm_set_state(port, SNK_READY, 0);
5752 		break;
5753 	case FR_SWAP_CANCEL:
5754 		if (port->pwr_role == TYPEC_SOURCE)
5755 			tcpm_set_state(port, SRC_READY, 0);
5756 		else
5757 			tcpm_set_state(port, SNK_READY, 0);
5758 		break;
5759 
5760 	case BIST_RX:
5761 		switch (BDO_MODE_MASK(port->bist_request)) {
5762 		case BDO_MODE_CARRIER2:
5763 			tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
5764 			tcpm_set_state(port, unattached_state(port),
5765 				       PD_T_BIST_CONT_MODE);
5766 			break;
5767 		case BDO_MODE_TESTDATA:
5768 			if (port->tcpc->set_bist_data) {
5769 				tcpm_log(port, "Enable BIST MODE TESTDATA");
5770 				port->tcpc->set_bist_data(port->tcpc, true);
5771 			}
5772 			break;
5773 		default:
5774 			break;
5775 		}
5776 		break;
5777 	case GET_STATUS_SEND:
5778 		tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
5779 		tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
5780 			       PD_T_SENDER_RESPONSE);
5781 		break;
5782 	case GET_STATUS_SEND_TIMEOUT:
5783 		tcpm_set_state(port, ready_state(port), 0);
5784 		break;
5785 	case GET_PPS_STATUS_SEND:
5786 		tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
5787 		tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
5788 			       PD_T_SENDER_RESPONSE);
5789 		break;
5790 	case GET_PPS_STATUS_SEND_TIMEOUT:
5791 		tcpm_set_state(port, ready_state(port), 0);
5792 		break;
5793 	case GET_SINK_CAP:
5794 		tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
5795 		tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
5796 		break;
5797 	case GET_SINK_CAP_TIMEOUT:
5798 		port->sink_cap_done = true;
5799 		tcpm_set_state(port, ready_state(port), 0);
5800 		break;
5801 	case ERROR_RECOVERY:
5802 		tcpm_swap_complete(port, -EPROTO);
5803 		tcpm_pps_complete(port, -EPROTO);
5804 		tcpm_set_state(port, PORT_RESET, 0);
5805 		break;
5806 	case PORT_RESET:
5807 		tcpm_reset_port(port);
5808 		if (port->self_powered)
5809 			tcpm_set_cc(port, TYPEC_CC_OPEN);
5810 		else
5811 			tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
5812 				    TYPEC_CC_RD : tcpm_rp_cc(port));
5813 		tcpm_set_state(port, PORT_RESET_WAIT_OFF,
5814 			       PD_T_ERROR_RECOVERY);
5815 		break;
5816 	case PORT_RESET_WAIT_OFF:
5817 		tcpm_set_state(port,
5818 			       tcpm_default_state(port),
5819 			       port->vbus_present ? port->timings.ps_src_off_time : 0);
5820 		break;
5821 
5822 	/* AMS intermediate state */
5823 	case AMS_START:
5824 		if (port->upcoming_state == INVALID_STATE) {
5825 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
5826 				       SRC_READY : SNK_READY, 0);
5827 			break;
5828 		}
5829 
5830 		upcoming_state = port->upcoming_state;
5831 		port->upcoming_state = INVALID_STATE;
5832 		tcpm_set_state(port, upcoming_state, 0);
5833 		break;
5834 
5835 	/* Chunk state */
5836 	case CHUNK_NOT_SUPP:
5837 		tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
5838 		tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
5839 		break;
5840 
5841 	/* Cable states */
5842 	case SRC_VDM_IDENTITY_REQUEST:
5843 		port->send_discover_prime = true;
5844 		port->tx_sop_type = TCPC_TX_SOP_PRIME;
5845 		mod_send_discover_delayed_work(port, 0);
5846 		port->upcoming_state = SRC_SEND_CAPABILITIES;
5847 		break;
5848 
5849 	default:
5850 		WARN(1, "Unexpected port state %d\n", port->state);
5851 		break;
5852 	}
5853 }
5854 
5855 static void tcpm_state_machine_work(struct kthread_work *work)
5856 {
5857 	struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
5858 	enum tcpm_state prev_state;
5859 
5860 	mutex_lock(&port->lock);
5861 	port->state_machine_running = true;
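	/*
	 * While this flag is set, immediate state changes requested via
	 * tcpm_set_state() do not need to re-queue this work item; the
	 * do/while loop below picks them up on its next pass.
	 */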
5862 
5863 	if (port->queued_message && tcpm_send_queued_message(port))
5864 		goto done;
5865 
5866 	/* If we were queued due to a delayed state change, update it now */
5867 	if (port->delayed_state) {
5868 		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
5869 			 tcpm_states[port->state],
5870 			 tcpm_states[port->delayed_state], port->delay_ms);
5871 		port->prev_state = port->state;
5872 		port->state = port->delayed_state;
5873 		port->delayed_state = INVALID_STATE;
5874 	}
5875 
5876 	/*
5877 	 * Continue running as long as we have (non-delayed) state changes
5878 	 * to make.
5879 	 */
5880 	do {
5881 		prev_state = port->state;
5882 		run_state_machine(port);
5883 		if (port->queued_message)
5884 			tcpm_send_queued_message(port);
5885 	} while (port->state != prev_state && !port->delayed_state);
5886 
5887 done:
5888 	port->state_machine_running = false;
5889 	mutex_unlock(&port->lock);
5890 }
5891 
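/*
 * The _tcpm_* helpers below are called from tcpm_pd_event_handler() with
 * port->lock held; they translate raw CC, VBUS and Hard Reset events from the
 * TCPC into state machine transitions appropriate for the current state.
 */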
5892 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
5893 			    enum typec_cc_status cc2)
5894 {
5895 	enum typec_cc_status old_cc1, old_cc2;
5896 	enum tcpm_state new_state;
5897 
5898 	old_cc1 = port->cc1;
5899 	old_cc2 = port->cc2;
5900 	port->cc1 = cc1;
5901 	port->cc2 = cc2;
5902 
5903 	tcpm_log_force(port,
5904 		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
5905 		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
5906 		       port->polarity,
5907 		       tcpm_port_is_disconnected(port) ? "disconnected"
5908 						       : "connected");
5909 
5910 	switch (port->state) {
5911 	case TOGGLING:
5912 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5913 		    tcpm_port_is_source(port))
5914 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5915 		else if (tcpm_port_is_sink(port))
5916 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5917 		break;
5918 	case CHECK_CONTAMINANT:
5919 		/* Wait for Toggling to be resumed */
5920 		break;
5921 	case SRC_UNATTACHED:
5922 	case ACC_UNATTACHED:
5923 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5924 		    tcpm_port_is_source(port))
5925 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5926 		break;
5927 	case SRC_ATTACH_WAIT:
5928 		if (tcpm_port_is_disconnected(port) ||
5929 		    tcpm_port_is_audio_detached(port))
5930 			tcpm_set_state(port, SRC_UNATTACHED, 0);
5931 		else if (cc1 != old_cc1 || cc2 != old_cc2)
5932 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5933 		break;
5934 	case SRC_ATTACHED:
5935 	case SRC_STARTUP:
5936 	case SRC_SEND_CAPABILITIES:
5937 	case SRC_READY:
5938 		if (tcpm_port_is_disconnected(port) ||
5939 		    !tcpm_port_is_source(port)) {
5940 			if (port->port_type == TYPEC_PORT_SRC)
5941 				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5942 			else
5943 				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5944 		}
5945 		break;
5946 	case SNK_UNATTACHED:
5947 		if (tcpm_port_is_sink(port))
5948 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5949 		break;
5950 	case SNK_ATTACH_WAIT:
5951 		if ((port->cc1 == TYPEC_CC_OPEN &&
5952 		     port->cc2 != TYPEC_CC_OPEN) ||
5953 		    (port->cc1 != TYPEC_CC_OPEN &&
5954 		     port->cc2 == TYPEC_CC_OPEN))
5955 			new_state = SNK_DEBOUNCED;
5956 		else if (tcpm_port_is_disconnected(port))
5957 			new_state = SNK_UNATTACHED;
5958 		else
5959 			break;
5960 		if (new_state != port->delayed_state)
5961 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5962 		break;
5963 	case SNK_DEBOUNCED:
5964 		if (tcpm_port_is_disconnected(port))
5965 			new_state = SNK_UNATTACHED;
5966 		else if (port->vbus_present)
5967 			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
5968 		else
5969 			new_state = SNK_UNATTACHED;
5970 		if (new_state != port->delayed_state)
5971 			tcpm_set_state(port, SNK_DEBOUNCED, 0);
5972 		break;
5973 	case SNK_READY:
5974 		/*
5975 		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
5976 		 * "A port that has entered into USB PD communications with the Source and
5977 		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
5978 		 * cable disconnect in addition to monitoring VBUS.
5979 		 *
5980 		 * A port that is monitoring the CC voltage for disconnect (but is not in
5981 		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
5982 		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
5983 		 * vRd-USB for tPDDebounce."
5984 		 *
5985 		 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
5986 		 * away before vbus decays to disconnect threshold. Allow
5987 		 * disconnect to be driven by vbus disconnect when auto vbus
5988 		 * discharge is enabled.
5989 		 */
5990 		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
5991 			tcpm_set_state(port, unattached_state(port), 0);
5992 		else if (!port->pd_capable &&
5993 			 (cc1 != old_cc1 || cc2 != old_cc2))
5994 			tcpm_set_current_limit(port,
5995 					       tcpm_get_current_limit(port),
5996 					       5000);
5997 		break;
5998 
5999 	case AUDIO_ACC_ATTACHED:
6000 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
6001 			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
6002 		break;
6003 	case AUDIO_ACC_DEBOUNCE:
6004 		if (tcpm_port_is_audio(port))
6005 			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
6006 		break;
6007 
6008 	case DEBUG_ACC_ATTACHED:
6009 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
6010 			tcpm_set_state(port, ACC_UNATTACHED, 0);
6011 		break;
6012 
6013 	case SNK_TRY:
6014 		/* Do nothing, waiting for timeout */
6015 		break;
6016 
6017 	case SNK_DISCOVERY:
6018 		/* CC line is unstable, wait for debounce */
6019 		if (tcpm_port_is_disconnected(port))
6020 			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
6021 		break;
6022 	case SNK_DISCOVERY_DEBOUNCE:
6023 		break;
6024 
6025 	case SRC_TRYWAIT:
6026 		/* Hand over to state machine if needed */
6027 		if (!port->vbus_present && tcpm_port_is_source(port))
6028 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6029 		break;
6030 	case SRC_TRYWAIT_DEBOUNCE:
6031 		if (port->vbus_present || !tcpm_port_is_source(port))
6032 			tcpm_set_state(port, SRC_TRYWAIT, 0);
6033 		break;
6034 	case SNK_TRY_WAIT_DEBOUNCE:
6035 		if (!tcpm_port_is_sink(port)) {
6036 			port->max_wait = 0;
6037 			tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE);
6038 		}
6039 		break;
6040 	case SRC_TRY_WAIT:
6041 		if (tcpm_port_is_source(port))
6042 			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
6043 		break;
6044 	case SRC_TRY_DEBOUNCE:
6045 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
6046 		break;
6047 	case SNK_TRYWAIT_DEBOUNCE:
6048 		if (tcpm_port_is_sink(port))
6049 			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
6050 		break;
6051 	case SNK_TRYWAIT_VBUS:
6052 		if (!tcpm_port_is_sink(port))
6053 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
6054 		break;
6055 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
6056 		if (!tcpm_port_is_sink(port))
6057 			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
6058 		else
6059 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
6060 		break;
6061 	case SNK_TRYWAIT:
6062 		/* Do nothing, waiting for tCCDebounce */
6063 		break;
6064 	case PR_SWAP_SNK_SRC_SINK_OFF:
6065 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6066 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
6067 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
6068 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6069 		/*
6070 		 * CC state change is expected in PR_SWAP
6071 		 * Ignore it.
6072 		 */
6073 		break;
6074 	case FR_SWAP_SEND:
6075 	case FR_SWAP_SEND_TIMEOUT:
6076 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6077 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6078 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6079 		/* Do nothing, CC change expected */
6080 		break;
6081 
6082 	case PORT_RESET:
6083 	case PORT_RESET_WAIT_OFF:
6084 		/*
6085 		 * State set back to default mode once the timer completes.
6086 		 * Ignore CC changes here.
6087 		 */
6088 		break;
6089 	default:
6090 		/*
6091 		 * While acting as sink and auto vbus discharge is enabled, allow the
6092 		 * disconnect to be driven by vbus disconnect.
6093 		 */
6094 		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
6095 							 port->auto_vbus_discharge_enabled))
6096 			tcpm_set_state(port, unattached_state(port), 0);
6097 		break;
6098 	}
6099 }
6100 
6101 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
6102 {
6103 	tcpm_log_force(port, "VBUS on");
6104 	port->vbus_present = true;
6105 	/*
6106 	 * vbus_present being true, i.e. the voltage at VBUS being greater than VSAFE5V,
6107 	 * implies that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
6108 	 */
6109 	port->vbus_vsafe0v = false;
6110 
6111 	switch (port->state) {
6112 	case SNK_TRANSITION_SINK_VBUS:
6113 		port->explicit_contract = true;
6114 		tcpm_set_state(port, SNK_READY, 0);
6115 		break;
6116 	case SNK_DISCOVERY:
6117 		tcpm_set_state(port, SNK_DISCOVERY, 0);
6118 		break;
6119 
6120 	case SNK_DEBOUNCED:
6121 		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
6122 							: SNK_ATTACHED,
6123 				       0);
6124 		break;
6125 	case SNK_HARD_RESET_WAIT_VBUS:
6126 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
6127 		break;
6128 	case SRC_ATTACHED:
6129 		tcpm_set_state(port, SRC_STARTUP, 0);
6130 		break;
6131 	case SRC_HARD_RESET_VBUS_ON:
6132 		tcpm_set_state(port, SRC_STARTUP, 0);
6133 		break;
6134 
6135 	case SNK_TRY:
6136 		/* Do nothing, waiting for timeout */
6137 		break;
6138 	case SRC_TRYWAIT:
6139 		/* Do nothing, Waiting for Rd to be detected */
6140 		break;
6141 	case SRC_TRYWAIT_DEBOUNCE:
6142 		tcpm_set_state(port, SRC_TRYWAIT, 0);
6143 		break;
6144 	case SNK_TRY_WAIT_DEBOUNCE:
6145 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
6146 		break;
6147 	case SNK_TRYWAIT:
6148 		/* Do nothing, waiting for tCCDebounce */
6149 		break;
6150 	case SNK_TRYWAIT_VBUS:
6151 		if (tcpm_port_is_sink(port))
6152 			tcpm_set_state(port, SNK_ATTACHED, 0);
6153 		break;
6154 	case SNK_TRYWAIT_DEBOUNCE:
6155 		/* Do nothing, waiting for Rp */
6156 		break;
6157 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
6158 		if (port->vbus_present && tcpm_port_is_sink(port))
6159 			tcpm_set_state(port, SNK_ATTACHED, 0);
6160 		break;
6161 	case SRC_TRY_WAIT:
6162 	case SRC_TRY_DEBOUNCE:
6163 		/* Do nothing, waiting for sink detection */
6164 		break;
6165 	case FR_SWAP_SEND:
6166 	case FR_SWAP_SEND_TIMEOUT:
6167 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6168 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6169 		if (port->tcpc->frs_sourcing_vbus)
6170 			port->tcpc->frs_sourcing_vbus(port->tcpc);
6171 		break;
6172 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6173 		if (port->tcpc->frs_sourcing_vbus)
6174 			port->tcpc->frs_sourcing_vbus(port->tcpc);
6175 		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
6176 		break;
6177 
6178 	case PORT_RESET:
6179 	case PORT_RESET_WAIT_OFF:
6180 		/*
6181 		 * State set back to default mode once the timer completes.
6182 		 * Ignore vbus changes here.
6183 		 */
6184 		break;
6185 
6186 	default:
6187 		break;
6188 	}
6189 }
6190 
6191 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
6192 {
6193 	tcpm_log_force(port, "VBUS off");
6194 	port->vbus_present = false;
6195 	port->vbus_never_low = false;
6196 	switch (port->state) {
6197 	case SNK_HARD_RESET_SINK_OFF:
6198 		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
6199 		break;
6200 	case HARD_RESET_SEND:
6201 		break;
6202 	case SNK_TRY:
6203 		/* Do nothing, waiting for timeout */
6204 		break;
6205 	case SRC_TRYWAIT:
6206 		/* Hand over to state machine if needed */
6207 		if (tcpm_port_is_source(port))
6208 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6209 		break;
6210 	case SNK_TRY_WAIT_DEBOUNCE:
6211 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
6212 		break;
6213 	case SNK_TRYWAIT:
6214 	case SNK_TRYWAIT_VBUS:
6215 	case SNK_TRYWAIT_DEBOUNCE:
6216 		break;
6217 	case SNK_ATTACH_WAIT:
6218 	case SNK_DEBOUNCED:
6219 		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
6220 		break;
6221 
6222 	case SNK_NEGOTIATE_CAPABILITIES:
6223 		break;
6224 
6225 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6226 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
6227 		break;
6228 
6229 	case PR_SWAP_SNK_SRC_SINK_OFF:
6230 		/* Do nothing, expected */
6231 		break;
6232 
6233 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6234 		/*
6235 		 * Do nothing when vbus off notification is received.
6236 		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
6237 		 * for the vbus source to ramp up.
6238 		 */
6239 		break;
6240 
6241 	case PORT_RESET_WAIT_OFF:
6242 		tcpm_set_state(port, tcpm_default_state(port), 0);
6243 		break;
6244 
6245 	case SRC_TRY_WAIT:
6246 	case SRC_TRY_DEBOUNCE:
6247 		/* Do nothing, waiting for sink detection */
6248 		break;
6249 
6250 	case SRC_STARTUP:
6251 	case SRC_SEND_CAPABILITIES:
6252 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6253 	case SRC_NEGOTIATE_CAPABILITIES:
6254 	case SRC_TRANSITION_SUPPLY:
6255 	case SRC_READY:
6256 	case SRC_WAIT_NEW_CAPABILITIES:
6257 		/*
6258 		 * Force to unattached state to re-initiate connection.
6259 		 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
6260 		 * sink removed. Although sink removal here is due to source's vbus collapse,
6261 		 * treat it the same way for consistency.
6262 		 */
6263 		if (port->port_type == TYPEC_PORT_SRC)
6264 			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
6265 		else
6266 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6267 		break;
6268 
6269 	case PORT_RESET:
6270 		/*
6271 		 * State set back to default mode once the timer completes.
6272 		 * Ignore vbus changes here.
6273 		 */
6274 		break;
6275 
6276 	case FR_SWAP_SEND:
6277 	case FR_SWAP_SEND_TIMEOUT:
6278 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6279 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6280 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6281 		/* Do nothing, vbus drop expected */
6282 		break;
6283 
6284 	case SNK_HARD_RESET_WAIT_VBUS:
6285 		/* Do nothing, it's OK to receive vbus off events */
6286 		break;
6287 
6288 	default:
6289 		if (port->pwr_role == TYPEC_SINK && port->attached)
6290 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6291 		break;
6292 	}
6293 }
6294 
6295 static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
6296 {
6297 	tcpm_log_force(port, "VBUS VSAFE0V");
6298 	port->vbus_vsafe0v = true;
6299 	switch (port->state) {
6300 	case SRC_HARD_RESET_VBUS_OFF:
6301 		/*
6302 		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
6303 		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
6304 		 */
6305 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
6306 		break;
6307 	case SRC_ATTACH_WAIT:
6308 		if (tcpm_port_is_source(port))
6309 			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
6310 				       port->timings.cc_debounce_time);
6311 		break;
6312 	case SRC_STARTUP:
6313 	case SRC_SEND_CAPABILITIES:
6314 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6315 	case SRC_NEGOTIATE_CAPABILITIES:
6316 	case SRC_TRANSITION_SUPPLY:
6317 	case SRC_READY:
6318 	case SRC_WAIT_NEW_CAPABILITIES:
6319 		if (port->auto_vbus_discharge_enabled) {
6320 			if (port->port_type == TYPEC_PORT_SRC)
6321 				tcpm_set_state(port, SRC_UNATTACHED, 0);
6322 			else
6323 				tcpm_set_state(port, SNK_UNATTACHED, 0);
6324 		}
6325 		break;
6326 	case PR_SWAP_SNK_SRC_SINK_OFF:
6327 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6328 		/* Do nothing, vsafe0v is expected during transition */
6329 		break;
6330 	case SNK_ATTACH_WAIT:
6331 	case SNK_DEBOUNCED:
6332 		/* Do nothing, still waiting for VSAFE5V to connect */
6333 		break;
6334 	case SNK_HARD_RESET_WAIT_VBUS:
6335 		/* Do nothing, it's OK to receive vbus off events */
6336 		break;
6337 	default:
6338 		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
6339 			tcpm_set_state(port, SNK_UNATTACHED, 0);
6340 		break;
6341 	}
6342 }
6343 
6344 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
6345 {
6346 	tcpm_log_force(port, "Received hard reset");
6347 	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
6348 		port->tcpc->set_bist_data(port->tcpc, false);
6349 
6350 	switch (port->state) {
6351 	case TOGGLING:
6352 	case ERROR_RECOVERY:
6353 	case PORT_RESET:
6354 	case PORT_RESET_WAIT_OFF:
6355 		return;
6356 	default:
6357 		break;
6358 	}
6359 
6360 	if (port->ams != NONE_AMS)
6361 		port->ams = NONE_AMS;
6362 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
6363 		port->ams = HARD_RESET;
6364 	/*
6365 	 * If we keep receiving hard reset requests, executing the hard reset
6366 	 * must have failed. Revert to error recovery if that happens.
6367 	 */
6368 	tcpm_set_state(port,
6369 		       port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
6370 				HARD_RESET_START : ERROR_RECOVERY,
6371 		       0);
6372 }
6373 
6374 static void tcpm_pd_event_handler(struct kthread_work *work)
6375 {
6376 	struct tcpm_port *port = container_of(work, struct tcpm_port,
6377 					      event_work);
6378 	u32 events;
6379 
6380 	mutex_lock(&port->lock);
6381 
6382 	spin_lock(&port->pd_event_lock);
6383 	while (port->pd_events) {
6384 		events = port->pd_events;
6385 		port->pd_events = 0;
6386 		spin_unlock(&port->pd_event_lock);
6387 		if (events & TCPM_RESET_EVENT)
6388 			_tcpm_pd_hard_reset(port);
6389 		if (events & TCPM_VBUS_EVENT) {
6390 			bool vbus;
6391 
6392 			vbus = port->tcpc->get_vbus(port->tcpc);
6393 			if (vbus) {
6394 				_tcpm_pd_vbus_on(port);
6395 			} else {
6396 				_tcpm_pd_vbus_off(port);
6397 				/*
6398 				 * When TCPC does not support detecting vsafe0v voltage level,
6399 				 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
6400 				 * to see if vbus has discharged to VSAFE0V.
6401 				 */
6402 				if (!port->tcpc->is_vbus_vsafe0v ||
6403 				    port->tcpc->is_vbus_vsafe0v(port->tcpc))
6404 					_tcpm_pd_vbus_vsafe0v(port);
6405 			}
6406 		}
6407 		if (events & TCPM_CC_EVENT) {
6408 			enum typec_cc_status cc1, cc2;
6409 
6410 			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6411 				_tcpm_cc_change(port, cc1, cc2);
6412 		}
6413 		if (events & TCPM_FRS_EVENT) {
6414 			if (port->state == SNK_READY) {
6415 				int ret;
6416 
6417 				port->upcoming_state = FR_SWAP_SEND;
6418 				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
6419 				if (ret == -EAGAIN)
6420 					port->upcoming_state = INVALID_STATE;
6421 			} else {
6422 				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
6423 			}
6424 		}
6425 		if (events & TCPM_SOURCING_VBUS) {
6426 			tcpm_log(port, "sourcing vbus");
6427 			/*
6428 			 * In fast role swap case TCPC autonomously sources vbus. Set vbus_source
6429 			 * true as TCPM wouldn't have called tcpm_set_vbus.
6430 			 *
6431 			 * When vbus is sourced at the request of TCPM, i.e. TCPM called
6432 			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
6433 			 */
6434 			port->vbus_source = true;
6435 			_tcpm_pd_vbus_on(port);
6436 		}
6437 		if (events & TCPM_PORT_CLEAN) {
6438 			tcpm_log(port, "port clean");
6439 			if (port->state == CHECK_CONTAMINANT) {
6440 				if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
6441 					tcpm_set_state(port, TOGGLING, 0);
6442 				else
6443 					tcpm_set_state(port, tcpm_default_state(port), 0);
6444 			}
6445 		}
6446 		if (events & TCPM_PORT_ERROR) {
6447 			tcpm_log(port, "port triggering error recovery");
6448 			tcpm_set_state(port, ERROR_RECOVERY, 0);
6449 		}
6450 
6451 		spin_lock(&port->pd_event_lock);
6452 	}
6453 	spin_unlock(&port->pd_event_lock);
6454 	mutex_unlock(&port->lock);
6455 }
6456 
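/*
 * The exported notification helpers below are intended to be called by
 * low-level TCPC drivers when they detect an event; they only record the
 * event under pd_event_lock and queue event_work, which is then serviced by
 * tcpm_pd_event_handler(). A minimal usage sketch from a hypothetical TCPC
 * driver's threaded interrupt handler (all foo_* names are illustrative
 * only):
 *
 *	static irqreturn_t foo_tcpc_irq_thread(int irq, void *dev_id)
 *	{
 *		struct foo_tcpc *chip = dev_id;
 *		u32 status = foo_tcpc_read_status(chip);
 *
 *		if (status & FOO_EVENT_CC)
 *			tcpm_cc_change(chip->tcpm_port);
 *		if (status & FOO_EVENT_VBUS)
 *			tcpm_vbus_change(chip->tcpm_port);
 *		if (status & FOO_EVENT_HARD_RESET)
 *			tcpm_pd_hard_reset(chip->tcpm_port);
 *
 *		return IRQ_HANDLED;
 *	}
 */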
6457 void tcpm_cc_change(struct tcpm_port *port)
6458 {
6459 	spin_lock(&port->pd_event_lock);
6460 	port->pd_events |= TCPM_CC_EVENT;
6461 	spin_unlock(&port->pd_event_lock);
6462 	kthread_queue_work(port->wq, &port->event_work);
6463 }
6464 EXPORT_SYMBOL_GPL(tcpm_cc_change);
6465 
6466 void tcpm_vbus_change(struct tcpm_port *port)
6467 {
6468 	spin_lock(&port->pd_event_lock);
6469 	port->pd_events |= TCPM_VBUS_EVENT;
6470 	spin_unlock(&port->pd_event_lock);
6471 	kthread_queue_work(port->wq, &port->event_work);
6472 }
6473 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
6474 
6475 void tcpm_pd_hard_reset(struct tcpm_port *port)
6476 {
6477 	spin_lock(&port->pd_event_lock);
6478 	port->pd_events = TCPM_RESET_EVENT;
6479 	spin_unlock(&port->pd_event_lock);
6480 	kthread_queue_work(port->wq, &port->event_work);
6481 }
6482 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
6483 
6484 void tcpm_sink_frs(struct tcpm_port *port)
6485 {
6486 	spin_lock(&port->pd_event_lock);
6487 	port->pd_events |= TCPM_FRS_EVENT;
6488 	spin_unlock(&port->pd_event_lock);
6489 	kthread_queue_work(port->wq, &port->event_work);
6490 }
6491 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
6492 
6493 void tcpm_sourcing_vbus(struct tcpm_port *port)
6494 {
6495 	spin_lock(&port->pd_event_lock);
6496 	port->pd_events |= TCPM_SOURCING_VBUS;
6497 	spin_unlock(&port->pd_event_lock);
6498 	kthread_queue_work(port->wq, &port->event_work);
6499 }
6500 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
6501 
6502 void tcpm_port_clean(struct tcpm_port *port)
6503 {
6504 	spin_lock(&port->pd_event_lock);
6505 	port->pd_events |= TCPM_PORT_CLEAN;
6506 	spin_unlock(&port->pd_event_lock);
6507 	kthread_queue_work(port->wq, &port->event_work);
6508 }
6509 EXPORT_SYMBOL_GPL(tcpm_port_clean);
6510 
6511 bool tcpm_port_is_toggling(struct tcpm_port *port)
6512 {
6513 	return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
6514 }
6515 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
6516 
6517 void tcpm_port_error_recovery(struct tcpm_port *port)
6518 {
6519 	spin_lock(&port->pd_event_lock);
6520 	port->pd_events |= TCPM_PORT_ERROR;
6521 	spin_unlock(&port->pd_event_lock);
6522 	kthread_queue_work(port->wq, &port->event_work);
6523 }
6524 EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
6525 
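/*
 * FRS can only be enabled once the partner's Sink Capabilities have been
 * read, since the fixed sink PDO carries the Fast Role Swap required current.
 * This work item therefore keeps rescheduling itself until the port is idle
 * enough to issue GET_SINK_CAP, or until the port turns out not to be FRS
 * capable.
 */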
6526 static void tcpm_enable_frs_work(struct kthread_work *work)
6527 {
6528 	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
6529 	int ret;
6530 
6531 	mutex_lock(&port->lock);
6532 	/* Not FRS capable */
6533 	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
6534 	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
6535 	    !port->tcpc->enable_frs ||
6536 	    /* Sink caps queried */
6537 	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
6538 		goto unlock;
6539 
6540 	/* Send when the state machine is idle */
6541 	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
6542 	    port->send_discover_prime)
6543 		goto resched;
6544 
6545 	port->upcoming_state = GET_SINK_CAP;
6546 	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
6547 	if (ret == -EAGAIN) {
6548 		port->upcoming_state = INVALID_STATE;
6549 	} else {
6550 		port->sink_cap_done = true;
6551 		goto unlock;
6552 	}
6553 resched:
6554 	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
6555 unlock:
6556 	mutex_unlock(&port->lock);
6557 }
6558 
6559 static void tcpm_send_discover_work(struct kthread_work *work)
6560 {
6561 	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
6562 
6563 	mutex_lock(&port->lock);
6564 	/* No need to send DISCOVER_IDENTITY anymore */
6565 	if (!port->send_discover && !port->send_discover_prime)
6566 		goto unlock;
6567 
6568 	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
6569 		port->send_discover = false;
6570 		port->send_discover_prime = false;
6571 		goto unlock;
6572 	}
6573 
6574 	/* Retry if the port is not idle */
6575 	if ((port->state != SRC_READY && port->state != SNK_READY &&
6576 	     port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
6577 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
6578 		goto unlock;
6579 	}
6580 
6581 	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
6582 
6583 unlock:
6584 	mutex_unlock(&port->lock);
6585 }
6586 
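/*
 * The tcpm_dr_set(), tcpm_pr_set() and tcpm_vconn_set() callbacks below are
 * wired into the typec class and are typically reached from user space via
 * the port's sysfs attributes, e.g.:
 *
 *	echo device > /sys/class/typec/port0/data_role
 *	echo sink   > /sys/class/typec/port0/power_role
 *
 * They trigger the corresponding role change and then block for up to
 * PD_ROLE_SWAP_TIMEOUT waiting for swap_complete, returning the swap status
 * (or -ETIMEDOUT) to the caller.
 */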
6587 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
6588 {
6589 	struct tcpm_port *port = typec_get_drvdata(p);
6590 	int ret;
6591 
6592 	mutex_lock(&port->swap_lock);
6593 	mutex_lock(&port->lock);
6594 
6595 	if (port->typec_caps.data != TYPEC_PORT_DRD) {
6596 		ret = -EINVAL;
6597 		goto port_unlock;
6598 	}
6599 	if (port->state != SRC_READY && port->state != SNK_READY) {
6600 		ret = -EAGAIN;
6601 		goto port_unlock;
6602 	}
6603 
6604 	if (port->data_role == data) {
6605 		ret = 0;
6606 		goto port_unlock;
6607 	}
6608 
6609 	/*
6610 	 * XXX
6611 	 * 6.3.9: If an alternate mode is active, a request to swap
6612 	 * alternate modes shall trigger a port reset.
6613 	 * Reject data role swap request in this case.
6614 	 */
6615 
6616 	if (!port->pd_capable) {
6617 		/*
6618 		 * If the partner is not PD capable, reset the port to
6619 		 * trigger a role change. This can only work if a preferred
6620 		 * role is configured, and if it matches the requested role.
6621 		 */
6622 		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
6623 		    port->try_role == port->pwr_role) {
6624 			ret = -EINVAL;
6625 			goto port_unlock;
6626 		}
6627 		port->non_pd_role_swap = true;
6628 		tcpm_set_state(port, PORT_RESET, 0);
6629 	} else {
6630 		port->upcoming_state = DR_SWAP_SEND;
6631 		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
6632 		if (ret == -EAGAIN) {
6633 			port->upcoming_state = INVALID_STATE;
6634 			goto port_unlock;
6635 		}
6636 	}
6637 
6638 	port->swap_status = 0;
6639 	port->swap_pending = true;
6640 	reinit_completion(&port->swap_complete);
6641 	mutex_unlock(&port->lock);
6642 
6643 	if (!wait_for_completion_timeout(&port->swap_complete,
6644 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6645 		ret = -ETIMEDOUT;
6646 	else
6647 		ret = port->swap_status;
6648 
6649 	port->non_pd_role_swap = false;
6650 	goto swap_unlock;
6651 
6652 port_unlock:
6653 	mutex_unlock(&port->lock);
6654 swap_unlock:
6655 	mutex_unlock(&port->swap_lock);
6656 	return ret;
6657 }
6658 
6659 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
6660 {
6661 	struct tcpm_port *port = typec_get_drvdata(p);
6662 	int ret;
6663 
6664 	mutex_lock(&port->swap_lock);
6665 	mutex_lock(&port->lock);
6666 
6667 	if (port->port_type != TYPEC_PORT_DRP) {
6668 		ret = -EINVAL;
6669 		goto port_unlock;
6670 	}
6671 	if (port->state != SRC_READY && port->state != SNK_READY) {
6672 		ret = -EAGAIN;
6673 		goto port_unlock;
6674 	}
6675 
6676 	if (role == port->pwr_role) {
6677 		ret = 0;
6678 		goto port_unlock;
6679 	}
6680 
6681 	port->upcoming_state = PR_SWAP_SEND;
6682 	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
6683 	if (ret == -EAGAIN) {
6684 		port->upcoming_state = INVALID_STATE;
6685 		goto port_unlock;
6686 	}
6687 
6688 	port->swap_status = 0;
6689 	port->swap_pending = true;
6690 	reinit_completion(&port->swap_complete);
6691 	mutex_unlock(&port->lock);
6692 
6693 	if (!wait_for_completion_timeout(&port->swap_complete,
6694 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6695 		ret = -ETIMEDOUT;
6696 	else
6697 		ret = port->swap_status;
6698 
6699 	goto swap_unlock;
6700 
6701 port_unlock:
6702 	mutex_unlock(&port->lock);
6703 swap_unlock:
6704 	mutex_unlock(&port->swap_lock);
6705 	return ret;
6706 }
6707 
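/*
 * VCONN source swap handler for the typec class. Starts a VCONN_Swap AMS
 * when the requested role differs from the current one and waits for the
 * swap to complete.
 */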
static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (role == port->vconn_role) {
		ret = 0;
		goto port_unlock;
	}

	port->upcoming_state = VCONN_SWAP_SEND;
	ret = tcpm_ams_start(port, VCONN_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}

static int tcpm_try_role(struct typec_port *p, int role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	struct tcpc_dev *tcpc = port->tcpc;
	int ret = 0;

	mutex_lock(&port->lock);
	if (tcpc->try_role)
		ret = tcpc->try_role(tcpc, role);
	if (!ret)
		port->try_role = role;
	port->try_src_count = 0;
	port->try_snk_count = 0;
	mutex_unlock(&port->lock);

	return ret;
}

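/*
 * Request a new PPS operating current (in mA). The request is rejected if
 * no PPS contract is active, if it exceeds the advertised maximum current,
 * or if the resulting power would drop below the sink's operating power.
 * The value is rounded down to the PPS current step before renegotiating.
 */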
static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (req_op_curr > port->pps_data.max_curr) {
		ret = -EINVAL;
		goto port_unlock;
	}

	target_mw = (req_op_curr * port->supply_voltage) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down operating current to align with PPS valid steps */
	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);

	reinit_completion(&port->pps_complete);
	port->pps_data.req_op_curr = req_op_curr;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}

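/*
 * Request a new PPS output voltage (in mV). Rejected unless a PPS contract
 * is active and the resulting power stays at or above the sink's operating
 * power. The value is rounded down to the PPS voltage step before the new
 * request is sent.
 */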
static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	target_mw = (port->current_limit * req_out_volt) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down output voltage to align with PPS valid steps */
	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);

	reinit_completion(&port->pps_complete);
	port->pps_data.req_out_volt = req_out_volt;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}

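/*
 * Enter or leave PPS mode. Activation renegotiates a PPS contract based on
 * the currently negotiated voltage and current limit; deactivation falls
 * back to a standard fixed PDO contract.
 */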
static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
{
	int ret = 0;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.supported) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	/* Trying to deactivate PPS when already deactivated so just bail */
	if (!port->pps_data.active && !activate)
		goto port_unlock;

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (activate)
		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	else
		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	reinit_completion(&port->pps_complete);
	port->pps_status = 0;
	port->pps_pending = true;

	/* Trigger PPS request or move back to standard PDO contract */
	if (activate) {
		port->pps_data.req_out_volt = port->supply_voltage;
		port->pps_data.req_op_curr = port->current_limit;
	}
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}

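/*
 * (Re)initialize the port: reset the TCPC and the TCPM state, sample the
 * current VBUS and CC status, and finally force a port reset so that the
 * attached partner starts from a clean slate.
 */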
static void tcpm_init(struct tcpm_port *port)
{
	enum typec_cc_status cc1, cc2;

	port->tcpc->init(port->tcpc);

	tcpm_reset_port(port);

	/*
	 * XXX
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;

	/*
	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
	 * So implicitly vbus_vsafe0v = false.
	 *
	 * 2. When vbus_present is false and the TCPC does NOT support querying
	 * vsafe0v status, it's best to assume vbus is at VSAFE0V, i.e.
	 * vbus_vsafe0v is true.
	 *
	 * 3. When vbus_present is false and the TCPC does support querying
	 * vsafe0v, query the tcpc for the vsafe0v status.
	 */
	if (port->vbus_present)
		port->vbus_vsafe0v = false;
	else if (!port->tcpc->is_vbus_vsafe0v)
		port->vbus_vsafe0v = true;
	else
		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);

	tcpm_set_state(port, tcpm_default_state(port), 0);

	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);

	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, PORT_RESET, 0);
}

static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
{
	struct tcpm_port *port = typec_get_drvdata(p);

	mutex_lock(&port->lock);
	if (type == port->port_type)
		goto port_unlock;

	port->port_type = type;

	if (!port->connected) {
		tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SNK) {
		if (!(port->pwr_role == TYPEC_SINK &&
		      port->data_role == TYPEC_DEVICE))
			tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SRC) {
		if (!(port->pwr_role == TYPEC_SOURCE &&
		      port->data_role == TYPEC_HOST))
			tcpm_set_state(port, PORT_RESET, 0);
	}

port_unlock:
	mutex_unlock(&port->lock);
	return 0;
}

static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
{
	int i;

	for (i = 0; port->pd_list[i]; i++) {
		if (port->pd_list[i]->pd == pd)
			return port->pd_list[i];
	}

	return ERR_PTR(-ENODATA);
}

static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
{
	struct tcpm_port *port = typec_get_drvdata(p);

	return port->pds;
}

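/*
 * Switch the port to one of the registered USB Power Delivery capability
 * sets. The local source/sink PDOs are updated and, depending on the current
 * state, a new power negotiation is started so the change takes effect on
 * the wire.
 */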
static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	struct pd_data *data;
	int i, ret = 0;

	mutex_lock(&port->lock);

	if (port->selected_pd == pd)
		goto unlock;

	data = tcpm_find_pd_data(port, pd);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto unlock;
	}

	if (data->sink_desc.pdo[0]) {
		for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
			port->snk_pdo[i] = data->sink_desc.pdo[i];
		port->nr_snk_pdo = i;
		port->operating_snk_mw = data->operating_snk_mw;
	}

	if (data->source_desc.pdo[0]) {
		for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
			port->src_pdo[i] = data->source_desc.pdo[i];
		port->nr_src_pdo = i;
	}

	switch (port->state) {
	case SRC_UNATTACHED:
	case SRC_ATTACH_WAIT:
	case SRC_TRYWAIT:
		tcpm_set_cc(port, tcpm_rp_cc(port));
		break;
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		port->caps_count = 0;
		port->upcoming_state = SRC_SEND_CAPABILITIES;
		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto unlock;
		}
		break;
	case SNK_NEGOTIATE_CAPABILITIES:
	case SNK_NEGOTIATE_PPS_CAPABILITIES:
	case SNK_READY:
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
		if (port->pps_data.active)
			port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
		else if (port->pd_capable)
			port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
		else
			break;

		port->update_sink_caps = true;

		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto unlock;
		}
		break;
	default:
		break;
	}

	port->port_source_caps = data->source_cap;
	port->port_sink_caps = data->sink_cap;
	typec_port_set_usb_power_delivery(p, NULL);
	port->selected_pd = pd;
	typec_port_set_usb_power_delivery(p, port->selected_pd);
unlock:
	mutex_unlock(&port->lock);
	return ret;
}

static const struct typec_operations tcpm_ops = {
	.try_role = tcpm_try_role,
	.dr_set = tcpm_dr_set,
	.pr_set = tcpm_pr_set,
	.vconn_set = tcpm_vconn_set,
	.port_type_set = tcpm_port_type_set,
	.pd_get = tcpm_pd_get,
	.pd_set = tcpm_pd_set
};

void tcpm_tcpc_reset(struct tcpm_port *port)
{
	mutex_lock(&port->lock);
	/* XXX: Maintain PD connection if possible? */
	tcpm_init(port);
	mutex_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);

static void tcpm_port_unregister_pd(struct tcpm_port *port)
{
	int i;

	port->port_sink_caps = NULL;
	port->port_source_caps = NULL;
	for (i = 0; i < port->pd_count; i++) {
		usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
		usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
		devm_kfree(port->dev, port->pd_list[i]);
		port->pd_list[i] = NULL;
		usb_power_delivery_unregister(port->pds[i]);
		port->pds[i] = NULL;
	}
}

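/*
 * Register the usb_power_delivery objects and their source/sink capabilities
 * for every capability set parsed from firmware. The first entry becomes the
 * initially selected one.
 */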
static int tcpm_port_register_pd(struct tcpm_port *port)
{
	u16 pd_revision = port->typec_caps.pd_revision;
	u16 pd_version = port->pd_rev.ver_major << 8 | port->pd_rev.ver_minor;
	struct usb_power_delivery_desc desc = { pd_revision, pd_version };
	struct usb_power_delivery_capabilities *cap;
	int ret, i;

	if (!port->nr_src_pdo && !port->nr_snk_pdo)
		return 0;

	for (i = 0; i < port->pd_count; i++) {
		port->pds[i] = usb_power_delivery_register(port->dev, &desc);
		if (IS_ERR(port->pds[i])) {
			ret = PTR_ERR(port->pds[i]);
			goto err_unregister;
		}
		port->pd_list[i]->pd = port->pds[i];

		if (port->pd_list[i]->source_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(port->pds[i],
								&port->pd_list[i]->source_desc);
			if (IS_ERR(cap)) {
				ret = PTR_ERR(cap);
				goto err_unregister;
			}
			port->pd_list[i]->source_cap = cap;
		}

		if (port->pd_list[i]->sink_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(port->pds[i],
								&port->pd_list[i]->sink_desc);
			if (IS_ERR(cap)) {
				ret = PTR_ERR(cap);
				goto err_unregister;
			}
			port->pd_list[i]->sink_cap = cap;
		}
	}

	port->port_source_caps = port->pd_list[0]->source_cap;
	port->port_sink_caps = port->pd_list[0]->sink_cap;
	port->selected_pd = port->pds[0];
	return 0;

err_unregister:
	tcpm_port_unregister_pd(port);

	return ret;
}

static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
	int ret;
	u32 val;

	ret = fwnode_property_read_u32(fwnode, "sink-wait-cap-time-ms", &val);
	if (!ret)
		port->timings.sink_wait_cap_time = val;
	else
		port->timings.sink_wait_cap_time = PD_T_SINK_WAIT_CAP;

	ret = fwnode_property_read_u32(fwnode, "ps-source-off-time-ms", &val);
	if (!ret)
		port->timings.ps_src_off_time = val;
	else
		port->timings.ps_src_off_time = PD_T_PS_SOURCE_OFF;

	ret = fwnode_property_read_u32(fwnode, "cc-debounce-time-ms", &val);
	if (!ret)
		port->timings.cc_debounce_time = val;
	else
		port->timings.cc_debounce_time = PD_T_CC_DEBOUNCE;

	ret = fwnode_property_read_u32(fwnode, "sink-bc12-completion-time-ms", &val);
	if (!ret)
		port->timings.snk_bc12_cmpletion_time = val;
}

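/*
 * Parse the port properties from the firmware node: port type, accessory
 * modes, PD support, FRS current and one or more source/sink PDO sets
 * (either directly on the node or under an optional "capabilities" child).
 */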
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
	struct fwnode_handle *capabilities, *child, *caps = NULL;
	unsigned int nr_src_pdo, nr_snk_pdo;
	const char *opmode_str;
	u32 *src_pdo, *snk_pdo;
	u32 uw, frs_current;
	int ret = 0, i;
	int mode;

	if (!fwnode)
		return -EINVAL;

	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fw_devlink_purge_absent_suppliers(fwnode);

	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
	if (ret < 0)
		return ret;

	mode = 0;

	if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;

	if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;

	port->port_type = port->typec_caps.type;
	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");

	if (!port->pd_supported) {
		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
		if (ret)
			return ret;
		ret = typec_find_pwr_opmode(opmode_str);
		if (ret < 0)
			return ret;
		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
		return 0;
	}

	/* The following code applies to PD-capable ports, i.e. pd_supported is true. */

	/* FRS can only be supported by DRP ports */
	if (port->port_type == TYPEC_PORT_DRP) {
		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
					       &frs_current);
		if (!ret && frs_current <= FRS_5V_3A)
			port->new_source_frs_current = frs_current;

		if (ret)
			ret = 0;
	}

	/* For backward compatibility, the "capabilities" node is optional. */
	capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
	if (!capabilities) {
		port->pd_count = 1;
	} else {
		fwnode_for_each_child_node(capabilities, child)
			port->pd_count++;

		if (!port->pd_count) {
			ret = -ENODATA;
			goto put_capabilities;
		}
	}

	port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
				 GFP_KERNEL);
	if (!port->pds) {
		ret = -ENOMEM;
		goto put_capabilities;
	}

	port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
				     GFP_KERNEL);
	if (!port->pd_list) {
		ret = -ENOMEM;
		goto put_capabilities;
	}

	for (i = 0; i < port->pd_count; i++) {
		port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
		if (!port->pd_list[i]) {
			ret = -ENOMEM;
			goto put_capabilities;
		}

		src_pdo = port->pd_list[i]->source_desc.pdo;
		port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
		snk_pdo = port->pd_list[i]->sink_desc.pdo;
		port->pd_list[i]->sink_desc.role = TYPEC_SINK;

		/* If "capabilities" is NULL, fall back to single pd cap population. */
		if (!capabilities)
			caps = fwnode;
		else
			caps = fwnode_get_next_child_node(capabilities, caps);

		if (port->port_type != TYPEC_PORT_SNK) {
			ret = fwnode_property_count_u32(caps, "source-pdos");
			if (ret == 0) {
				ret = -EINVAL;
				goto put_caps;
			}
			if (ret < 0)
				goto put_caps;

			nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
							     nr_src_pdo);
			if (ret)
				goto put_caps;

			ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
			if (ret)
				goto put_caps;

			if (i == 0) {
				port->nr_src_pdo = nr_src_pdo;
				memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
					       port->pd_list[0]->source_desc.pdo,
					       sizeof(u32) * nr_src_pdo,
					       0);
			}
		}

		if (port->port_type != TYPEC_PORT_SRC) {
			ret = fwnode_property_count_u32(caps, "sink-pdos");
			if (ret == 0) {
				ret = -EINVAL;
				goto put_caps;
			}

			if (ret < 0)
				goto put_caps;

			nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
							     nr_snk_pdo);
			if (ret)
				goto put_caps;

			ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
			if (ret)
				goto put_caps;

			if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
				ret = -EINVAL;
				goto put_caps;
			}

			port->pd_list[i]->operating_snk_mw = uw / 1000;

			if (i == 0) {
				port->nr_snk_pdo = nr_snk_pdo;
				memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
					       port->pd_list[0]->sink_desc.pdo,
					       sizeof(u32) * nr_snk_pdo,
					       0);
				port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
			}
		}
	}

put_caps:
	if (caps != fwnode)
		fwnode_handle_put(caps);
put_capabilities:
	fwnode_handle_put(capabilities);
	return ret;
}

static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
	int ret;

	/* sink-vdos is optional */
	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
	if (ret < 0)
		return 0;

	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
	if (port->nr_snk_vdo) {
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
						     port->snk_vdo,
						     port->nr_snk_vdo);
		if (ret < 0)
			return ret;
	}

	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
	if (port->nr_snk_vdo) {
		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
		if (ret < 0)
			return ret;
		else if (ret == 0)
			return -ENODATA;

		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
						     port->snk_vdo_v1,
						     port->nr_snk_vdo_v1);
		if (ret < 0)
			return ret;
	}

	return 0;
}

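/*
 * Read the optional 4-byte "pd-revision" property: PD revision major/minor
 * followed by version major/minor. A missing or malformed property is
 * logged and otherwise ignored.
 */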
static void tcpm_fw_get_pd_revision(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
	int ret;
	u8 val[4];

	ret = fwnode_property_count_u8(fwnode, "pd-revision");
	if (!ret || ret != 4) {
		tcpm_log(port, "Unable to find pd-revision property or incorrect array size");
		return;
	}

	ret = fwnode_property_read_u8_array(fwnode, "pd-revision", val, 4);
	if (ret) {
		tcpm_log(port, "Failed to parse pd-revision, ret:(%d)", ret);
		return;
	}

	port->pd_rev.rev_major = val[0];
	port->pd_rev.rev_minor = val[1];
	port->pd_rev.ver_major = val[2];
	port->pd_rev.ver_minor = val[3];
}

/* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
	TCPM_PSY_OFFLINE = 0,
	TCPM_PSY_FIXED_ONLINE,
	TCPM_PSY_PROG_ONLINE,
};

static enum power_supply_property tcpm_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};

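/*
 * power_supply property helpers. TCPM tracks voltages and currents in mV/mA
 * internally, while the power_supply framework expects uV/uA, hence the
 * "* 1000" scaling in the getters below.
 */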
static int tcpm_psy_get_online(struct tcpm_port *port,
			       union power_supply_propval *val)
{
	if (port->vbus_charge) {
		if (port->pps_data.active)
			val->intval = TCPM_PSY_PROG_ONLINE;
		else
			val->intval = TCPM_PSY_FIXED_ONLINE;
	} else {
		val->intval = TCPM_PSY_OFFLINE;
	}

	return 0;
}

static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	if (port->pps_data.active)
		val->intval = port->pps_data.min_volt * 1000;
	else
		val->intval = port->supply_voltage * 1000;

	return 0;
}

static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	if (port->pps_data.active)
		val->intval = port->pps_data.max_volt * 1000;
	else
		val->intval = port->supply_voltage * 1000;

	return 0;
}

static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->supply_voltage * 1000;

	return 0;
}

static int tcpm_psy_get_current_max(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	if (port->pps_data.active)
		val->intval = port->pps_data.max_curr * 1000;
	else
		val->intval = port->current_limit * 1000;

	return 0;
}

static int tcpm_psy_get_current_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->current_limit * 1000;

	return 0;
}

static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
					  union power_supply_propval *val)
{
	unsigned int src_mv, src_ma, max_src_uw = 0;
	unsigned int i, tmp;

	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];

		if (pdo_type(pdo) == PDO_TYPE_FIXED) {
			src_mv = pdo_fixed_voltage(pdo);
			src_ma = pdo_max_current(pdo);
			tmp = src_mv * src_ma;
			max_src_uw = tmp > max_src_uw ? tmp : max_src_uw;
		}
	}

	val->intval = max_src_uw;
	return 0;
}

static int tcpm_psy_get_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     union power_supply_propval *val)
{
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_USB_TYPE:
		val->intval = port->usb_type;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_get_online(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
		ret = tcpm_psy_get_voltage_min(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		ret = tcpm_psy_get_voltage_max(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = tcpm_psy_get_voltage_now(port, val);
		break;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		ret = tcpm_psy_get_current_max(port, val);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		ret = tcpm_psy_get_current_now(port, val);
		break;
	case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
		tcpm_psy_get_input_power_limit(port, val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int tcpm_psy_set_online(struct tcpm_port *port,
			       const union power_supply_propval *val)
{
	int ret;

	switch (val->intval) {
	case TCPM_PSY_FIXED_ONLINE:
		ret = tcpm_pps_activate(port, false);
		break;
	case TCPM_PSY_PROG_ONLINE:
		ret = tcpm_pps_activate(port, true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int tcpm_psy_set_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     const union power_supply_propval *val)
{
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	int ret;

	/*
	 * All the properties below are related to USB PD. The check needs to be
	 * property specific when a non-pd related property is added.
	 */
	if (!port->pd_supported)
		return -EOPNOTSUPP;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_set_online(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		if (val->intval > port->pps_data.max_curr * 1000)
			ret = -EINVAL;
		else
			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	power_supply_changed(port->psy);
	return ret;
}

static int tcpm_psy_prop_writeable(struct power_supply *psy,
				   enum power_supply_property psp)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		return 1;
	default:
		return 0;
	}
}

static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";

static int devm_tcpm_psy_register(struct tcpm_port *port)
{
	struct power_supply_config psy_cfg = {};
	const char *port_dev_name = dev_name(port->dev);
	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
				     strlen(port_dev_name) + 1;
	char *psy_name;

	psy_cfg.drv_data = port;
	psy_cfg.fwnode = dev_fwnode(port->dev);
	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
	if (!psy_name)
		return -ENOMEM;

	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
		 port_dev_name);
	port->psy_desc.name = psy_name;
	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
	port->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
				   BIT(POWER_SUPPLY_USB_TYPE_PD) |
				   BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
	port->psy_desc.properties = tcpm_psy_props;
	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
	port->psy_desc.get_property = tcpm_psy_get_prop;
	port->psy_desc.set_property = tcpm_psy_set_prop;
	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;

	port->usb_type = POWER_SUPPLY_USB_TYPE_C;

	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
					       &psy_cfg);

	return PTR_ERR_OR_ZERO(port->psy);
}

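/*
 * hrtimer handlers only queue work on the port's kthread worker; they do
 * nothing once the port has been unregistered.
 */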
static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->state_machine);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->vdm_state_machine);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->enable_frs);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->send_discover_work);
	return HRTIMER_NORESTART;
}

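/*
 * Register a TCPM port on behalf of a TCPC driver. The TCPC must provide the
 * mandatory low-level callbacks; the port configuration is taken from the
 * TCPC's firmware node.
 */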
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
	struct tcpm_port *port;
	int err;

	if (!dev || !tcpc ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(-EINVAL);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->dev = dev;
	port->tcpc = tcpc;

	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);

	port->wq = kthread_create_worker(0, dev_name(dev));
	if (IS_ERR(port->wq))
		return ERR_CAST(port->wq);
	sched_set_fifo(port->wq->task);

	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->state_machine_timer.function = state_machine_timer_handler;
	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->enable_frs_timer.function = enable_frs_timer_handler;
	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->send_discover_timer.function = send_discover_timer_handler;

	spin_lock_init(&port->pd_event_lock);

	init_completion(&port->tx_complete);
	init_completion(&port->swap_complete);
	init_completion(&port->pps_complete);
	tcpm_debugfs_init(port);

	err = tcpm_fw_get_caps(port, tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;
	err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;

	tcpm_fw_get_timings(port, tcpc->fwnode);
	tcpm_fw_get_pd_revision(port, tcpc->fwnode);

	port->try_role = port->typec_caps.prefer_role;

	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */

	if (port->pd_rev.rev_major)
		port->typec_caps.pd_revision = port->pd_rev.rev_major << 8 |
					       port->pd_rev.rev_minor;
	else
		port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */

	port->typec_caps.svdm_version = SVDM_VER_2_0;
	port->typec_caps.driver_data = port;
	port->typec_caps.ops = &tcpm_ops;
	port->typec_caps.orientation_aware = 1;

	port->partner_desc.identity = &port->partner_ident;

	port->role_sw = usb_role_switch_get(port->dev);
	if (!port->role_sw)
		port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
	if (IS_ERR(port->role_sw)) {
		err = PTR_ERR(port->role_sw);
		goto out_destroy_wq;
	}

	err = devm_tcpm_psy_register(port);
	if (err)
		goto out_role_sw_put;
	power_supply_changed(port->psy);

	err = tcpm_port_register_pd(port);
	if (err)
		goto out_role_sw_put;

	if (port->pds)
		port->typec_caps.pd = port->pds[0];

	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
	if (IS_ERR(port->typec_port)) {
		err = PTR_ERR(port->typec_port);
		goto out_unregister_pd;
	}

	typec_port_register_altmodes(port->typec_port,
				     &tcpm_altmode_ops, port,
				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
	typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
				      &tcpm_cable_ops);
	port->registered = true;

	mutex_lock(&port->lock);
	tcpm_init(port);
	mutex_unlock(&port->lock);

	tcpm_log(port, "%s: registered", dev_name(dev));
	return port;

out_unregister_pd:
	tcpm_port_unregister_pd(port);
out_role_sw_put:
	usb_role_switch_put(port->role_sw);
out_destroy_wq:
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(port->wq);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);

void tcpm_unregister_port(struct tcpm_port *port)
{
	int i;

	port->registered = false;
	kthread_destroy_worker(port->wq);

	hrtimer_cancel(&port->send_discover_timer);
	hrtimer_cancel(&port->enable_frs_timer);
	hrtimer_cancel(&port->vdm_state_machine_timer);
	hrtimer_cancel(&port->state_machine_timer);

	tcpm_reset_port(port);

	tcpm_port_unregister_pd(port);

	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(port->port_altmode[i]);
	typec_unregister_port(port->typec_port);
	usb_role_switch_put(port->role_sw);
	tcpm_debugfs_exit(port);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);

MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
MODULE_DESCRIPTION("USB Type-C Port Manager");
MODULE_LICENSE("GPL");