1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2015-2017 Google, Inc
4 *
5 * USB Power Delivery protocol stack.
6 */
7
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/usb.h>
25 #include <linux/usb/pd.h>
26 #include <linux/usb/pd_ado.h>
27 #include <linux/usb/pd_bdo.h>
28 #include <linux/usb/pd_ext_sdb.h>
29 #include <linux/usb/pd_vdo.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/tcpm.h>
32 #include <linux/usb/typec_altmode.h>
33
34 #include <trace/hooks/typec.h>
35 #include <uapi/linux/sched/types.h>
36
37 #define FOREACH_STATE(S) \
38 S(INVALID_STATE), \
39 S(TOGGLING), \
40 S(SRC_UNATTACHED), \
41 S(SRC_ATTACH_WAIT), \
42 S(SRC_ATTACHED), \
43 S(SRC_STARTUP), \
44 S(SRC_SEND_CAPABILITIES), \
45 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
46 S(SRC_NEGOTIATE_CAPABILITIES), \
47 S(SRC_TRANSITION_SUPPLY), \
48 S(SRC_READY), \
49 S(SRC_WAIT_NEW_CAPABILITIES), \
50 \
51 S(SNK_UNATTACHED), \
52 S(SNK_ATTACH_WAIT), \
53 S(SNK_DEBOUNCED), \
54 S(SNK_ATTACHED), \
55 S(SNK_STARTUP), \
56 S(SNK_DISCOVERY), \
57 S(SNK_DISCOVERY_DEBOUNCE), \
58 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
59 S(SNK_WAIT_CAPABILITIES), \
60 S(SNK_NEGOTIATE_CAPABILITIES), \
61 S(SNK_NEGOTIATE_PPS_CAPABILITIES), \
62 S(SNK_TRANSITION_SINK), \
63 S(SNK_TRANSITION_SINK_VBUS), \
64 S(SNK_READY), \
65 \
66 S(ACC_UNATTACHED), \
67 S(DEBUG_ACC_ATTACHED), \
68 S(AUDIO_ACC_ATTACHED), \
69 S(AUDIO_ACC_DEBOUNCE), \
70 \
71 S(HARD_RESET_SEND), \
72 S(HARD_RESET_START), \
73 S(SRC_HARD_RESET_VBUS_OFF), \
74 S(SRC_HARD_RESET_VBUS_ON), \
75 S(SNK_HARD_RESET_SINK_OFF), \
76 S(SNK_HARD_RESET_WAIT_VBUS), \
77 S(SNK_HARD_RESET_SINK_ON), \
78 \
79 S(SOFT_RESET), \
80 S(SRC_SOFT_RESET_WAIT_SNK_TX), \
81 S(SNK_SOFT_RESET), \
82 S(SOFT_RESET_SEND), \
83 \
84 S(DR_SWAP_ACCEPT), \
85 S(DR_SWAP_SEND), \
86 S(DR_SWAP_SEND_TIMEOUT), \
87 S(DR_SWAP_CANCEL), \
88 S(DR_SWAP_CHANGE_DR), \
89 \
90 S(PR_SWAP_ACCEPT), \
91 S(PR_SWAP_SEND), \
92 S(PR_SWAP_SEND_TIMEOUT), \
93 S(PR_SWAP_CANCEL), \
94 S(PR_SWAP_START), \
95 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
96 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
97 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
98 S(PR_SWAP_SRC_SNK_SINK_ON), \
99 S(PR_SWAP_SNK_SRC_SINK_OFF), \
100 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
101 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
102 \
103 S(VCONN_SWAP_ACCEPT), \
104 S(VCONN_SWAP_SEND), \
105 S(VCONN_SWAP_SEND_TIMEOUT), \
106 S(VCONN_SWAP_CANCEL), \
107 S(VCONN_SWAP_START), \
108 S(VCONN_SWAP_WAIT_FOR_VCONN), \
109 S(VCONN_SWAP_TURN_ON_VCONN), \
110 S(VCONN_SWAP_TURN_OFF_VCONN), \
111 \
112 S(FR_SWAP_SEND), \
113 S(FR_SWAP_SEND_TIMEOUT), \
114 S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \
115 S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \
116 S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \
117 S(FR_SWAP_CANCEL), \
118 \
119 S(SNK_TRY), \
120 S(SNK_TRY_WAIT), \
121 S(SNK_TRY_WAIT_DEBOUNCE), \
122 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
123 S(SRC_TRYWAIT), \
124 S(SRC_TRYWAIT_DEBOUNCE), \
125 S(SRC_TRYWAIT_UNATTACHED), \
126 \
127 S(SRC_TRY), \
128 S(SRC_TRY_WAIT), \
129 S(SRC_TRY_DEBOUNCE), \
130 S(SNK_TRYWAIT), \
131 S(SNK_TRYWAIT_DEBOUNCE), \
132 S(SNK_TRYWAIT_VBUS), \
133 S(BIST_RX), \
134 \
135 S(GET_STATUS_SEND), \
136 S(GET_STATUS_SEND_TIMEOUT), \
137 S(GET_PPS_STATUS_SEND), \
138 S(GET_PPS_STATUS_SEND_TIMEOUT), \
139 \
140 S(GET_SINK_CAP), \
141 S(GET_SINK_CAP_TIMEOUT), \
142 \
143 S(ERROR_RECOVERY), \
144 S(PORT_RESET), \
145 S(PORT_RESET_WAIT_OFF), \
146 \
147 S(AMS_START), \
148 S(CHUNK_NOT_SUPP)
149
150 #define FOREACH_AMS(S) \
151 S(NONE_AMS), \
152 S(POWER_NEGOTIATION), \
153 S(GOTOMIN), \
154 S(SOFT_RESET_AMS), \
155 S(HARD_RESET), \
156 S(CABLE_RESET), \
157 S(GET_SOURCE_CAPABILITIES), \
158 S(GET_SINK_CAPABILITIES), \
159 S(POWER_ROLE_SWAP), \
160 S(FAST_ROLE_SWAP), \
161 S(DATA_ROLE_SWAP), \
162 S(VCONN_SWAP), \
163 S(SOURCE_ALERT), \
164 S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
165 S(GETTING_SOURCE_SINK_STATUS), \
166 S(GETTING_BATTERY_CAPABILITIES), \
167 S(GETTING_BATTERY_STATUS), \
168 S(GETTING_MANUFACTURER_INFORMATION), \
169 S(SECURITY), \
170 S(FIRMWARE_UPDATE), \
171 S(DISCOVER_IDENTITY), \
172 S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
173 S(DISCOVER_SVIDS), \
174 S(DISCOVER_MODES), \
175 S(DFP_TO_UFP_ENTER_MODE), \
176 S(DFP_TO_UFP_EXIT_MODE), \
177 S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
178 S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
179 S(ATTENTION), \
180 S(BIST), \
181 S(UNSTRUCTURED_VDMS), \
182 S(STRUCTURED_VDMS), \
183 S(COUNTRY_INFO), \
184 S(COUNTRY_CODES)
185
186 #define GENERATE_ENUM(e) e
187 #define GENERATE_STRING(s) #s
188
189 enum tcpm_state {
190 FOREACH_STATE(GENERATE_ENUM)
191 };
192
193 static const char * const tcpm_states[] = {
194 FOREACH_STATE(GENERATE_STRING)
195 };
196
197 enum tcpm_ams {
198 FOREACH_AMS(GENERATE_ENUM)
199 };
200
201 static const char * const tcpm_ams_str[] = {
202 FOREACH_AMS(GENERATE_STRING)
203 };
204
205 enum vdm_states {
206 VDM_STATE_ERR_BUSY = -3,
207 VDM_STATE_ERR_SEND = -2,
208 VDM_STATE_ERR_TMOUT = -1,
209 VDM_STATE_DONE = 0,
210 /* Anything >0 represents an active state */
211 VDM_STATE_READY = 1,
212 VDM_STATE_BUSY = 2,
213 VDM_STATE_WAIT_RSP_BUSY = 3,
214 VDM_STATE_SEND_MESSAGE = 4,
215 };
216
217 enum pd_msg_request {
218 PD_MSG_NONE = 0,
219 PD_MSG_CTRL_REJECT,
220 PD_MSG_CTRL_WAIT,
221 PD_MSG_CTRL_NOT_SUPP,
222 PD_MSG_DATA_SINK_CAP,
223 PD_MSG_DATA_SOURCE_CAP,
224 };
225
226 enum adev_actions {
227 ADEV_NONE = 0,
228 ADEV_NOTIFY_USB_AND_QUEUE_VDM,
229 ADEV_QUEUE_VDM,
230 ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
231 ADEV_ATTENTION,
232 };
233
234 /*
235 * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
236 * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
237 * Version 1.2"
238 */
239 enum frs_typec_current {
240 FRS_NOT_SUPPORTED,
241 FRS_DEFAULT_POWER,
242 FRS_5V_1P5A,
243 FRS_5V_3A,
244 };
245
246 /* Events from low level driver */
247
248 #define TCPM_CC_EVENT BIT(0)
249 #define TCPM_VBUS_EVENT BIT(1)
250 #define TCPM_RESET_EVENT BIT(2)
251 #define TCPM_FRS_EVENT BIT(3)
252 #define TCPM_SOURCING_VBUS BIT(4)
253
254 #define LOG_BUFFER_ENTRIES 1024
255 #define LOG_BUFFER_ENTRY_SIZE 128
256
257 /* Alternate mode support */
258
259 #define SVID_DISCOVERY_MAX 16
260 #define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
261
262 #define GET_SINK_CAP_RETRY_MS 100
263 #define SEND_DISCOVER_RETRY_MS 100
264
265 struct pd_mode_data {
266 int svid_index; /* current SVID index */
267 int nsvids;
268 u16 svids[SVID_DISCOVERY_MAX];
269 int altmodes; /* number of alternate modes */
270 struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
271 };
272
273 /*
274 * @min_volt: Actual min voltage at the local port
275 * @req_min_volt: Requested min voltage to the port partner
276 * @max_volt: Actual max voltage at the local port
277 * @req_max_volt: Requested max voltage to the port partner
278 * @max_curr: Actual max current at the local port
279 * @req_max_curr: Requested max current of the port partner
280 * @req_out_volt: Requested output voltage to the port partner
281 * @req_op_curr: Requested operating current to the port partner
 * @supported: Partner has at least one APDO and hence supports PPS
283 * @active: PPS mode is active
284 */
285 struct pd_pps_data {
286 u32 min_volt;
287 u32 req_min_volt;
288 u32 max_volt;
289 u32 req_max_volt;
290 u32 max_curr;
291 u32 req_max_curr;
292 u32 req_out_volt;
293 u32 req_op_curr;
294 bool supported;
295 bool active;
296 };
297
298 struct tcpm_port {
299 struct device *dev;
300
301 struct mutex lock; /* tcpm state machine lock */
302 struct kthread_worker *wq;
303
304 struct typec_capability typec_caps;
305 struct typec_port *typec_port;
306
307 struct tcpc_dev *tcpc;
308 struct usb_role_switch *role_sw;
309
310 enum typec_role vconn_role;
311 enum typec_role pwr_role;
312 enum typec_data_role data_role;
313 enum typec_pwr_opmode pwr_opmode;
314
315 struct usb_pd_identity partner_ident;
316 struct typec_partner_desc partner_desc;
317 struct typec_partner *partner;
318
319 enum typec_cc_status cc_req;
320
321 enum typec_cc_status cc1;
322 enum typec_cc_status cc2;
323 enum typec_cc_polarity polarity;
324
325 bool attached;
326 bool connected;
327 enum typec_port_type port_type;
328
329 /*
330 * Set to true when vbus is greater than VSAFE5V min.
331 * Set to false when vbus falls below vSinkDisconnect max threshold.
332 */
333 bool vbus_present;
334
335 /*
336 * Set to true when vbus is less than VSAFE0V max.
337 * Set to false when vbus is greater than VSAFE0V max.
338 */
339 bool vbus_vsafe0v;
340
341 bool vbus_never_low;
342 bool vbus_source;
343 bool vbus_charge;
344
345 /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
346 bool send_discover;
347 bool op_vsafe5v;
348
349 int try_role;
350 int try_snk_count;
351 int try_src_count;
352
353 enum pd_msg_request queued_message;
354
355 enum tcpm_state enter_state;
356 enum tcpm_state prev_state;
357 enum tcpm_state state;
358 enum tcpm_state delayed_state;
359 ktime_t delayed_runtime;
360 unsigned long delay_ms;
361
362 spinlock_t pd_event_lock;
363 #ifdef CONFIG_NO_GKI
364 struct mutex pd_handler_lock;
365 #endif
366 u32 pd_events;
367
368 struct kthread_work event_work;
369 struct hrtimer state_machine_timer;
370 struct kthread_work state_machine;
371 struct hrtimer vdm_state_machine_timer;
372 struct kthread_work vdm_state_machine;
373 struct hrtimer enable_frs_timer;
374 struct kthread_work enable_frs;
375 struct hrtimer send_discover_timer;
376 struct kthread_work send_discover_work;
377 bool state_machine_running;
378 /* Set to true when VDM State Machine has following actions. */
379 bool vdm_sm_running;
380
381 struct completion tx_complete;
382 enum tcpm_transmit_status tx_status;
383
384 struct mutex swap_lock; /* swap command lock */
385 bool swap_pending;
386 bool non_pd_role_swap;
387 struct completion swap_complete;
388 int swap_status;
389
390 unsigned int negotiated_rev;
391 unsigned int message_id;
392 unsigned int caps_count;
393 unsigned int hard_reset_count;
394 bool pd_capable;
395 bool explicit_contract;
396 unsigned int rx_msgid;
397
398 /* Partner capabilities/requests */
399 u32 sink_request;
400 u32 source_caps[PDO_MAX_OBJECTS];
401 unsigned int nr_source_caps;
402 u32 sink_caps[PDO_MAX_OBJECTS];
403 unsigned int nr_sink_caps;
404
405 /* Local capabilities */
406 u32 src_pdo[PDO_MAX_OBJECTS];
407 unsigned int nr_src_pdo;
408 u32 snk_pdo[PDO_MAX_OBJECTS];
409 unsigned int nr_snk_pdo;
410 u32 snk_vdo_v1[VDO_MAX_OBJECTS];
411 unsigned int nr_snk_vdo_v1;
412 u32 snk_vdo[VDO_MAX_OBJECTS];
413 unsigned int nr_snk_vdo;
414
415 unsigned int operating_snk_mw;
416 bool update_sink_caps;
417
418 /* Requested current / voltage to the port partner */
419 u32 req_current_limit;
420 u32 req_supply_voltage;
421 /* Actual current / voltage limit of the local port */
422 u32 current_limit;
423 u32 supply_voltage;
424
425 /* Used to export TA voltage and current */
426 struct power_supply *psy;
427 struct power_supply_desc psy_desc;
428 enum power_supply_usb_type usb_type;
429
430 u32 bist_request;
431
432 /* PD state for Vendor Defined Messages */
433 enum vdm_states vdm_state;
434 u32 vdm_retries;
435 /* next Vendor Defined Message to send */
436 u32 vdo_data[VDO_MAX_SIZE];
437 u8 vdo_count;
438 /* VDO to retry if UFP responder replied busy */
439 u32 vdo_retry;
440
441 /* PPS */
442 struct pd_pps_data pps_data;
443 struct completion pps_complete;
444 bool pps_pending;
445 int pps_status;
446
447 /* Alternate mode data */
448 struct pd_mode_data mode_data;
449 struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
450 struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
451
452 /* Deadline in jiffies to exit src_try_wait state */
453 unsigned long max_wait;
454
455 /* port belongs to a self powered device */
456 bool self_powered;
457
458 /* Sink FRS */
459 enum frs_typec_current new_source_frs_current;
460
461 /* Sink caps have been queried */
462 bool sink_cap_done;
463
464 /* Port is still in tCCDebounce */
465 bool debouncing;
466
467 /* Collision Avoidance and Atomic Message Sequence */
468 enum tcpm_state upcoming_state;
469 enum tcpm_ams ams;
470 enum tcpm_ams next_ams;
471 bool in_ams;
472
473 /* Auto vbus discharge status */
474 bool auto_vbus_discharge_enabled;
475
476 /*
477 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
 * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
479 * SNK_READY for non-pd link.
480 */
481 bool slow_charger_loop;
482 #ifdef CONFIG_DEBUG_FS
483 struct dentry *dentry;
484 struct mutex logbuffer_lock; /* log buffer access lock */
485 int logbuffer_head;
486 int logbuffer_tail;
487 u8 *logbuffer[LOG_BUFFER_ENTRIES];
488 #endif
489 };
490
491 struct pd_rx_event {
492 struct kthread_work work;
493 struct tcpm_port *port;
494 struct pd_message msg;
495 };
496
497 static const char * const pd_rev[] = {
498 [PD_REV10] = "rev1",
499 [PD_REV20] = "rev2",
500 [PD_REV30] = "rev3",
501 };
502
503 #define tcpm_cc_is_sink(cc) \
504 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
505 (cc) == TYPEC_CC_RP_3_0)
506
507 #define tcpm_port_is_sink(port) \
508 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
509 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
510
511 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
512 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
513 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
514
515 #define tcpm_port_is_source(port) \
516 ((tcpm_cc_is_source((port)->cc1) && \
517 !tcpm_cc_is_source((port)->cc2)) || \
518 (tcpm_cc_is_source((port)->cc2) && \
519 !tcpm_cc_is_source((port)->cc1)))
520
521 #define tcpm_port_is_debug(port) \
522 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
523
524 #define tcpm_port_is_audio(port) \
525 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
526
527 #define tcpm_port_is_audio_detached(port) \
528 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
529 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
530
531 #define tcpm_try_snk(port) \
532 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
533 (port)->port_type == TYPEC_PORT_DRP)
534
535 #define tcpm_try_src(port) \
536 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
537 (port)->port_type == TYPEC_PORT_DRP)
538
539 #define tcpm_data_role_for_source(port) \
540 ((port)->typec_caps.data == TYPEC_PORT_UFP ? \
541 TYPEC_DEVICE : TYPEC_HOST)
542
543 #define tcpm_data_role_for_sink(port) \
544 ((port)->typec_caps.data == TYPEC_PORT_DFP ? \
545 TYPEC_HOST : TYPEC_DEVICE)
546
547 #define tcpm_sink_tx_ok(port) \
548 (tcpm_port_is_sink(port) && \
549 ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
550
551 #define tcpm_wait_for_discharge(port) \
552 (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
553
tcpm_default_state(struct tcpm_port * port)554 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
555 {
556 if (port->port_type == TYPEC_PORT_DRP) {
557 if (port->try_role == TYPEC_SINK)
558 return SNK_UNATTACHED;
559 else if (port->try_role == TYPEC_SOURCE)
560 return SRC_UNATTACHED;
561 /* Fall through to return SRC_UNATTACHED */
562 } else if (port->port_type == TYPEC_PORT_SNK) {
563 return SNK_UNATTACHED;
564 }
565 return SRC_UNATTACHED;
566 }
567
tcpm_port_is_disconnected(struct tcpm_port * port)568 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
569 {
570 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
571 port->cc2 == TYPEC_CC_OPEN) ||
572 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
573 port->cc1 == TYPEC_CC_OPEN) ||
574 (port->polarity == TYPEC_POLARITY_CC2 &&
575 port->cc2 == TYPEC_CC_OPEN)));
576 }
577
578 /*
579 * Logging
580 */
581
582 #ifdef CONFIG_DEBUG_FS
583
tcpm_log_full(struct tcpm_port * port)584 static bool tcpm_log_full(struct tcpm_port *port)
585 {
586 return port->logbuffer_tail ==
587 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
588 }
589
__printf(2, 0)
/*
 * Format one message and append it, timestamped, to the port's log ring
 * buffer. Entries are allocated lazily; on allocation failure the message
 * is silently dropped. Caller supplies a started va_list.
 */
static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
{
	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
	u64 ts_nsec = local_clock();
	unsigned long rem_nsec;
	bool bypass_log = false;

	mutex_lock(&port->logbuffer_lock);
	/* Lazily allocate the entry the head currently points at. */
	if (!port->logbuffer[port->logbuffer_head]) {
		port->logbuffer[port->logbuffer_head] =
			kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
		if (!port->logbuffer[port->logbuffer_head]) {
			mutex_unlock(&port->logbuffer_lock);
			return;
		}
	}

	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
	/* Vendor hook may claim the message; if so, skip the ring buffer. */
	trace_android_vh_typec_tcpm_log(tmpbuffer, &bypass_log);
	if (bypass_log)
		goto abort;

	/* On a full buffer, overwrite the newest entry with a marker. */
	if (tcpm_log_full(port)) {
		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
		strcpy(tmpbuffer, "overflow");
	}

	if (port->logbuffer_head < 0 ||
	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
		dev_warn(port->dev,
			 "Bad log buffer index %d\n", port->logbuffer_head);
		goto abort;
	}

	if (!port->logbuffer[port->logbuffer_head]) {
		dev_warn(port->dev,
			 "Log buffer index %d is NULL\n", port->logbuffer_head);
		goto abort;
	}

	/* Prefix a printk-style [seconds.microseconds] timestamp. */
	rem_nsec = do_div(ts_nsec, 1000000000);
	scnprintf(port->logbuffer[port->logbuffer_head],
		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
		  (unsigned long)ts_nsec, rem_nsec / 1000,
		  tmpbuffer);
	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;

abort:
	mutex_unlock(&port->logbuffer_lock);
}
641
642 __printf(2, 3)
tcpm_log(struct tcpm_port * port,const char * fmt,...)643 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
644 {
645 va_list args;
646
647 /* Do not log while disconnected and unattached */
648 if (tcpm_port_is_disconnected(port) &&
649 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
650 port->state == TOGGLING))
651 return;
652
653 va_start(args, fmt);
654 _tcpm_log(port, fmt, args);
655 va_end(args);
656 }
657
658 __printf(2, 3)
tcpm_log_force(struct tcpm_port * port,const char * fmt,...)659 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
660 {
661 va_list args;
662
663 va_start(args, fmt);
664 _tcpm_log(port, fmt, args);
665 va_end(args);
666 }
667
tcpm_log_source_caps(struct tcpm_port * port)668 static void tcpm_log_source_caps(struct tcpm_port *port)
669 {
670 int i;
671
672 for (i = 0; i < port->nr_source_caps; i++) {
673 u32 pdo = port->source_caps[i];
674 enum pd_pdo_type type = pdo_type(pdo);
675 char msg[64];
676
677 switch (type) {
678 case PDO_TYPE_FIXED:
679 scnprintf(msg, sizeof(msg),
680 "%u mV, %u mA [%s%s%s%s%s%s]",
681 pdo_fixed_voltage(pdo),
682 pdo_max_current(pdo),
683 (pdo & PDO_FIXED_DUAL_ROLE) ?
684 "R" : "",
685 (pdo & PDO_FIXED_SUSPEND) ?
686 "S" : "",
687 (pdo & PDO_FIXED_HIGHER_CAP) ?
688 "H" : "",
689 (pdo & PDO_FIXED_USB_COMM) ?
690 "U" : "",
691 (pdo & PDO_FIXED_DATA_SWAP) ?
692 "D" : "",
693 (pdo & PDO_FIXED_EXTPOWER) ?
694 "E" : "");
695 break;
696 case PDO_TYPE_VAR:
697 scnprintf(msg, sizeof(msg),
698 "%u-%u mV, %u mA",
699 pdo_min_voltage(pdo),
700 pdo_max_voltage(pdo),
701 pdo_max_current(pdo));
702 break;
703 case PDO_TYPE_BATT:
704 scnprintf(msg, sizeof(msg),
705 "%u-%u mV, %u mW",
706 pdo_min_voltage(pdo),
707 pdo_max_voltage(pdo),
708 pdo_max_power(pdo));
709 break;
710 case PDO_TYPE_APDO:
711 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
712 scnprintf(msg, sizeof(msg),
713 "%u-%u mV, %u mA",
714 pdo_pps_apdo_min_voltage(pdo),
715 pdo_pps_apdo_max_voltage(pdo),
716 pdo_pps_apdo_max_current(pdo));
717 else
718 strcpy(msg, "undefined APDO");
719 break;
720 default:
721 strcpy(msg, "undefined");
722 break;
723 }
724 tcpm_log(port, " PDO %d: type %d, %s",
725 i, type, msg);
726 }
727 }
728
tcpm_debug_show(struct seq_file * s,void * v)729 static int tcpm_debug_show(struct seq_file *s, void *v)
730 {
731 struct tcpm_port *port = (struct tcpm_port *)s->private;
732 int tail;
733
734 mutex_lock(&port->logbuffer_lock);
735 tail = port->logbuffer_tail;
736 while (tail != port->logbuffer_head) {
737 seq_printf(s, "%s\n", port->logbuffer[tail]);
738 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
739 }
740 if (!seq_has_overflowed(s))
741 port->logbuffer_tail = tail;
742 mutex_unlock(&port->logbuffer_lock);
743
744 return 0;
745 }
746 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
747
tcpm_debugfs_init(struct tcpm_port * port)748 static void tcpm_debugfs_init(struct tcpm_port *port)
749 {
750 char name[NAME_MAX];
751
752 mutex_init(&port->logbuffer_lock);
753 snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
754 port->dentry = debugfs_create_file(name, S_IFREG | 0444, usb_debug_root,
755 port, &tcpm_debug_fops);
756 }
757
tcpm_debugfs_exit(struct tcpm_port * port)758 static void tcpm_debugfs_exit(struct tcpm_port *port)
759 {
760 int i;
761
762 mutex_lock(&port->logbuffer_lock);
763 for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
764 kfree(port->logbuffer[i]);
765 port->logbuffer[i] = NULL;
766 }
767 mutex_unlock(&port->logbuffer_lock);
768
769 debugfs_remove(port->dentry);
770 }
771
772 #else
773
/* No-op logging/debugfs stubs for builds without CONFIG_DEBUG_FS. */
__printf(2, 3)
static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
__printf(2, 3)
static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_source_caps(struct tcpm_port *port) { }
static void tcpm_debugfs_init(const struct tcpm_port *port) { }
static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
781
782 #endif
783
/* Cache the requested CC pull in cc_req and program it into the TCPC. */
static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
{
	tcpm_log(port, "cc:=%d", cc);
	port->cc_req = cc;
	port->tcpc->set_cc(port->tcpc, cc);
}
790
/*
 * Enable/disable TCPC auto VBUS discharge on disconnect. Tracks the
 * resulting state in auto_vbus_discharge_enabled on success; returns 0
 * when the TCPC does not implement the operation.
 */
static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
{
	int ret;

	if (!port->tcpc->enable_auto_vbus_discharge)
		return 0;

	ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
	tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
		       ret);
	if (!ret)
		port->auto_vbus_discharge_enabled = enable;

	return ret;
}
805
static void tcpm_apply_rc(struct tcpm_port *port)
{
	/*
	 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
	 * when Vbus auto discharge on disconnect is enabled.
	 */
	if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
		tcpm_log(port, "Apply_RC");
		port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
		/* Keep auto discharge from tripping while Rc is forced. */
		tcpm_enable_auto_vbus_discharge(port, false);
	}
}
818
819 /*
820 * Determine RP value to set based on maximum current supported
821 * by a port if configured as source.
822 * Returns CC value to report to link partner.
823 */
tcpm_rp_cc(struct tcpm_port * port)824 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
825 {
826 const u32 *src_pdo = port->src_pdo;
827 int nr_pdo = port->nr_src_pdo;
828 int i;
829
830 /*
831 * Search for first entry with matching voltage.
832 * It should report the maximum supported current.
833 */
834 for (i = 0; i < nr_pdo; i++) {
835 const u32 pdo = src_pdo[i];
836
837 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
838 pdo_fixed_voltage(pdo) == 5000) {
839 unsigned int curr = pdo_max_current(pdo);
840
841 if (curr >= 3000)
842 return TYPEC_CC_RP_3_0;
843 else if (curr >= 1500)
844 return TYPEC_CC_RP_1_5;
845 return TYPEC_CC_RP_DEF;
846 }
847 }
848
849 return TYPEC_CC_RP_DEF;
850 }
851
tcpm_ams_finish(struct tcpm_port * port)852 static void tcpm_ams_finish(struct tcpm_port *port)
853 {
854 tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
855
856 if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
857 if (port->negotiated_rev >= PD_REV30)
858 tcpm_set_cc(port, SINK_TX_OK);
859 else
860 tcpm_set_cc(port, SINK_TX_NG);
861 } else if (port->pwr_role == TYPEC_SOURCE) {
862 tcpm_set_cc(port, tcpm_rp_cc(port));
863 }
864
865 port->in_ams = false;
866 port->ams = NONE_AMS;
867 }
868
/*
 * Transmit a PD message (or a hard/cable reset when @msg is NULL) through
 * the TCPC and wait for the transmit result. Called with port->lock held;
 * the lock is dropped while sleeping on tx_complete.
 *
 * Returns 0 on success, -ETIMEDOUT if the TCPC never reported completion,
 * -EAGAIN if the message was discarded (e.g. collided with incoming
 * traffic), or a negative error code on transmit failure.
 */
static int tcpm_pd_transmit(struct tcpm_port *port,
			    enum tcpm_transmit_type type,
			    const struct pd_message *msg)
{
	unsigned long timeout;
	int ret;

	if (msg)
		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
	else
		tcpm_log(port, "PD TX, type: %#x", type);

	reinit_completion(&port->tx_complete);
	ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
	if (ret < 0)
		return ret;

	/*
	 * Drop port->lock while waiting: tx_complete is signalled from the
	 * low-level driver via tcpm_pd_transmit_complete(), whose calling
	 * context presumably must not be blocked on this lock —
	 * NOTE(review): assumed rationale, confirm against TCPC drivers.
	 */
	mutex_unlock(&port->lock);
	timeout = wait_for_completion_timeout(&port->tx_complete,
					      msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
	mutex_lock(&port->lock);
	if (!timeout)
		return -ETIMEDOUT;

	switch (port->tx_status) {
	case TCPC_TX_SUCCESS:
		/* Only successfully sent messages advance MessageID. */
		port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
		/*
		 * USB PD rev 2.0, 8.3.2.2.1:
		 * USB PD rev 3.0, 8.3.2.1.3:
		 * "... Note that every AMS is Interruptible until the first
		 * Message in the sequence has been successfully sent (GoodCRC
		 * Message received)."
		 */
		if (port->ams != NONE_AMS)
			port->in_ams = true;
		break;
	case TCPC_TX_DISCARDED:
		/* Message lost to a collision; caller may retry. */
		ret = -EAGAIN;
		break;
	case TCPC_TX_FAILED:
	default:
		ret = -EIO;
		break;
	}

	/* Some AMS don't expect responses. Finish them here. */
	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
		tcpm_ams_finish(port);

	return ret;
}
921
/*
 * tcpm_pd_transmit_complete - report PD transmit status from the TCPC driver
 * @port: tcpm port
 * @status: transmit result reported by the TCPC
 *
 * Records the status and wakes the waiter sleeping in tcpm_pd_transmit().
 */
void tcpm_pd_transmit_complete(struct tcpm_port *port,
			       enum tcpm_transmit_status status)
{
	tcpm_log(port, "PD TX complete, status: %u", status);
	port->tx_status = status;
	complete(&port->tx_complete);
}
EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
930
tcpm_mux_set(struct tcpm_port * port,int state,enum usb_role usb_role,enum typec_orientation orientation)931 static int tcpm_mux_set(struct tcpm_port *port, int state,
932 enum usb_role usb_role,
933 enum typec_orientation orientation)
934 {
935 int ret;
936
937 tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
938 state, usb_role, orientation);
939
940 ret = typec_set_orientation(port->typec_port, orientation);
941 if (ret)
942 return ret;
943
944 if (port->role_sw) {
945 ret = usb_role_switch_set_role(port->role_sw, usb_role);
946 if (ret)
947 return ret;
948 }
949
950 return typec_set_mode(port->typec_port, state);
951 }
952
tcpm_set_polarity(struct tcpm_port * port,enum typec_cc_polarity polarity)953 static int tcpm_set_polarity(struct tcpm_port *port,
954 enum typec_cc_polarity polarity)
955 {
956 int ret;
957
958 tcpm_log(port, "polarity %d", polarity);
959
960 ret = port->tcpc->set_polarity(port->tcpc, polarity);
961 if (ret < 0)
962 return ret;
963
964 port->polarity = polarity;
965
966 return 0;
967 }
968
/* Switch VCONN sourcing on/off and, on success, update the vconn role. */
static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
{
	int ret;

	tcpm_log(port, "vconn:=%d", enable);

	ret = port->tcpc->set_vconn(port->tcpc, enable);
	if (ret)
		return ret;

	port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
	typec_set_vconn_role(port->typec_port, port->vconn_role);

	return ret;
}
983
tcpm_is_debouncing(struct tcpm_port * port)984 bool tcpm_is_debouncing(struct tcpm_port *port)
985 {
986 bool debounce;
987
988 if (!port)
989 return false;
990
991 mutex_lock(&port->lock);
992 debounce = port->debouncing;
993 mutex_unlock(&port->lock);
994
995 return debounce;
996 }
997 EXPORT_SYMBOL_GPL(tcpm_is_debouncing);
998
tcpm_get_current_limit(struct tcpm_port * port)999 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1000 {
1001 enum typec_cc_status cc;
1002 u32 limit;
1003
1004 cc = port->polarity ? port->cc2 : port->cc1;
1005 switch (cc) {
1006 case TYPEC_CC_RP_1_5:
1007 limit = 1500;
1008 break;
1009 case TYPEC_CC_RP_3_0:
1010 limit = 3000;
1011 break;
1012 case TYPEC_CC_RP_DEF:
1013 default:
1014 if (port->tcpc->get_current_limit)
1015 limit = port->tcpc->get_current_limit(port->tcpc);
1016 else
1017 limit = 0;
1018 break;
1019 }
1020
1021 return limit;
1022 }
1023
/*
 * Record the negotiated supply voltage/current, notify the power supply,
 * and forward the limit to the TCPC if it supports programming it.
 */
static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
{
	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);

	port->supply_voltage = mv;
	port->current_limit = max_ma;
	power_supply_changed(port->psy);

	if (!port->tcpc->set_current_limit)
		return -EOPNOTSUPP;

	return port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
}
1039
/* Push the current power/data roles to the TCPC with the given attach state. */
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
				     port->data_role);
}
1045
/*
 * Apply new power/data roles: derive the USB role from the port's data
 * capability, set mux/orientation, program the TCPC, then update the
 * cached roles and the typec class. Returns -ENOTSUPP when the requested
 * data role is incompatible with a data-role-fixed port.
 */
static int tcpm_set_roles(struct tcpm_port *port, bool attached,
			  enum typec_role role, enum typec_data_role data)
{
	enum typec_orientation orientation;
	enum usb_role usb_role;
	int ret;

	orientation = port->polarity == TYPEC_POLARITY_CC1 ?
		      TYPEC_ORIENTATION_NORMAL : TYPEC_ORIENTATION_REVERSE;

	switch (port->typec_caps.data) {
	case TYPEC_PORT_DRD:
		usb_role = data == TYPEC_HOST ? USB_ROLE_HOST : USB_ROLE_DEVICE;
		break;
	case TYPEC_PORT_DFP:
		/* Host-only port: only usable as host, and only powered-up
		 * when also sourcing power.
		 */
		if (data != TYPEC_HOST)
			return -ENOTSUPP;
		usb_role = role == TYPEC_SOURCE ? USB_ROLE_HOST : USB_ROLE_NONE;
		break;
	default:
		/* Device-only (UFP) port: mirror image of the DFP case. */
		if (data != TYPEC_DEVICE)
			return -ENOTSUPP;
		usb_role = role == TYPEC_SINK ? USB_ROLE_DEVICE : USB_ROLE_NONE;
		break;
	}

	ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
	if (ret < 0)
		return ret;

	port->pwr_role = role;
	port->data_role = data;
	typec_set_data_role(port->typec_port, data);
	typec_set_pwr_role(port->typec_port, role);

	return 0;
}
1098
tcpm_set_pwr_role(struct tcpm_port * port,enum typec_role role)1099 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1100 {
1101 int ret;
1102
1103 ret = port->tcpc->set_roles(port->tcpc, true, role,
1104 port->data_role);
1105 if (ret < 0)
1106 return ret;
1107
1108 port->pwr_role = role;
1109 typec_set_pwr_role(port->typec_port, role);
1110
1111 return 0;
1112 }
1113
1114 /*
1115 * Transform the PDO to be compliant to PD rev2.0.
1116 * Return 0 if the PDO type is not defined in PD rev2.0.
1117 * Otherwise, return the converted PDO.
1118 */
/*
 * Strip PD rev3.0-only bits from a PDO so it can be advertised to a
 * PD rev2.0 partner.  Returns 0 when the PDO type does not exist in
 * rev2.0 (e.g. APDO); otherwise returns the sanitized PDO.
 */
static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
{
	enum pd_pdo_type type = pdo_type(pdo);

	/* Variable and battery PDOs are unchanged between revisions */
	if (type == PDO_TYPE_VAR || type == PDO_TYPE_BATT)
		return pdo;

	/* APDO (and anything unknown) has no rev2.0 representation */
	if (type != PDO_TYPE_FIXED)
		return 0;

	/* FRS current bits are sink-only; unchunked support is source-only */
	return role == TYPEC_SINK ? pdo & ~PDO_FIXED_FRS_CURR_MASK :
				    pdo & ~PDO_FIXED_UNCHUNK_EXT;
}
1135
tcpm_pd_send_source_caps(struct tcpm_port * port)1136 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1137 {
1138 struct pd_message msg;
1139 u32 pdo;
1140 unsigned int i, nr_pdo = 0;
1141
1142 memset(&msg, 0, sizeof(msg));
1143
1144 for (i = 0; i < port->nr_src_pdo; i++) {
1145 if (port->negotiated_rev >= PD_REV30) {
1146 msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
1147 } else {
1148 pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1149 if (pdo)
1150 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1151 }
1152 }
1153
1154 if (!nr_pdo) {
1155 /* No source capabilities defined, sink only */
1156 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1157 port->pwr_role,
1158 port->data_role,
1159 port->negotiated_rev,
1160 port->message_id, 0);
1161 } else {
1162 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1163 port->pwr_role,
1164 port->data_role,
1165 port->negotiated_rev,
1166 port->message_id,
1167 nr_pdo);
1168 }
1169
1170 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1171 }
1172
tcpm_pd_send_sink_caps(struct tcpm_port * port)1173 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1174 {
1175 struct pd_message msg;
1176 u32 pdo;
1177 unsigned int i, nr_pdo = 0;
1178
1179 memset(&msg, 0, sizeof(msg));
1180
1181 for (i = 0; i < port->nr_snk_pdo; i++) {
1182 if (port->negotiated_rev >= PD_REV30) {
1183 msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
1184 } else {
1185 pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1186 if (pdo)
1187 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1188 }
1189 }
1190
1191 if (!nr_pdo) {
1192 /* No sink capabilities defined, source only */
1193 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1194 port->pwr_role,
1195 port->data_role,
1196 port->negotiated_rev,
1197 port->message_id, 0);
1198 } else {
1199 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1200 port->pwr_role,
1201 port->data_role,
1202 port->negotiated_rev,
1203 port->message_id,
1204 nr_pdo);
1205 }
1206
1207 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1208 }
1209
mod_tcpm_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1210 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1211 {
1212 if (delay_ms) {
1213 hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1214 } else {
1215 hrtimer_cancel(&port->state_machine_timer);
1216 kthread_queue_work(port->wq, &port->state_machine);
1217 }
1218 }
1219
mod_vdm_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1220 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1221 {
1222 if (delay_ms) {
1223 hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1224 HRTIMER_MODE_REL);
1225 } else {
1226 hrtimer_cancel(&port->vdm_state_machine_timer);
1227 kthread_queue_work(port->wq, &port->vdm_state_machine);
1228 }
1229 }
1230
mod_enable_frs_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1231 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1232 {
1233 if (delay_ms) {
1234 hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1235 } else {
1236 hrtimer_cancel(&port->enable_frs_timer);
1237 kthread_queue_work(port->wq, &port->enable_frs);
1238 }
1239 }
1240
mod_send_discover_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1241 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1242 {
1243 if (delay_ms) {
1244 hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1245 } else {
1246 hrtimer_cancel(&port->send_discover_timer);
1247 kthread_queue_work(port->wq, &port->send_discover_work);
1248 }
1249 }
1250
/*
 * Request a state machine transition.  With delay_ms != 0 the target is
 * recorded in port->delayed_state and the state machine timer is armed;
 * with delay_ms == 0 port->state is changed right away and the state
 * machine work is queued (unless it is already running and will pick the
 * new state up itself).
 */
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
			   unsigned int delay_ms)
{
	if (delay_ms) {
		tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
			 tcpm_states[port->state], tcpm_states[state], delay_ms,
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
		port->delayed_state = state;
		mod_tcpm_delayed_work(port, delay_ms);
		/* Absolute time at which the delayed change is due */
		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
		port->delay_ms = delay_ms;
	} else {
		tcpm_log(port, "state change %s -> %s [%s %s]",
			 tcpm_states[port->state], tcpm_states[state],
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
		/* An immediate change supersedes any pending delayed change */
		port->delayed_state = INVALID_STATE;
		port->prev_state = port->state;
		port->state = state;
		/*
		 * Don't re-queue the state machine work item if we're currently
		 * in the state machine and we're immediately changing states.
		 * tcpm_state_machine_work() will continue running the state
		 * machine.
		 */
		if (!port->state_machine_running)
			mod_tcpm_delayed_work(port, 0);
	}
}
1279
tcpm_set_state_cond(struct tcpm_port * port,enum tcpm_state state,unsigned int delay_ms)1280 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1281 unsigned int delay_ms)
1282 {
1283 if (port->enter_state == port->state)
1284 tcpm_set_state(port, state, delay_ms);
1285 else
1286 tcpm_log(port,
1287 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1288 delay_ms ? "delayed " : "",
1289 tcpm_states[port->state], tcpm_states[state],
1290 delay_ms, tcpm_states[port->enter_state],
1291 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1292 }
1293
/*
 * Queue a PD control message request and kick the state machine so it
 * is transmitted from the current state.
 */
static void tcpm_queue_message(struct tcpm_port *port,
			       enum pd_msg_request message)
{
	port->queued_message = message;
	mod_tcpm_delayed_work(port, 0);
}
1300
tcpm_vdm_ams(struct tcpm_port * port)1301 static bool tcpm_vdm_ams(struct tcpm_port *port)
1302 {
1303 switch (port->ams) {
1304 case DISCOVER_IDENTITY:
1305 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1306 case DISCOVER_SVIDS:
1307 case DISCOVER_MODES:
1308 case DFP_TO_UFP_ENTER_MODE:
1309 case DFP_TO_UFP_EXIT_MODE:
1310 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1311 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1312 case ATTENTION:
1313 case UNSTRUCTURED_VDMS:
1314 case STRUCTURED_VDMS:
1315 break;
1316 default:
1317 return false;
1318 }
1319
1320 return true;
1321 }
1322
tcpm_ams_interruptible(struct tcpm_port * port)1323 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1324 {
1325 switch (port->ams) {
1326 /* Interruptible AMS */
1327 case NONE_AMS:
1328 case SECURITY:
1329 case FIRMWARE_UPDATE:
1330 case DISCOVER_IDENTITY:
1331 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1332 case DISCOVER_SVIDS:
1333 case DISCOVER_MODES:
1334 case DFP_TO_UFP_ENTER_MODE:
1335 case DFP_TO_UFP_EXIT_MODE:
1336 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1337 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1338 case UNSTRUCTURED_VDMS:
1339 case STRUCTURED_VDMS:
1340 case COUNTRY_INFO:
1341 case COUNTRY_CODES:
1342 break;
1343 /* Non-Interruptible AMS */
1344 default:
1345 if (port->in_ams)
1346 return false;
1347 break;
1348 }
1349
1350 return true;
1351 }
1352
/*
 * Begin a new Atomic Message Sequence.  Fails with -EAGAIN when a
 * non-interruptible AMS is in progress (unless the new AMS is a hard or
 * soft reset), or when we are a PD3.0 sink and the source has not set
 * SinkTxOk (collision avoidance).  Hard/soft reset and VDM AMSes return
 * early after their own state transition; all other AMSes go through
 * AMS_START or back to the READY state, honoring tSinkTx as source.
 */
static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
{
	int ret = 0;

	tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);

	if (!tcpm_ams_interruptible(port) &&
	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
		port->upcoming_state = INVALID_STATE;
		tcpm_log(port, "AMS %s not interruptible, aborting",
			 tcpm_ams_str[port->ams]);
		return -EAGAIN;
	}

	if (port->pwr_role == TYPEC_SOURCE) {
		/* Remember the Rp value before we pull it to SinkTxNG below */
		enum typec_cc_status cc_req = port->cc_req;

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_set_cc(port, tcpm_rp_cc(port));
			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, HARD_RESET_START, 0);
			return ret;
		} else if (ams == SOFT_RESET_AMS) {
			if (!port->explicit_contract)
				tcpm_set_cc(port, tcpm_rp_cc(port));
			tcpm_set_state(port, SOFT_RESET_SEND, 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			/* tSinkTx is enforced in vdm_run_state_machine */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_cc(port, SINK_TX_NG);
			return ret;
		}

		/* PD3.0 collision avoidance: signal SinkTxNG before sending */
		if (port->negotiated_rev >= PD_REV30)
			tcpm_set_cc(port, SINK_TX_NG);

		switch (port->state) {
		case SRC_READY:
		case SRC_STARTUP:
		case SRC_SOFT_RESET_WAIT_SNK_TX:
		case SOFT_RESET:
		case SOFT_RESET_SEND:
			/*
			 * If the sink was allowed to transmit (SinkTxOk), wait
			 * tSinkTx before starting so an in-flight sink message
			 * can complete.
			 */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, AMS_START,
					       cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, AMS_START, 0);
			break;
		default:
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, SRC_READY,
					       cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, SRC_READY, 0);
			break;
		}
	} else {
		/* As PD3.0 sink we may only transmit when SinkTxOk is set */
		if (port->negotiated_rev >= PD_REV30 &&
		    !tcpm_sink_tx_ok(port) &&
		    ams != SOFT_RESET_AMS &&
		    ams != HARD_RESET) {
			port->upcoming_state = INVALID_STATE;
			tcpm_log(port, "Sink TX No Go");
			return -EAGAIN;
		}

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, HARD_RESET_START, 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			return ret;
		}

		if (port->state == SNK_READY ||
		    port->state == SNK_SOFT_RESET)
			tcpm_set_state(port, AMS_START, 0);
		else
			tcpm_set_state(port, SNK_READY, 0);
	}

	return ret;
}
1443
1444 /*
1445 * VDM/VDO handling functions
1446 */
/*
 * Stage a VDM (header plus @cnt data objects) for transmission by the
 * VDM state machine.  Must be called with port->lock held, and not
 * while a previous VDM is still being processed.
 */
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
			   const u32 *data, int cnt)
{
	WARN_ON(!mutex_is_locked(&port->lock));

	/* Make sure we are not still processing a previous VDM packet */
	WARN_ON(port->vdm_state > VDM_STATE_DONE);

	/* vdo_data[0] holds the VDM header, the payload follows it */
	port->vdo_count = cnt + 1;
	port->vdo_data[0] = header;
	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
	/* Set ready, vdm state machine will actually send */
	port->vdm_retries = 0;
	port->vdm_state = VDM_STATE_READY;
	port->vdm_sm_running = true;

	mod_vdm_delayed_work(port, 0);
}
1465
/*
 * Same as tcpm_queue_vdm() but takes port->lock itself.  With
 * CONFIG_NO_GKI, pd_handler_lock is taken first and the VDM is dropped
 * if the port has meanwhile disconnected.
 */
static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
				    const u32 *data, int cnt)
{
#ifdef CONFIG_NO_GKI
	mutex_lock(&port->pd_handler_lock);
	if (tcpm_port_is_disconnected(port))
		goto unlock;
#endif
	mutex_lock(&port->lock);
	tcpm_queue_vdm(port, header, data, cnt);
	mutex_unlock(&port->lock);
#ifdef CONFIG_NO_GKI
unlock:
	mutex_unlock(&port->pd_handler_lock);
#endif
}
1482
svdm_consume_identity(struct tcpm_port * port,const u32 * p,int cnt)1483 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1484 {
1485 u32 vdo = p[VDO_INDEX_IDH];
1486 u32 product = p[VDO_INDEX_PRODUCT];
1487
1488 memset(&port->mode_data, 0, sizeof(port->mode_data));
1489
1490 port->partner_ident.id_header = vdo;
1491 port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1492 port->partner_ident.product = product;
1493
1494 typec_partner_set_identity(port->partner);
1495
1496 tcpm_log(port, "Identity: %04x:%04x.%04x",
1497 PD_IDH_VID(vdo),
1498 PD_PRODUCT_PID(product), product & 0xffff);
1499 }
1500
svdm_consume_svids(struct tcpm_port * port,const u32 * p,int cnt)1501 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
1502 {
1503 struct pd_mode_data *pmdata = &port->mode_data;
1504 int i;
1505
1506 for (i = 1; i < cnt; i++) {
1507 u16 svid;
1508
1509 svid = (p[i] >> 16) & 0xffff;
1510 if (!svid)
1511 return false;
1512
1513 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1514 goto abort;
1515
1516 pmdata->svids[pmdata->nsvids++] = svid;
1517 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1518
1519 svid = p[i] & 0xffff;
1520 if (!svid)
1521 return false;
1522
1523 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1524 goto abort;
1525
1526 pmdata->svids[pmdata->nsvids++] = svid;
1527 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1528 }
1529
1530 /*
1531 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1532 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
1533 * 6-19). If the Respondersupports 12 or more SVID then the Discover
1534 * SVIDs Command Shall be executed multiple times until a Discover
1535 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1536 * the last part of the last VDO or with a VDO containing two SVIDs
1537 * with values of 0x0000.
1538 *
1539 * However, some odd dockers support SVIDs less than 12 but without
1540 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1541 * request and return false here.
1542 */
1543 return cnt == 7 ? true : false;
1544 abort:
1545 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1546 return false;
1547 }
1548
svdm_consume_modes(struct tcpm_port * port,const u32 * p,int cnt)1549 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
1550 {
1551 struct pd_mode_data *pmdata = &port->mode_data;
1552 struct typec_altmode_desc *paltmode;
1553 int i;
1554
1555 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1556 /* Already logged in svdm_consume_svids() */
1557 return;
1558 }
1559
1560 for (i = 1; i < cnt; i++) {
1561 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1562 memset(paltmode, 0, sizeof(*paltmode));
1563
1564 paltmode->svid = pmdata->svids[pmdata->svid_index];
1565 paltmode->mode = i;
1566 paltmode->vdo = p[i];
1567
1568 tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1569 pmdata->altmodes, paltmode->svid,
1570 paltmode->mode, paltmode->vdo);
1571
1572 pmdata->altmodes++;
1573 }
1574 }
1575
tcpm_register_partner_altmodes(struct tcpm_port * port)1576 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1577 {
1578 struct pd_mode_data *modep = &port->mode_data;
1579 struct typec_altmode *altmode;
1580 int i;
1581
1582 for (i = 0; i < modep->altmodes; i++) {
1583 altmode = typec_partner_register_altmode(port->partner,
1584 &modep->altmode_desc[i]);
1585 if (IS_ERR(altmode)) {
1586 tcpm_log(port, "Failed to register partner SVID 0x%04x",
1587 modep->altmode_desc[i].svid);
1588 altmode = NULL;
1589 }
1590 port->partner_altmode[i] = altmode;
1591 }
1592 }
1593
1594 #define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1595
/*
 * Handle a received structured VDM.  Depending on the command type this
 * either builds a reply to a partner-initiated command (CMDT_INIT) or
 * consumes the partner's ACK/NAK to one of our own commands and decides
 * the next discovery step.  The reply, if any, is written to @response
 * and its length (in 32-bit words) returned; @adev_action tells the
 * caller what to forward to the matching alternate-mode driver.
 * Returns 0 when there is nothing to transmit.
 */
static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
			const u32 *p, int cnt, u32 *response,
			enum adev_actions *adev_action)
{
	struct typec_port *typec = port->typec_port;
	struct typec_altmode *pdev;
	struct pd_mode_data *modep;
	int svdm_version;
	int rlen = 0;
	int cmd_type;
	int cmd;
	int i;

	cmd_type = PD_VDO_CMDT(p[0]);
	cmd = PD_VDO_CMD(p[0]);

	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
		 p[0], cmd_type, cmd, cnt);

	modep = &port->mode_data;

	/* Partner altmode this VDM is addressed to, if registered */
	pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));

	svdm_version = typec_get_negotiated_svdm_version(typec);
	if (svdm_version < 0)
		return 0;

	switch (cmd_type) {
	case CMDT_INIT:
		/* Partner-initiated command: build ACK/NAK/BUSY reply */
		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			if (PD_VDO_VID(p[0]) != USB_SID_PD)
				break;

			/* Fall back to the partner's (lower) SVDM version */
			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
				typec_partner_set_svdm_version(port->partner,
							       PD_VDO_SVDM_VER(p[0]));
				svdm_version = PD_VDO_SVDM_VER(p[0]);
			}

			port->ams = DISCOVER_IDENTITY;
			/*
			 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
			 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
			 * "wrong configuation" or "Unrecognized"
			 */
			if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
			    port->nr_snk_vdo) {
				if (svdm_version < SVDM_VER_2_0) {
					for (i = 0; i < port->nr_snk_vdo_v1; i++)
						response[i + 1] = port->snk_vdo_v1[i];
					rlen = port->nr_snk_vdo_v1 + 1;

				} else {
					for (i = 0; i < port->nr_snk_vdo; i++)
						response[i + 1] = port->snk_vdo[i];
					rlen = port->nr_snk_vdo + 1;
				}
			}
			break;
		case CMD_DISCOVER_SVID:
			port->ams = DISCOVER_SVIDS;
			break;
		case CMD_DISCOVER_MODES:
			port->ams = DISCOVER_MODES;
			break;
		case CMD_ENTER_MODE:
			port->ams = DFP_TO_UFP_ENTER_MODE;
			break;
		case CMD_EXIT_MODE:
			port->ams = DFP_TO_UFP_EXIT_MODE;
			break;
		case CMD_ATTENTION:
			/* Attention command does not have response */
			*adev_action = ADEV_ATTENTION;
			return 0;
		default:
			break;
		}
		/* rlen >= 1 means a payload was prepared above: ACK it */
		if (rlen >= 1) {
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
		} else if (rlen == 0) {
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
		} else {
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
			rlen = 1;
		}
		/* Stamp the reply with the negotiated SVDM version */
		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
			      (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
		break;
	case CMDT_RSP_ACK:
		/* silently drop message if we are not connected */
		if (IS_ERR_OR_NULL(port->partner))
			break;

		tcpm_ams_finish(port);

		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
				typec_partner_set_svdm_version(port->partner,
							       PD_VDO_SVDM_VER(p[0]));
			/* 6.4.4.3.1 */
			svdm_consume_identity(port, p, cnt);
			/* Next discovery step: Discover SVIDs */
			response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
					  CMD_DISCOVER_SVID);
			rlen = 1;
			break;
		case CMD_DISCOVER_SVID:
			/* 6.4.4.3.2 */
			if (svdm_consume_svids(port, p, cnt)) {
				/* Response was full; request the next batch */
				response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
				rlen = 1;
			} else if (modep->nsvids && supports_modal(port)) {
				/* SVID list complete; start Discover Modes */
				response[0] = VDO(modep->svids[0], 1, svdm_version,
						  CMD_DISCOVER_MODES);
				rlen = 1;
			}
			break;
		case CMD_DISCOVER_MODES:
			/* 6.4.4.3.3 */
			svdm_consume_modes(port, p, cnt);
			modep->svid_index++;
			if (modep->svid_index < modep->nsvids) {
				/* More SVIDs left to query */
				u16 svid = modep->svids[modep->svid_index];
				response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
				rlen = 1;
			} else if (port->data_role == TYPEC_HOST) {
				tcpm_register_partner_altmodes(port);
			} else {
				/* Do dr_swap for ufp if the port supports drd */
				if (port->typec_caps.data == TYPEC_PORT_DRD &&
				    !IS_ERR_OR_NULL(port->port_altmode[0])) {
					port->vdm_sm_running = false;
					port->upcoming_state = DR_SWAP_SEND;
					tcpm_ams_start(port, DATA_ROLE_SWAP);
				}
			}
			break;
		case CMD_ENTER_MODE:
			if (adev && pdev) {
				typec_altmode_update_active(pdev, true);
				*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
			}
			return 0;
		case CMD_EXIT_MODE:
			if (adev && pdev) {
				typec_altmode_update_active(pdev, false);
				/* Back to USB Operation */
				*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
				return 0;
			}
			break;
		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
			break;
		default:
			/* Unrecognized SVDM */
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
				      (VDO_SVDM_VERS(svdm_version));
			break;
		}
		break;
	case CMDT_RSP_NAK:
		/* Partner rejected our command; AMS is over */
		tcpm_ams_finish(port);
		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			/* Do dr_swap for ufp if the port supports drd */
			if (port->typec_caps.data == TYPEC_PORT_DRD &&
			    port->data_role == TYPEC_DEVICE &&
			    !IS_ERR_OR_NULL(port->port_altmode[0])) {
				port->vdm_sm_running = false;
				port->upcoming_state = DR_SWAP_SEND;
				tcpm_ams_start(port, DATA_ROLE_SWAP);
				break;
			}
			fallthrough;
		case CMD_DISCOVER_SVID:
		case CMD_DISCOVER_MODES:
		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
			break;
		case CMD_ENTER_MODE:
			/* Back to USB Operation */
			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
			return 0;
		default:
			/* Unrecognized SVDM */
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
				      (VDO_SVDM_VERS(svdm_version));
			break;
		}
		break;
	default:
		/* Unknown command type (e.g. BUSY): NAK it */
		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
		rlen = 1;
		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
			      (VDO_SVDM_VERS(svdm_version));
		break;
	}

	/* Informing the alternate mode drivers about everything */
	*adev_action = ADEV_QUEUE_VDM;
	return rlen;
}
1805
1806 static void tcpm_pd_handle_msg(struct tcpm_port *port,
1807 enum pd_msg_request message,
1808 enum tcpm_ams ams);
1809
/*
 * Entry point for a received VDM (called from the PD RX path with
 * port->lock held).  Structured VDMs are dispatched to tcpm_pd_svdm();
 * the resulting action is then forwarded to the matching port
 * alternate-mode driver with port->lock temporarily dropped (see the
 * lock-inversion comment below), and any reply is queued afterwards.
 */
static void tcpm_handle_vdm_request(struct tcpm_port *port,
				    const __le32 *payload, int cnt)
{
	enum adev_actions adev_action = ADEV_NONE;
	struct typec_altmode *adev;
	u32 p[PD_MAX_PAYLOAD];
	u32 response[8] = { };
	int i, rlen = 0;

	for (i = 0; i < cnt; i++)
		p[i] = le32_to_cpu(payload[i]);

	/* Our own (port) altmode the VDM is addressed to, if any */
	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));

	if (port->vdm_state == VDM_STATE_BUSY) {
		/* If UFP responded busy retry after timeout */
		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
				CMDT_INIT;
			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
			return;
		}
		port->vdm_state = VDM_STATE_DONE;
	}

	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
		/*
		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
		 * advance because we are dropping the lock but may send VDMs soon.
		 * For the cases of INIT received:
		 *  - If no response to send, it will be cleared later in this function.
		 *  - If there are responses to send, it will be cleared in the state machine.
		 * For the cases of RSP received:
		 *  - If no further INIT to send, it will be cleared later in this function.
		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
		 *    back here until no further INIT to send.
		 * For the cases of unknown type received:
		 *  - We will send NAK and the flag will be cleared in the state machine.
		 */
		port->vdm_sm_running = true;
		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
	} else {
		/* Unstructured VDMs are not supported: PD3.0 replies Not_Supported */
		if (port->negotiated_rev >= PD_REV30)
			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
	}

	/*
	 * We are done with any state stored in the port struct now, except
	 * for any port struct changes done by the tcpm_queue_vdm() call
	 * below, which is a separate operation.
	 *
	 * So we can safely release the lock here; and we MUST release the
	 * lock here to avoid an AB BA lock inversion:
	 *
	 * If we keep the lock here then the lock ordering in this path is:
	 * 1. tcpm_pd_rx_handler take the tcpm port lock
	 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
	 *
	 * And we also have this ordering:
	 * 1. alt-mode driver takes the alt-mode's lock
	 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
	 *    tcpm port lock
	 *
	 * Dropping our lock here avoids this.
	 */
	mutex_unlock(&port->lock);

	if (adev) {
		switch (adev_action) {
		case ADEV_NONE:
			break;
		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
			typec_altmode_vdm(adev, p[0], &p[1], cnt);
			break;
		case ADEV_QUEUE_VDM:
			typec_altmode_vdm(adev, p[0], &p[1], cnt);
			break;
		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
			if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
				/* Altmode driver rejected Enter Mode: exit again */
				int svdm_version = typec_get_negotiated_svdm_version(
									port->typec_port);
				if (svdm_version < 0)
					break;

				response[0] = VDO(adev->svid, 1, svdm_version,
						  CMD_EXIT_MODE);
				response[0] |= VDO_OPOS(adev->mode);
				rlen = 1;
			}
			break;
		case ADEV_ATTENTION:
			typec_altmode_attention(adev, p[1]);
			break;
		}
	}

	/*
	 * We must re-take the lock here to balance the unlock in
	 * tcpm_pd_rx_handler, note that no changes, other then the
	 * tcpm_queue_vdm call, are made while the lock is held again.
	 * All that is done after the call is unwinding the call stack until
	 * we return to tcpm_pd_rx_handler and do the unlock there.
	 */
	mutex_lock(&port->lock);

	if (rlen > 0)
		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
	else
		port->vdm_sm_running = false;
}
1923
/*
 * Build a VDM header for @vid/@cmd and queue the VDM (with @count data
 * objects) for transmission.  Silently drops the request when no SVDM
 * version has been negotiated yet; @count is clamped to the maximum
 * payload size with a warning.
 */
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
			  const u32 *data, int count)
{
	u32 header;
	int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);

	if (svdm_version < 0)
		return;

	if (WARN_ON(count > VDO_MAX_SIZE - 1))
		count = VDO_MAX_SIZE - 1;

	/* set VDM header with VID & CMD */
	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
			svdm_version, cmd);
	tcpm_queue_vdm(port, header, data, count);
}
1942
/*
 * Return the response timeout (ms) to arm after transmitting the VDM
 * whose header is @vdm_hdr.  Mode enter/exit commands use the longer
 * mode-transition timeouts; initiator and responder directions differ.
 */
static unsigned int vdm_ready_timeout(u32 vdm_hdr)
{
	int cmd = PD_VDO_CMD(vdm_hdr);
	bool mode_cmd = cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE;

	/* its not a structured VDM command */
	if (!PD_VDO_SVDM(vdm_hdr))
		return PD_T_VDM_UNSTRUCTURED;

	if (PD_VDO_CMDT(vdm_hdr) == CMDT_INIT)
		return mode_cmd ? PD_T_VDM_WAIT_MODE_E : PD_T_VDM_SNDR_RSP;

	return mode_cmd ? PD_T_VDM_E_MODE : PD_T_VDM_RCVR_RSP;
}
1968
/*
 * One step of the VDM transmit state machine (called with port->lock
 * held from vdm_state_machine_work()).  READY starts the matching AMS
 * and schedules SEND_MESSAGE; SEND_MESSAGE transmits the staged VDM and
 * arms the response timeout; BUSY handles a response timeout; ERR_SEND
 * retries a failed transmission up to three times.
 */
static void vdm_run_state_machine(struct tcpm_port *port)
{
	struct pd_message msg;
	int i, res = 0;
	u32 vdo_hdr = port->vdo_data[0];

	switch (port->vdm_state) {
	case VDM_STATE_READY:
		/* Only transmit VDM if attached */
		if (!port->attached) {
			port->vdm_state = VDM_STATE_ERR_BUSY;
			break;
		}

		/*
		 * if there's traffic or we're not in PDO ready state don't send
		 * a VDM.
		 */
		if (port->state != SRC_READY && port->state != SNK_READY) {
			port->vdm_sm_running = false;
			break;
		}

		/* TODO: AMS operation for Unstructured VDM */
		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
			/* Map the SVDM command onto the AMS it starts */
			switch (PD_VDO_CMD(vdo_hdr)) {
			case CMD_DISCOVER_IDENT:
				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
				if (res == 0)
					port->send_discover = false;
				else if (res == -EAGAIN)
					mod_send_discover_delayed_work(port,
								       SEND_DISCOVER_RETRY_MS);
				break;
			case CMD_DISCOVER_SVID:
				res = tcpm_ams_start(port, DISCOVER_SVIDS);
				break;
			case CMD_DISCOVER_MODES:
				res = tcpm_ams_start(port, DISCOVER_MODES);
				break;
			case CMD_ENTER_MODE:
				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
				break;
			case CMD_EXIT_MODE:
				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
				break;
			case CMD_ATTENTION:
				res = tcpm_ams_start(port, ATTENTION);
				break;
			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
				res = tcpm_ams_start(port, STRUCTURED_VDMS);
				break;
			default:
				res = -EOPNOTSUPP;
				break;
			}

			if (res < 0) {
				port->vdm_state = VDM_STATE_ERR_BUSY;
				return;
			}
		}

		port->vdm_state = VDM_STATE_SEND_MESSAGE;
		/* As PD3.0 source initiator, delay tSinkTx before sending */
		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
					    port->pwr_role == TYPEC_SOURCE &&
					    PD_VDO_SVDM(vdo_hdr) &&
					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
					   PD_T_SINK_TX : 0);
		break;
	case VDM_STATE_WAIT_RSP_BUSY:
		/* Partner answered BUSY earlier: re-send the original INIT */
		port->vdo_data[0] = port->vdo_retry;
		port->vdo_count = 1;
		port->vdm_state = VDM_STATE_READY;
		tcpm_ams_finish(port);
		break;
	case VDM_STATE_BUSY:
		/* Response timeout expired */
		port->vdm_state = VDM_STATE_ERR_TMOUT;
		if (port->ams != NONE_AMS)
			tcpm_ams_finish(port);
		break;
	case VDM_STATE_ERR_SEND:
		/*
		 * A partner which does not support USB PD will not reply,
		 * so this is not a fatal error. At the same time, some
		 * devices may not return GoodCRC under some circumstances,
		 * so we need to retry.
		 */
		if (port->vdm_retries < 3) {
			tcpm_log(port, "VDM Tx error, retry");
			port->vdm_retries++;
			port->vdm_state = VDM_STATE_READY;
			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
				tcpm_ams_finish(port);
		} else {
			tcpm_ams_finish(port);
		}
		break;
	case VDM_STATE_SEND_MESSAGE:
		/* Prepare and send VDM */
		memset(&msg, 0, sizeof(msg));
		msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id, port->vdo_count);
		for (i = 0; i < port->vdo_count; i++)
			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
		res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
		if (res < 0) {
			port->vdm_state = VDM_STATE_ERR_SEND;
		} else {
			unsigned long timeout;

			port->vdm_retries = 0;
			port->vdm_state = VDM_STATE_BUSY;
			timeout = vdm_ready_timeout(vdo_hdr);
			mod_vdm_delayed_work(port, timeout);
		}
		break;
	default:
		break;
	}
}
2093
/*
 * kthread work function driving the VDM state machine.  Steps the
 * machine until it stops changing state or blocks waiting for a
 * response/transmission, then clears vdm_sm_running once the machine
 * has returned to an idle state.
 */
static void vdm_state_machine_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
	enum vdm_states prev_state;

	mutex_lock(&port->lock);

	/*
	 * Continue running as long as the port is not busy and there was
	 * a state change.
	 */
	do {
		prev_state = port->vdm_state;
		vdm_run_state_machine(port);
	} while (port->vdm_state != prev_state &&
		 port->vdm_state != VDM_STATE_BUSY &&
		 port->vdm_state != VDM_STATE_SEND_MESSAGE);

	/* States below VDM_STATE_READY are terminal/idle states */
	if (port->vdm_state < VDM_STATE_READY)
		port->vdm_sm_running = false;

	mutex_unlock(&port->lock);
}
2117
/* Validation results for a source/sink capabilities PDO list */
enum pdo_err {
	PDO_NO_ERR,
	PDO_ERR_NO_VSAFE5V,
	PDO_ERR_VSAFE5V_NOT_FIRST,
	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
	PDO_ERR_FIXED_NOT_SORTED,
	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
	PDO_ERR_DUPE_PDO,
	PDO_ERR_PPS_APDO_NOT_SORTED,
	PDO_ERR_DUPE_PPS_APDO,
};
2129
/* Log messages indexed by enum pdo_err (PDO_NO_ERR has no message) */
static const char * const pdo_err_msg[] = {
	[PDO_ERR_NO_VSAFE5V] =
	" err: source/sink caps should atleast have vSafe5V",
	[PDO_ERR_VSAFE5V_NOT_FIRST] =
	" err: vSafe5V Fixed Supply Object Shall always be the first object",
	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
	" err: PDOs should be in the following order: Fixed; Battery; Variable",
	[PDO_ERR_FIXED_NOT_SORTED] =
	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
	[PDO_ERR_DUPE_PDO] =
	" err: Variable/Batt supply pdos cannot have same min/max voltage",
	[PDO_ERR_PPS_APDO_NOT_SORTED] =
	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
	[PDO_ERR_DUPE_PPS_APDO] =
	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
};
2148
tcpm_caps_err(struct tcpm_port * port,const u32 * pdo,unsigned int nr_pdo)2149 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2150 unsigned int nr_pdo)
2151 {
2152 unsigned int i;
2153
2154 /* Should at least contain vSafe5v */
2155 if (nr_pdo < 1)
2156 return PDO_ERR_NO_VSAFE5V;
2157
2158 /* The vSafe5V Fixed Supply Object Shall always be the first object */
2159 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2160 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2161 return PDO_ERR_VSAFE5V_NOT_FIRST;
2162
2163 for (i = 1; i < nr_pdo; i++) {
2164 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2165 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2166 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2167 enum pd_pdo_type type = pdo_type(pdo[i]);
2168
2169 switch (type) {
2170 /*
2171 * The remaining Fixed Supply Objects, if
2172 * present, shall be sent in voltage order;
2173 * lowest to highest.
2174 */
2175 case PDO_TYPE_FIXED:
2176 if (pdo_fixed_voltage(pdo[i]) <=
2177 pdo_fixed_voltage(pdo[i - 1]))
2178 return PDO_ERR_FIXED_NOT_SORTED;
2179 break;
2180 /*
2181 * The Battery Supply Objects and Variable
2182 * supply, if present shall be sent in Minimum
2183 * Voltage order; lowest to highest.
2184 */
2185 case PDO_TYPE_VAR:
2186 case PDO_TYPE_BATT:
2187 if (pdo_min_voltage(pdo[i]) <
2188 pdo_min_voltage(pdo[i - 1]))
2189 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2190 else if ((pdo_min_voltage(pdo[i]) ==
2191 pdo_min_voltage(pdo[i - 1])) &&
2192 (pdo_max_voltage(pdo[i]) ==
2193 pdo_max_voltage(pdo[i - 1])))
2194 return PDO_ERR_DUPE_PDO;
2195 break;
2196 /*
2197 * The Programmable Power Supply APDOs, if present,
2198 * shall be sent in Maximum Voltage order;
2199 * lowest to highest.
2200 */
2201 case PDO_TYPE_APDO:
2202 if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2203 break;
2204
2205 if (pdo_pps_apdo_max_voltage(pdo[i]) <
2206 pdo_pps_apdo_max_voltage(pdo[i - 1]))
2207 return PDO_ERR_PPS_APDO_NOT_SORTED;
2208 else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2209 pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2210 pdo_pps_apdo_max_voltage(pdo[i]) ==
2211 pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2212 pdo_pps_apdo_max_current(pdo[i]) ==
2213 pdo_pps_apdo_max_current(pdo[i - 1]))
2214 return PDO_ERR_DUPE_PPS_APDO;
2215 break;
2216 default:
2217 tcpm_log_force(port, " Unknown pdo type");
2218 }
2219 }
2220 }
2221
2222 return PDO_NO_ERR;
2223 }
2224
tcpm_validate_caps(struct tcpm_port * port,const u32 * pdo,unsigned int nr_pdo)2225 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2226 unsigned int nr_pdo)
2227 {
2228 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2229
2230 if (err_index != PDO_NO_ERR) {
2231 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2232 return -EINVAL;
2233 }
2234
2235 return 0;
2236 }
2237
/* typec_altmode_ops.enter: queue an Enter Mode VDM for this altmode. */
static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
{
	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
	int nobj = vdo ? 2 : 1;	/* header plus the optional single VDO */
	int ver;
	u32 hdr;

	ver = typec_get_negotiated_svdm_version(port->typec_port);
	if (ver < 0)
		return ver;

	hdr = VDO(altmode->svid, nobj, ver, CMD_ENTER_MODE) |
	      VDO_OPOS(altmode->mode);

	tcpm_queue_vdm_unlocked(port, hdr, vdo, nobj - 1);
	return 0;
}
2254
tcpm_altmode_exit(struct typec_altmode * altmode)2255 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2256 {
2257 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2258 int svdm_version;
2259 u32 header;
2260
2261 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2262 if (svdm_version < 0)
2263 return svdm_version;
2264
2265 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2266 header |= VDO_OPOS(altmode->mode);
2267
2268 tcpm_queue_vdm_unlocked(port, header, NULL, 0);
2269 return 0;
2270 }
2271
/* typec_altmode_ops.vdm: forward an altmode driver's VDM to the TX queue. */
static int tcpm_altmode_vdm(struct typec_altmode *altmode,
			    u32 header, const u32 *data, int count)
{
	/* 'count' includes the header word, so the VDO count is one less. */
	tcpm_queue_vdm_unlocked(typec_altmode_get_drvdata(altmode),
				header, data, count - 1);

	return 0;
}
2281
/* Altmode operations TCPM registers for each partner altmode it creates. */
static const struct typec_altmode_ops tcpm_altmode_ops = {
	.enter = tcpm_altmode_enter,
	.exit = tcpm_altmode_exit,
	.vdm = tcpm_altmode_vdm,
};
2287
2288 /*
2289 * PD (data, control) command handling functions
2290 */
ready_state(struct tcpm_port * port)2291 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2292 {
2293 if (port->pwr_role == TYPEC_SOURCE)
2294 return SRC_READY;
2295 else
2296 return SNK_READY;
2297 }
2298
2299 static int tcpm_pd_send_control(struct tcpm_port *port,
2300 enum pd_ctrl_msg_type type);
2301
tcpm_handle_alert(struct tcpm_port * port,const __le32 * payload,int cnt)2302 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2303 int cnt)
2304 {
2305 u32 p0 = le32_to_cpu(payload[0]);
2306 unsigned int type = usb_pd_ado_type(p0);
2307
2308 if (!type) {
2309 tcpm_log(port, "Alert message received with no type");
2310 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2311 return;
2312 }
2313
2314 /* Just handling non-battery alerts for now */
2315 if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
2316 if (port->pwr_role == TYPEC_SOURCE) {
2317 port->upcoming_state = GET_STATUS_SEND;
2318 tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
2319 } else {
2320 /*
2321 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
2322 * SinkTxOk in time.
2323 */
2324 port->ams = GETTING_SOURCE_SINK_STATUS;
2325 tcpm_set_state(port, GET_STATUS_SEND, 0);
2326 }
2327 } else {
2328 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2329 }
2330 }
2331
/*
 * Program the TCPC's automatic VBUS discharge threshold for the given
 * power mode / requested voltage. Succeeds silently when the TCPC does
 * not implement the optional callback.
 */
static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
						  enum typec_pwr_opmode mode, bool pps_active,
						  u32 requested_vbus_voltage)
{
	int ret;

	/* Optional TCPC hook: treat "not implemented" as success. */
	if (!port->tcpc->set_auto_vbus_discharge_threshold)
		return 0;

	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
							    requested_vbus_voltage);
	tcpm_log_force(port,
		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);

	return ret;
}
2349
/*
 * Transition to @state as part of starting AMS @ams, taking the current
 * port state into account (USB PD 3.0, 8.3.3.4.1.1 and 6.8.1).
 */
static void tcpm_pd_handle_state(struct tcpm_port *port,
				 enum tcpm_state state,
				 enum tcpm_ams ams,
				 unsigned int delay_ms)
{
	switch (port->state) {
	case SRC_READY:
	case SNK_READY:
		/* Idle in explicit contract: start the AMS right away. */
		port->ams = ams;
		tcpm_set_state(port, state, delay_ms);
		break;
	/* 8.3.3.4.1.1 and 6.8.1 power transitioning */
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
	case SRC_TRANSITION_SUPPLY:
		/* Interruption during a power transition forces a Hard Reset. */
		tcpm_set_state(port, HARD_RESET_SEND, 0);
		break;
	default:
		if (!tcpm_ams_interruptible(port)) {
			/* Protocol error in a non-interruptible AMS: Soft Reset. */
			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
				       SRC_SOFT_RESET_WAIT_SNK_TX :
				       SNK_SOFT_RESET,
				       0);
		} else {
			/* process the Message 6.8.1 */
			port->upcoming_state = state;
			port->next_ams = ams;
			tcpm_set_state(port, ready_state(port), delay_ms);
		}
		break;
	}
}
2382
/*
 * Queue outgoing message @message as part of AMS @ams, taking the current
 * port state into account (USB PD 3.0, 8.3.3.4.1.1 and 6.8.1). Mirrors
 * tcpm_pd_handle_state() but queues a message instead of entering a state.
 */
static void tcpm_pd_handle_msg(struct tcpm_port *port,
			       enum pd_msg_request message,
			       enum tcpm_ams ams)
{
	switch (port->state) {
	case SRC_READY:
	case SNK_READY:
		/* Idle in explicit contract: send the response right away. */
		port->ams = ams;
		tcpm_queue_message(port, message);
		break;
	/* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
	case SRC_TRANSITION_SUPPLY:
		/* Interruption during a power transition forces a Hard Reset. */
		tcpm_set_state(port, HARD_RESET_SEND, 0);
		break;
	default:
		if (!tcpm_ams_interruptible(port)) {
			/* Protocol error in a non-interruptible AMS: Soft Reset. */
			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
				       SRC_SOFT_RESET_WAIT_SNK_TX :
				       SNK_SOFT_RESET,
				       0);
		} else {
			port->next_ams = ams;
			tcpm_set_state(port, ready_state(port), 0);
			/* 6.8.1 process the Message */
			tcpm_queue_message(port, message);
		}
		break;
	}
}
2414
/*
 * Handle a received PD data message (Source_Capabilities, Request,
 * Sink_Capabilities, Vendor_Defined, BIST, Alert, ...).
 * Called from tcpm_pd_rx_handler() with port->lock held.
 */
static void tcpm_pd_data_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_data_msg_type type = pd_header_type_le(msg->header);
	unsigned int cnt = pd_header_cnt_le(msg->header);
	unsigned int rev = pd_header_rev_le(msg->header);
	unsigned int i;
	enum frs_typec_current partner_frs_current;
	bool frs_enable;
	int ret;

	/*
	 * Any data message other than a VDM interrupts an ongoing VDM AMS:
	 * flag the error and kick the VDM state machine to clean up.
	 */
	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	switch (type) {
	case PD_DATA_SOURCE_CAP:
		/* Cache the partner's source PDOs for later selection. */
		for (i = 0; i < cnt; i++)
			port->source_caps[i] = le32_to_cpu(msg->payload[i]);

		port->nr_source_caps = cnt;

		tcpm_log_source_caps(port);

		tcpm_validate_caps(port, port->source_caps,
				   port->nr_source_caps);

		trace_android_vh_typec_store_partner_src_caps(port, &port->nr_source_caps,
							      &port->source_caps);

		/*
		 * Adjust revision in subsequent message headers, as required,
		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
		 * support Rev 1.0 so just do nothing in that scenario.
		 */
		if (rev == PD_REV10) {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_ams_finish(port);
			break;
		}

		if (rev < PD_MAX_REV)
			port->negotiated_rev = min_t(u16, rev, port->negotiated_rev);

		if (port->pwr_role == TYPEC_SOURCE) {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
			/* Unexpected Source Capabilities */
			else
				tcpm_pd_handle_msg(port,
						   port->negotiated_rev < PD_REV30 ?
						   PD_MSG_CTRL_REJECT :
						   PD_MSG_CTRL_NOT_SUPP,
						   NONE_AMS);
		} else if (port->state == SNK_WAIT_CAPABILITIES) {
			/*
			 * This message may be received even if VBUS is not
			 * present. This is quite unexpected; see USB PD
			 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
			 * However, at the same time, we must be ready to
			 * receive this message and respond to it 15ms after
			 * receiving PS_RDY during power swap operations, no matter
			 * if VBUS is available or not (USB PD specification,
			 * section 6.5.9.2).
			 * So we need to accept the message either way,
			 * but be prepared to keep waiting for VBUS after it was
			 * handled.
			 */
			port->ams = POWER_NEGOTIATION;
			port->in_ams = true;
			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
		} else {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_ams_finish(port);
			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
					     POWER_NEGOTIATION, 0);
		}
		break;
	case PD_DATA_REQUEST:
		/*
		 * Adjust revision in subsequent message headers, as required,
		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
		 * support Rev 1.0 so just reject in that scenario.
		 */
		if (rev == PD_REV10) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
			break;
		}

		if (rev < PD_MAX_REV)
			port->negotiated_rev = min_t(u16, rev, port->negotiated_rev);

		/* A Request is only valid for a source and carries one RDO. */
		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
			break;
		}

		port->sink_request = le32_to_cpu(msg->payload[0]);

		/* Defer power negotiation while the VDM state machine is busy. */
		if (port->vdm_sm_running && port->explicit_contract) {
			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
			break;
		}

		if (port->state == SRC_SEND_CAPABILITIES)
			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
		else
			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
					     POWER_NEGOTIATION, 0);
		break;
	case PD_DATA_SINK_CAP:
		/* We don't do anything with this at the moment... */
		for (i = 0; i < cnt; i++)
			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);

		/* Enable FRS when the partner advertises a current we can supply. */
		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
			PDO_FIXED_FRS_CURR_SHIFT;
		frs_enable = partner_frs_current && (partner_frs_current <=
						     port->new_source_frs_current);
		tcpm_log(port,
			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
		if (frs_enable) {
			ret = port->tcpc->enable_frs(port->tcpc, true);
			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
		}

		port->nr_sink_caps = cnt;
		port->sink_cap_done = true;
		if (port->ams == GET_SINK_CAPABILITIES)
			tcpm_set_state(port, ready_state(port), 0);
		/* Unexpected Sink Capabilities */
		else
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		break;
	case PD_DATA_VENDOR_DEF:
		tcpm_handle_vdm_request(port, msg->payload, cnt);
		break;
	case PD_DATA_BIST:
		port->bist_request = le32_to_cpu(msg->payload[0]);
		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
		break;
	case PD_DATA_ALERT:
		/* Alerts are only expected while in an explicit contract. */
		if (port->state != SRC_READY && port->state != SNK_READY)
			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
					     NONE_AMS, 0);
		else
			tcpm_handle_alert(port, msg->payload, cnt);
		break;
	case PD_DATA_BATT_STATUS:
	case PD_DATA_GET_COUNTRY_INFO:
		/* Currently unsupported */
		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		tcpm_log(port, "Unrecognized data message type %#x", type);
		break;
	}
}
2596
tcpm_pps_complete(struct tcpm_port * port,int result)2597 static void tcpm_pps_complete(struct tcpm_port *port, int result)
2598 {
2599 if (port->pps_pending) {
2600 port->pps_status = result;
2601 port->pps_pending = false;
2602 complete(&port->pps_complete);
2603 }
2604 }
2605
/*
 * Handle a received PD control message (PS_RDY, Accept, Reject/Wait,
 * swap requests, Soft_Reset, ...).
 * Called from tcpm_pd_rx_handler() with port->lock held.
 */
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
	enum tcpm_state next_state;

	/*
	 * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
	 * VDM AMS if waiting for VDM responses and will be handled later.
	 */
	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	switch (type) {
	case PD_CTRL_GOOD_CRC:
	case PD_CTRL_PING:
		break;
	case PD_CTRL_GET_SOURCE_CAP:
		tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
		break;
	case PD_CTRL_GET_SINK_CAP:
		tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
		break;
	case PD_CTRL_GOTO_MIN:
		break;
	case PD_CTRL_PS_RDY:
		/* The partner's power supply transition has completed. */
		switch (port->state) {
		case SNK_TRANSITION_SINK:
			if (port->vbus_present) {
				tcpm_set_current_limit(port,
						       port->req_current_limit,
						       port->req_supply_voltage);
				port->explicit_contract = true;
				tcpm_set_auto_vbus_discharge_threshold(port,
								       TYPEC_PWR_MODE_PD,
								       port->pps_data.active,
								       port->supply_voltage);
				tcpm_set_state(port, SNK_READY, 0);
			} else {
				/*
				 * Seen after power swap. Keep waiting for VBUS
				 * in a transitional state.
				 */
				tcpm_set_state(port,
					       SNK_TRANSITION_SINK_VBUS, 0);
			}
			break;
		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
			break;
		case PR_SWAP_SNK_SRC_SINK_OFF:
			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
			break;
		case VCONN_SWAP_WAIT_FOR_VCONN:
			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
			break;
		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
			break;
		default:
			/* Unexpected PS_RDY: recover via Soft Reset. */
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_REJECT:
	case PD_CTRL_WAIT:
	case PD_CTRL_NOT_SUPP:
		/* The partner declined (or deferred) our last request. */
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			/* USB PD specification, Figure 8-43 */
			if (port->explicit_contract)
				next_state = SNK_READY;
			else
				next_state = SNK_WAIT_CAPABILITIES;

			/* Threshold was relaxed before sending Request. Restore it back. */
			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
							       port->pps_data.active,
							       port->supply_voltage);
			tcpm_set_state(port, next_state, 0);
			break;
		case SNK_NEGOTIATE_PPS_CAPABILITIES:
			/* Revert data back from any requested PPS updates */
			port->pps_data.req_out_volt = port->supply_voltage;
			port->pps_data.req_op_curr = port->current_limit;
			port->pps_status = (type == PD_CTRL_WAIT ?
					    -EAGAIN : -EOPNOTSUPP);

			/* Threshold was relaxed before sending Request. Restore it back. */
			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
							       port->pps_data.active,
							       port->supply_voltage);

			tcpm_set_state(port, SNK_READY, 0);
			break;
		case DR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
			break;
		case PR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
			break;
		case VCONN_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
			break;
		case FR_SWAP_SEND:
			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
			break;
		case GET_SINK_CAP:
			port->sink_cap_done = true;
			tcpm_set_state(port, ready_state(port), 0);
			break;
		case SRC_READY:
		case SNK_READY:
			/* A NOT_SUPP while idle may terminate an in-flight VDM. */
			if (port->vdm_state > VDM_STATE_READY) {
				port->vdm_state = VDM_STATE_DONE;
				if (tcpm_vdm_ams(port))
					tcpm_ams_finish(port);
				mod_vdm_delayed_work(port, 0);
				break;
			}
			fallthrough;
		default:
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_ACCEPT:
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			port->pps_data.active = false;
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SNK_NEGOTIATE_PPS_CAPABILITIES:
			/* Commit the PPS parameters that were requested. */
			port->pps_data.active = true;
			port->pps_data.min_volt = port->pps_data.req_min_volt;
			port->pps_data.max_volt = port->pps_data.req_max_volt;
			port->pps_data.max_curr = port->pps_data.req_max_curr;
			port->req_supply_voltage = port->pps_data.req_out_volt;
			port->req_current_limit = port->pps_data.req_op_curr;
			power_supply_changed(port->psy);
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SOFT_RESET_SEND:
			if (port->ams == SOFT_RESET_AMS)
				tcpm_ams_finish(port);
			if (port->pwr_role == TYPEC_SOURCE) {
				port->upcoming_state = SRC_SEND_CAPABILITIES;
				tcpm_ams_start(port, POWER_NEGOTIATION);
			} else {
				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
			}
			break;
		case DR_SWAP_SEND:
			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
			break;
		case PR_SWAP_SEND:
			tcpm_set_state(port, PR_SWAP_START, 0);
			break;
		case VCONN_SWAP_SEND:
			tcpm_set_state(port, VCONN_SWAP_START, 0);
			break;
		case FR_SWAP_SEND:
			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
			break;
		default:
			/* Unexpected Accept: recover via Soft Reset. */
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_SOFT_RESET:
		port->ams = SOFT_RESET_AMS;
		tcpm_set_state(port, SOFT_RESET, 0);
		break;
	case PD_CTRL_DR_SWAP:
		/*
		 * XXX
		 * 6.3.9: If an alternate mode is active, a request to swap
		 * alternate modes shall trigger a port reset.
		 */
		if (port->typec_caps.data != TYPEC_PORT_DRD) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		} else {
			/* Defer the swap while Discover Identity is pending. */
			if (port->send_discover) {
				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
				break;
			}

			tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
		}
		break;
	case PD_CTRL_PR_SWAP:
		if (port->port_type != TYPEC_PORT_DRP) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		} else {
			/* Defer the swap while Discover Identity is pending. */
			if (port->send_discover) {
				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
				break;
			}

			tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
		}
		break;
	case PD_CTRL_VCONN_SWAP:
		/* Defer the swap while Discover Identity is pending. */
		if (port->send_discover) {
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}

		tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
		break;
	case PD_CTRL_GET_SOURCE_CAP_EXT:
	case PD_CTRL_GET_STATUS:
	case PD_CTRL_FR_SWAP:
	case PD_CTRL_GET_PPS_STATUS:
	case PD_CTRL_GET_COUNTRY_CODES:
		/* Currently not supported */
		tcpm_pd_handle_msg(port,
				   port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port,
				   port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		tcpm_log(port, "Unrecognized ctrl message type %#x", type);
		break;
	}
}
2867
/*
 * Handle a received PD extended message. Only chunked messages carrying at
 * most one chunk of data are supported; everything else gets Not_Supported.
 * Called from tcpm_pd_rx_handler() with port->lock held.
 */
static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
				    const struct pd_message *msg)
{
	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);

	/* stopping VDM state machine if interrupted by other Messages */
	if (tcpm_vdm_ams(port)) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		tcpm_log(port, "Unchunked extended messages unsupported");
		return;
	}

	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
		tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
		tcpm_log(port, "Chunk handling not yet supported");
		return;
	}

	switch (type) {
	case PD_EXT_STATUS:
	case PD_EXT_PPS_STATUS:
		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
			tcpm_ams_finish(port);
			tcpm_set_state(port, ready_state(port), 0);
		} else {
			/* unexpected Status or PPS_Status Message */
			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
					     NONE_AMS, 0);
		}
		break;
	case PD_EXT_SOURCE_CAP_EXT:
	case PD_EXT_GET_BATT_CAP:
	case PD_EXT_GET_BATT_STATUS:
	case PD_EXT_BATT_CAP:
	case PD_EXT_GET_MANUFACTURER_INFO:
	case PD_EXT_MANUFACTURER_INFO:
	case PD_EXT_SECURITY_REQUEST:
	case PD_EXT_SECURITY_RESPONSE:
	case PD_EXT_FW_UPDATE_REQUEST:
	case PD_EXT_FW_UPDATE_RESPONSE:
	case PD_EXT_COUNTRY_INFO:
	case PD_EXT_COUNTRY_CODES:
		/* Known but unimplemented extended message types. */
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		tcpm_log(port, "Unrecognized extended message type %#x", type);
		break;
	}
}
2926
/*
 * Worker that processes one received PD message on the TCPM kthread.
 * Frees the pd_rx_event allocated by tcpm_pd_receive() when done.
 */
static void tcpm_pd_rx_handler(struct kthread_work *work)
{
	struct pd_rx_event *event = container_of(work,
						 struct pd_rx_event, work);
	const struct pd_message *msg = &event->msg;
	unsigned int cnt = pd_header_cnt_le(msg->header);
	struct tcpm_port *port = event->port;

	mutex_lock(&port->lock);

	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
		 port->attached);

	/* Messages received while unattached are logged but dropped. */
	if (port->attached) {
		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
		unsigned int msgid = pd_header_msgid_le(msg->header);

		/*
		 * USB PD standard, 6.6.1.2:
		 * "... if MessageID value in a received Message is the
		 * same as the stored value, the receiver shall return a
		 * GoodCRC Message with that MessageID value and drop
		 * the Message (this is a retry of an already received
		 * Message). Note: this shall not apply to the Soft_Reset
		 * Message which always has a MessageID value of zero."
		 */
		if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
			goto done;
		port->rx_msgid = msgid;

		/*
		 * If both ends believe to be DFP/host, we have a data role
		 * mismatch.
		 */
		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
		    (port->data_role == TYPEC_HOST)) {
			tcpm_log(port,
				 "Data role mismatch, initiating error recovery");
			tcpm_set_state(port, ERROR_RECOVERY, 0);
		} else {
			/* Dispatch on message class: extended, data or control. */
			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
				tcpm_pd_ext_msg_request(port, msg);
			else if (cnt)
				tcpm_pd_data_request(port, msg);
			else
				tcpm_pd_ctrl_request(port, msg);
		}
	}

done:
	mutex_unlock(&port->lock);
	kfree(event);
}
2980
tcpm_pd_receive(struct tcpm_port * port,const struct pd_message * msg)2981 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
2982 {
2983 struct pd_rx_event *event;
2984
2985 event = kzalloc(sizeof(*event), GFP_ATOMIC);
2986 if (!event)
2987 return;
2988
2989 kthread_init_work(&event->work, tcpm_pd_rx_handler);
2990 event->port = port;
2991 memcpy(&event->msg, msg, sizeof(*msg));
2992 kthread_queue_work(port->wq, &event->work);
2993 }
2994 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
2995
tcpm_pd_send_control(struct tcpm_port * port,enum pd_ctrl_msg_type type)2996 static int tcpm_pd_send_control(struct tcpm_port *port,
2997 enum pd_ctrl_msg_type type)
2998 {
2999 struct pd_message msg;
3000
3001 memset(&msg, 0, sizeof(msg));
3002 msg.header = PD_HEADER_LE(type, port->pwr_role,
3003 port->data_role,
3004 port->negotiated_rev,
3005 port->message_id, 0);
3006
3007 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3008 }
3009
3010 /*
3011 * Send queued message without affecting state.
3012 * Return true if state machine should go back to sleep,
3013 * false otherwise.
3014 */
static bool tcpm_send_queued_message(struct tcpm_port *port)
{
	enum pd_msg_request queued_message;
	int ret;

	/* Drain the queue: sending may itself queue a follow-up message. */
	do {
		queued_message = port->queued_message;
		port->queued_message = PD_MSG_NONE;

		switch (queued_message) {
		case PD_MSG_CTRL_WAIT:
			tcpm_pd_send_control(port, PD_CTRL_WAIT);
			break;
		case PD_MSG_CTRL_REJECT:
			tcpm_pd_send_control(port, PD_CTRL_REJECT);
			break;
		case PD_MSG_CTRL_NOT_SUPP:
			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
			break;
		case PD_MSG_DATA_SINK_CAP:
			ret = tcpm_pd_send_sink_caps(port);
			if (ret < 0) {
				tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
				tcpm_set_state(port, SNK_SOFT_RESET, 0);
			}
			tcpm_ams_finish(port);
			break;
		case PD_MSG_DATA_SOURCE_CAP:
			ret = tcpm_pd_send_source_caps(port);
			if (ret < 0) {
				tcpm_log(port,
					 "Unable to send src caps, ret=%d",
					 ret);
				tcpm_set_state(port, SOFT_RESET_SEND, 0);
			} else if (port->pwr_role == TYPEC_SOURCE) {
				/* A source expects a Request within tSenderResponse. */
				tcpm_ams_finish(port);
				tcpm_set_state(port, HARD_RESET_SEND,
					       PD_T_SENDER_RESPONSE);
			} else {
				tcpm_ams_finish(port);
			}
			break;
		default:
			break;
		}
	} while (port->queued_message != PD_MSG_NONE);

	/* Re-arm the timer if a delayed state transition is still pending. */
	if (port->delayed_state != INVALID_STATE) {
		if (ktime_after(port->delayed_runtime, ktime_get())) {
			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
									  ktime_get())));
			return true;
		}
		port->delayed_state = INVALID_STATE;
	}
	return false;
}
3072
/*
 * Validate the sink's Request (RDO in port->sink_request) against our
 * advertised source PDOs. Returns 0 when acceptable, -EINVAL otherwise.
 */
static int tcpm_pd_check_request(struct tcpm_port *port)
{
	u32 pdo, rdo = port->sink_request;
	unsigned int max, op, pdo_max, index;
	enum pd_pdo_type type;

	/* Object positions are 1-based and must reference an advertised PDO. */
	index = rdo_index(rdo);
	if (!index || index > port->nr_src_pdo)
		return -EINVAL;

	pdo = port->src_pdo[index - 1];
	type = pdo_type(pdo);
	switch (type) {
	case PDO_TYPE_FIXED:
	case PDO_TYPE_VAR:
		max = rdo_max_current(rdo);
		op = rdo_op_current(rdo);
		pdo_max = pdo_max_current(pdo);

		/* Operating current must fit; max may exceed only with CAP_MISMATCH. */
		if (op > pdo_max)
			return -EINVAL;
		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
			return -EINVAL;

		if (type == PDO_TYPE_FIXED)
			tcpm_log(port,
				 "Requested %u mV, %u mA for %u / %u mA",
				 pdo_fixed_voltage(pdo), pdo_max, op, max);
		else
			tcpm_log(port,
				 "Requested %u -> %u mV, %u mA for %u / %u mA",
				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
				 pdo_max, op, max);
		break;
	case PDO_TYPE_BATT:
		/* Battery PDOs express limits in power rather than current. */
		max = rdo_max_power(rdo);
		op = rdo_op_power(rdo);
		pdo_max = pdo_max_power(pdo);

		if (op > pdo_max)
			return -EINVAL;
		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
			return -EINVAL;
		tcpm_log(port,
			 "Requested %u -> %u mV, %u mW for %u / %u mW",
			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
			 pdo_max, op, max);
		break;
	default:
		return -EINVAL;
	}

	/* Object position 1 is always the vSafe5V PDO. */
	port->op_vsafe5v = index == 1;

	return 0;
}
3129
/* Smaller of two PDOs' max power / max current, used when matching source and sink capabilities. */
#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3132
/*
 * Pick the best (source PDO, sink PDO) pair for a Fixed/Variable/Battery
 * contract: the compatible pair delivering the most power, preferring a
 * higher voltage when power is equal.  PPS APDOs are only recorded as
 * supported here (pps_data.supported / usb_type); they are selected
 * separately by tcpm_pd_select_pps_apdo().
 *
 * Returns 0 and fills *src_pdo / *sink_pdo (indices into the respective
 * capability arrays) on success, -EINVAL if no compatible pair exists.
 */
static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
			      int *src_pdo)
{
	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
		     min_snk_mv = 0;
	int ret = -EINVAL;

	port->pps_data.supported = false;
	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
	power_supply_changed(port->psy);

	/*
	 * Select the source PDO providing the most power which has a
	 * matching sink cap.
	 */
	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);

		/* Determine this source PDO's voltage range. */
		switch (type) {
		case PDO_TYPE_FIXED:
			max_src_mv = pdo_fixed_voltage(pdo);
			min_src_mv = max_src_mv;
			break;
		case PDO_TYPE_BATT:
		case PDO_TYPE_VAR:
			max_src_mv = pdo_max_voltage(pdo);
			min_src_mv = pdo_min_voltage(pdo);
			break;
		case PDO_TYPE_APDO:
			/* Note PPS support; APDO selection happens elsewhere. */
			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
				port->pps_data.supported = true;
				port->usb_type =
					POWER_SUPPLY_USB_TYPE_PD_PPS;
				power_supply_changed(port->psy);
			}
			continue;
		default:
			tcpm_log(port, "Invalid source PDO type, ignoring");
			continue;
		}

		/* Determine the power (mW) this source PDO can deliver. */
		switch (type) {
		case PDO_TYPE_FIXED:
		case PDO_TYPE_VAR:
			src_ma = pdo_max_current(pdo);
			src_mw = src_ma * min_src_mv / 1000;
			break;
		case PDO_TYPE_BATT:
			src_mw = pdo_max_power(pdo);
			break;
		case PDO_TYPE_APDO:
			continue;
		default:
			tcpm_log(port, "Invalid source PDO type, ignoring");
			continue;
		}

		/* Look for a sink PDO whose voltage window contains the source's. */
		for (j = 0; j < port->nr_snk_pdo; j++) {
			pdo = port->snk_pdo[j];

			switch (pdo_type(pdo)) {
			case PDO_TYPE_FIXED:
				max_snk_mv = pdo_fixed_voltage(pdo);
				min_snk_mv = max_snk_mv;
				break;
			case PDO_TYPE_BATT:
			case PDO_TYPE_VAR:
				max_snk_mv = pdo_max_voltage(pdo);
				min_snk_mv = pdo_min_voltage(pdo);
				break;
			case PDO_TYPE_APDO:
				continue;
			default:
				tcpm_log(port, "Invalid sink PDO type, ignoring");
				continue;
			}

			if (max_src_mv <= max_snk_mv &&
			    min_src_mv >= min_snk_mv) {
				/* Prefer higher voltages if available */
				if ((src_mw == max_mw && min_src_mv > max_mv) ||
				    src_mw > max_mw) {
					*src_pdo = i;
					*sink_pdo = j;
					max_mw = src_mw;
					max_mv = min_src_mv;
					ret = 0;
				}
			}
		}
	}

	return ret;
}
3229
/* Smaller of two PPS APDOs' maximum current (mA). */
#define min_pps_apdo_current(x, y)	\
	min(pdo_pps_apdo_max_current(x), pdo_pps_apdo_max_current(y))
3232
/*
 * Select the source PPS APDO / sink PPS APDO pair with overlapping voltage
 * ranges that yields the most power, preferring a higher operating voltage
 * on a power tie.  On success the pps_data request fields are updated
 * (out_volt / op_curr clamped into the new limits) and the chosen source
 * index (> 0) is returned; 0 means no usable pair was found.
 */
static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
	unsigned int i, j, max_mw = 0, max_mv = 0;
	unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
	unsigned int min_snk_mv, max_snk_mv;
	unsigned int max_op_mv;
	u32 pdo, src, snk;
	unsigned int src_pdo = 0, snk_pdo = 0;

	/*
	 * Select the source PPS APDO providing the most power while staying
	 * within the board's limits. We skip the first PDO as this is always
	 * 5V 3A.
	 */
	for (i = 1; i < port->nr_source_caps; ++i) {
		pdo = port->source_caps[i];

		switch (pdo_type(pdo)) {
		case PDO_TYPE_APDO:
			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
				tcpm_log(port, "Not PPS APDO (source), ignoring");
				continue;
			}

			min_src_mv = pdo_pps_apdo_min_voltage(pdo);
			max_src_mv = pdo_pps_apdo_max_voltage(pdo);
			src_ma = pdo_pps_apdo_max_current(pdo);
			src_mw = (src_ma * max_src_mv) / 1000;

			/*
			 * Now search through the sink PDOs to find a matching
			 * PPS APDO. Again skip the first sink PDO as this will
			 * always be 5V 3A.
			 */
			for (j = 1; j < port->nr_snk_pdo; j++) {
				/* NB: pdo now holds the sink entry, not the source's. */
				pdo = port->snk_pdo[j];

				switch (pdo_type(pdo)) {
				case PDO_TYPE_APDO:
					if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
						tcpm_log(port,
							 "Not PPS APDO (sink), ignoring");
						continue;
					}

					min_snk_mv =
						pdo_pps_apdo_min_voltage(pdo);
					max_snk_mv =
						pdo_pps_apdo_max_voltage(pdo);
					break;
				default:
					tcpm_log(port,
						 "Not APDO type (sink), ignoring");
					continue;
				}

				if (min_src_mv <= max_snk_mv &&
				    max_src_mv >= min_snk_mv) {
					/* Power at the highest mutually supported voltage. */
					max_op_mv = min(max_src_mv, max_snk_mv);
					src_mw = (max_op_mv * src_ma) / 1000;
					/* Prefer higher voltages if available */
					if ((src_mw == max_mw &&
					     max_op_mv > max_mv) ||
					    src_mw > max_mw) {
						src_pdo = i;
						snk_pdo = j;
						max_mw = src_mw;
						max_mv = max_op_mv;
					}
				}
			}

			break;
		default:
			tcpm_log(port, "Not APDO type (source), ignoring");
			continue;
		}
	}

	if (src_pdo) {
		/* Intersect source and sink limits, clamp previous request into them. */
		src = port->source_caps[src_pdo];
		snk = port->snk_pdo[snk_pdo];

		port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
						  pdo_pps_apdo_min_voltage(snk));
		port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
						  pdo_pps_apdo_max_voltage(snk));
		port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
		port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
						  max(port->pps_data.req_min_volt,
						      port->pps_data.req_out_volt));
		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
						 port->pps_data.req_op_curr);
	}

	return src_pdo;
}
3330
/*
 * Build a Request Data Object for a Fixed/Variable/Battery contract from
 * the best source/sink PDO pair.  Sets the Capability Mismatch flag when
 * the offered power is below our operating requirement, and caches the
 * negotiated current/voltage in the port.
 *
 * Returns 0 with *rdo filled in, or a negative errno if no suitable PDO
 * pair could be selected or the selected PDO type is invalid.
 */
static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int mv, ma, mw, flags;
	unsigned int max_ma, max_mw;
	enum pd_pdo_type type;
	u32 pdo, matching_snk_pdo;
	int src_pdo_index = 0;
	int snk_pdo_index = 0;
	int ret;

	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
	if (ret < 0)
		return ret;

	pdo = port->source_caps[src_pdo_index];
	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
	type = pdo_type(pdo);

	/* Request at the lowest voltage of the selected source PDO. */
	switch (type) {
	case PDO_TYPE_FIXED:
		mv = pdo_fixed_voltage(pdo);
		break;
	case PDO_TYPE_BATT:
	case PDO_TYPE_VAR:
		mv = pdo_min_voltage(pdo);
		break;
	default:
		tcpm_log(port, "Invalid PDO selected!");
		return -EINVAL;
	}

	/* Select maximum available current within the sink pdo's limit */
	if (type == PDO_TYPE_BATT) {
		mw = min_power(pdo, matching_snk_pdo);
		ma = 1000 * mw / mv;
	} else {
		ma = min_current(pdo, matching_snk_pdo);
		mw = ma * mv / 1000;
	}

	/* NOTE(review): USB_COMM/NO_SUSPEND set unconditionally here. */
	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	/* Set mismatch bit if offered power is less than operating power */
	max_ma = ma;
	max_mw = mw;
	if (mw < port->operating_snk_mw) {
		flags |= RDO_CAP_MISMATCH;
		/* Advertise our real need in the max field where it exceeds the offer. */
		if (type == PDO_TYPE_BATT &&
		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
			max_mw = pdo_max_power(matching_snk_pdo);
		else if (pdo_max_current(matching_snk_pdo) >
			 pdo_max_current(pdo))
			max_ma = pdo_max_current(matching_snk_pdo);
	}

	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);

	/* RDO object positions are 1-based, hence the +1. */
	if (type == PDO_TYPE_BATT) {
		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
			 src_pdo_index, mv, mw,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	} else {
		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
			 src_pdo_index, mv, ma,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	}

	port->req_current_limit = ma;
	port->req_supply_voltage = mv;

	return 0;
}
3410
tcpm_pd_send_request(struct tcpm_port * port)3411 static int tcpm_pd_send_request(struct tcpm_port *port)
3412 {
3413 struct pd_message msg;
3414 int ret;
3415 u32 rdo;
3416
3417 ret = tcpm_pd_build_request(port, &rdo);
3418 if (ret < 0)
3419 return ret;
3420
3421 /*
3422 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
3423 * It is safer to modify the threshold here.
3424 */
3425 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3426
3427 memset(&msg, 0, sizeof(msg));
3428 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3429 port->pwr_role,
3430 port->data_role,
3431 port->negotiated_rev,
3432 port->message_id, 1);
3433 msg.payload[0] = cpu_to_le32(rdo);
3434
3435 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3436 }
3437
/*
 * Build a Request Data Object for a PPS (programmable) contract.  Starts
 * from the previously clamped output voltage / operating current, then
 * raises current (and, if still short, voltage) in RDO programming steps
 * until the board's operating power requirement is met.
 *
 * Returns 0 with *rdo filled in, -EOPNOTSUPP if no PPS APDO pair is
 * available, or -EINVAL if the requirement cannot be met within the
 * selected APDO's limits.
 */
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
	enum pd_pdo_type type;
	unsigned int src_pdo_index;
	u32 pdo;

	src_pdo_index = tcpm_pd_select_pps_apdo(port);
	if (!src_pdo_index)
		return -EOPNOTSUPP;

	pdo = port->source_caps[src_pdo_index];
	type = pdo_type(pdo);

	switch (type) {
	case PDO_TYPE_APDO:
		if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
			tcpm_log(port, "Invalid APDO selected!");
			return -EINVAL;
		}
		/* Limits and starting point computed by the APDO selector. */
		max_mv = port->pps_data.req_max_volt;
		max_ma = port->pps_data.req_max_curr;
		out_mv = port->pps_data.req_out_volt;
		op_ma = port->pps_data.req_op_curr;
		break;
	default:
		tcpm_log(port, "Invalid PDO selected!");
		return -EINVAL;
	}

	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	op_mw = (op_ma * out_mv) / 1000;
	if (op_mw < port->operating_snk_mw) {
		/*
		 * Try raising current to meet power needs. If that's not enough
		 * then try upping the voltage. If that's still not enough
		 * then we've obviously chosen a PPS APDO which really isn't
		 * suitable so abandon ship.
		 */
		op_ma = (port->operating_snk_mw * 1000) / out_mv;
		if ((port->operating_snk_mw * 1000) % out_mv)
			++op_ma;
		/*
		 * Round up to the next RDO programming step.
		 * NOTE(review): adds a full step even when op_ma is already
		 * aligned — looks intentional-but-conservative, confirm.
		 */
		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);

		if (op_ma > max_ma) {
			/* Current capped at the limit: raise voltage instead. */
			op_ma = max_ma;
			out_mv = (port->operating_snk_mw * 1000) / op_ma;
			if ((port->operating_snk_mw * 1000) % op_ma)
				++out_mv;
			out_mv += RDO_PROG_VOLT_MV_STEP -
				  (out_mv % RDO_PROG_VOLT_MV_STEP);

			if (out_mv > max_mv) {
				tcpm_log(port, "Invalid PPS APDO selected!");
				return -EINVAL;
			}
		}
	}

	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);

	/* RDO object positions are 1-based, hence the +1. */
	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);

	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
		 src_pdo_index, out_mv, op_ma);

	/* Remember what we actually asked for. */
	port->pps_data.req_op_curr = op_ma;
	port->pps_data.req_out_volt = out_mv;

	return 0;
}
3513
tcpm_pd_send_pps_request(struct tcpm_port * port)3514 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
3515 {
3516 struct pd_message msg;
3517 int ret;
3518 u32 rdo;
3519
3520 ret = tcpm_pd_build_pps_request(port, &rdo);
3521 if (ret < 0)
3522 return ret;
3523
3524 /* Relax the threshold as voltage will be adjusted right after Accept Message. */
3525 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3526
3527 memset(&msg, 0, sizeof(msg));
3528 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3529 port->pwr_role,
3530 port->data_role,
3531 port->negotiated_rev,
3532 port->message_id, 1);
3533 msg.payload[0] = cpu_to_le32(rdo);
3534
3535 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3536 }
3537
/*
 * Enable or disable VBUS sourcing via the TCPC.  Refuses to source while
 * sink charging is active.  Updates port->vbus_source on success.
 */
static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
{
	int err;

	/* Sourcing VBUS while we are charging from it is not allowed. */
	if (enable && port->vbus_charge)
		return -EINVAL;

	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);

	err = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
	if (err < 0)
		return err;

	port->vbus_source = enable;
	return 0;
}
3554
/*
 * Enable or disable sink charging from VBUS.  Refuses to charge while we
 * are sourcing VBUS.  Only touches the TCPC when the state changes, but
 * always records the new state and notifies the power supply class.
 */
static int tcpm_set_charge(struct tcpm_port *port, bool charge)
{
	/* Charging while sourcing VBUS ourselves is not allowed. */
	if (charge && port->vbus_source)
		return -EINVAL;

	if (charge != port->vbus_charge) {
		int err;

		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
		err = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
					   charge);
		if (err < 0)
			return err;
	}
	port->vbus_charge = charge;
	power_supply_changed(port->psy);
	return 0;
}
3573
tcpm_start_toggling(struct tcpm_port * port,enum typec_cc_status cc)3574 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
3575 {
3576 int ret;
3577
3578 if (!port->tcpc->start_toggling)
3579 return false;
3580
3581 tcpm_log_force(port, "Start toggling");
3582 ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
3583 return ret == 0;
3584 }
3585
tcpm_init_vbus(struct tcpm_port * port)3586 static int tcpm_init_vbus(struct tcpm_port *port)
3587 {
3588 int ret;
3589
3590 ret = port->tcpc->set_vbus(port->tcpc, false, false);
3591 port->vbus_source = false;
3592 port->vbus_charge = false;
3593 return ret;
3594 }
3595
tcpm_init_vconn(struct tcpm_port * port)3596 static int tcpm_init_vconn(struct tcpm_port *port)
3597 {
3598 int ret;
3599
3600 ret = port->tcpc->set_vconn(port->tcpc, false);
3601 port->vconn_role = TYPEC_SINK;
3602 return ret;
3603 }
3604
tcpm_typec_connect(struct tcpm_port * port)3605 static void tcpm_typec_connect(struct tcpm_port *port)
3606 {
3607 if (!port->connected) {
3608 /* Make sure we don't report stale identity information */
3609 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
3610 port->partner_desc.usb_pd = port->pd_capable;
3611 if (tcpm_port_is_debug(port))
3612 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
3613 else if (tcpm_port_is_audio(port))
3614 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
3615 else
3616 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
3617 port->partner = typec_register_partner(port->typec_port,
3618 &port->partner_desc);
3619 port->connected = true;
3620 }
3621 }
3622
/*
 * Bring the port up as a source after attach detection: set polarity,
 * roles, PD receive, VCONN (when the other CC pin shows Ra) and VBUS, in
 * that order.  On failure, unwinds in reverse via the goto chain.
 * Idempotent: returns 0 immediately if already attached.
 */
static int tcpm_src_attach(struct tcpm_port *port)
{
	/* The partner's Rd pin determines the connector orientation. */
	enum typec_cc_polarity polarity =
				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
							 : TYPEC_POLARITY_CC1;
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, polarity);
	if (ret < 0)
		return ret;

	tcpm_enable_auto_vbus_discharge(port, true);

	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_pd_rx(port->tcpc, true);
	if (ret < 0)
		goto out_disable_mux;

	/*
	 * USB Type-C specification, version 1.2,
	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
	 * Enable VCONN only if the non-RD port is set to RA.
	 */
	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
		ret = tcpm_set_vconn(port, true);
		if (ret < 0)
			goto out_disable_pd;
	}

	ret = tcpm_set_vbus(port, true);
	if (ret < 0)
		goto out_disable_vconn;

	/* PD capability is unknown until the partner responds. */
	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	port->debouncing = false;
	port->send_discover = true;

	return 0;

out_disable_vconn:
	tcpm_set_vconn(port, false);
out_disable_pd:
	port->tcpc->set_pd_rx(port->tcpc, false);
out_disable_mux:
	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
		     TYPEC_ORIENTATION_NONE);
	return ret;
}
3682
tcpm_typec_disconnect(struct tcpm_port * port)3683 static void tcpm_typec_disconnect(struct tcpm_port *port)
3684 {
3685 if (port->connected) {
3686 typec_unregister_partner(port->partner);
3687 port->partner = NULL;
3688 port->connected = false;
3689 }
3690 }
3691
tcpm_unregister_altmodes(struct tcpm_port * port)3692 static void tcpm_unregister_altmodes(struct tcpm_port *port)
3693 {
3694 struct pd_mode_data *modep = &port->mode_data;
3695 int i;
3696
3697 for (i = 0; i < modep->altmodes; i++) {
3698 typec_unregister_altmode(port->partner_altmode[i]);
3699 port->partner_altmode[i] = NULL;
3700 }
3701
3702 memset(modep, 0, sizeof(*modep));
3703 }
3704
/* Inform the TCPC (if it cares) whether the partner supports USB comms. */
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
{
	tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");

	if (!port->tcpc->set_partner_usb_comm_capable)
		return;

	port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
}
3712
/*
 * Tear the port all the way back to its unattached baseline: stop any
 * AMS, drop alt modes and the partner, disable PD receive, VBUS, VCONN
 * and current limit, park the mux in the safe state and clear all
 * negotiation bookkeeping.  The teardown ordering mirrors (reverses)
 * the attach sequence.
 */
static void tcpm_reset_port(struct tcpm_port *port)
{
	tcpm_enable_auto_vbus_discharge(port, false);
	port->in_ams = false;
	port->ams = NONE_AMS;
	port->vdm_sm_running = false;
	tcpm_unregister_altmodes(port);
	tcpm_typec_disconnect(port);
	port->attached = false;
	port->pd_capable = false;
	port->pps_data.supported = false;
	tcpm_set_partner_usb_comm_capable(port, false);

	/*
	 * First Rx ID should be 0; set this to a sentinel of -1 so that
	 * we can check tcpm_pd_rx_handler() if we had seen it before.
	 */
	port->rx_msgid = -1;

	port->tcpc->set_pd_rx(port->tcpc, false);
	tcpm_init_vbus(port);	/* also disables charging */
	tcpm_init_vconn(port);
	tcpm_set_current_limit(port, 0, 0);
	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
		     TYPEC_ORIENTATION_NONE);
	tcpm_set_attached_state(port, false);
	port->try_src_count = 0;
	port->try_snk_count = 0;
	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
	power_supply_changed(port->psy);
	port->nr_sink_caps = 0;
	port->sink_cap_done = false;
	if (port->tcpc->enable_frs)
		port->tcpc->enable_frs(port->tcpc, false);
}
3749
/*
 * Common detach path: clear the hard reset counter when the partner is
 * truly gone (checked before the attached test on purpose), leave any
 * BIST test mode, then reset the port.  No-op if not attached.
 */
static void tcpm_detach(struct tcpm_port *port)
{
	if (tcpm_port_is_disconnected(port))
		port->hard_reset_count = 0;

	if (!port->attached)
		return;

	if (port->tcpc->set_bist_data) {
		tcpm_log(port, "disable BIST MODE TESTDATA");
		port->tcpc->set_bist_data(port->tcpc, false);
	}

	tcpm_reset_port(port);
}
3765
/* Source-role detach; currently identical to the common detach path. */
static void tcpm_src_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
3770
/*
 * Bring the port up as a sink after attach detection: set polarity from
 * which CC pin is live, enable auto VBUS discharge and set sink roles.
 * Idempotent: returns 0 immediately if already attached.
 */
static int tcpm_snk_attach(struct tcpm_port *port)
{
	int ret;

	if (port->attached)
		return 0;

	/* CC2 live (not open) means the cable is flipped. */
	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
	if (ret < 0)
		return ret;

	tcpm_enable_auto_vbus_discharge(port, true);

	ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
	if (ret < 0)
		return ret;

	/* PD capability is unknown until the partner responds. */
	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	port->debouncing = false;
	port->send_discover = true;

	return 0;
}
3799
/* Sink-role detach; currently identical to the common detach path. */
static void tcpm_snk_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
3804
/*
 * Attach as an accessory (debug/audio): set source roles and register
 * the partner immediately — accessories have no PD negotiation phase.
 * Idempotent: returns 0 immediately if already attached.
 */
static int tcpm_acc_attach(struct tcpm_port *port)
{
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
			     tcpm_data_role_for_source(port));
	if (ret < 0)
		return ret;

	port->partner = NULL;

	tcpm_typec_connect(port);

	port->attached = true;
	port->debouncing = false;

	return 0;
}
3826
/* Accessory detach; currently identical to the common detach path. */
static void tcpm_acc_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
3831
hard_reset_state(struct tcpm_port * port)3832 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
3833 {
3834 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
3835 return HARD_RESET_SEND;
3836 if (port->pd_capable)
3837 return ERROR_RECOVERY;
3838 if (port->pwr_role == TYPEC_SOURCE)
3839 return SRC_UNATTACHED;
3840 if (port->state == SNK_WAIT_CAPABILITIES)
3841 return SNK_READY;
3842 return SNK_UNATTACHED;
3843 }
3844
unattached_state(struct tcpm_port * port)3845 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
3846 {
3847 if (port->port_type == TYPEC_PORT_DRP) {
3848 if (port->pwr_role == TYPEC_SOURCE)
3849 return SRC_UNATTACHED;
3850 else
3851 return SNK_UNATTACHED;
3852 } else if (port->port_type == TYPEC_PORT_SRC) {
3853 return SRC_UNATTACHED;
3854 }
3855
3856 return SNK_UNATTACHED;
3857 }
3858
tcpm_is_toggling(struct tcpm_port * port)3859 bool tcpm_is_toggling(struct tcpm_port *port)
3860 {
3861 if (port->port_type == TYPEC_PORT_DRP)
3862 return port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
3863 port->state == TOGGLING;
3864
3865 return false;
3866 }
3867 EXPORT_SYMBOL_GPL(tcpm_is_toggling);
3868
tcpm_swap_complete(struct tcpm_port * port,int result)3869 static void tcpm_swap_complete(struct tcpm_port *port, int result)
3870 {
3871 if (port->swap_pending) {
3872 port->swap_status = result;
3873 port->swap_pending = false;
3874 port->non_pd_role_swap = false;
3875 complete(&port->swap_complete);
3876 }
3877 }
3878
tcpm_get_pwr_opmode(enum typec_cc_status cc)3879 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
3880 {
3881 switch (cc) {
3882 case TYPEC_CC_RP_1_5:
3883 return TYPEC_PWR_MODE_1_5A;
3884 case TYPEC_CC_RP_3_0:
3885 return TYPEC_PWR_MODE_3_0A;
3886 case TYPEC_CC_RP_DEF:
3887 default:
3888 return TYPEC_PWR_MODE_USB;
3889 }
3890 }
3891
run_state_machine(struct tcpm_port * port)3892 static void run_state_machine(struct tcpm_port *port)
3893 {
3894 int ret;
3895 enum typec_pwr_opmode opmode;
3896 unsigned int msecs, timer_val_msecs;
3897 enum tcpm_state upcoming_state;
3898 const char *state_name;
3899 u32 current_limit;
3900 bool adjust;
3901
3902 port->enter_state = port->state;
3903 switch (port->state) {
3904 case TOGGLING:
3905 break;
3906 /* SRC states */
3907 case SRC_UNATTACHED:
3908 if (!port->non_pd_role_swap)
3909 tcpm_swap_complete(port, -ENOTCONN);
3910 tcpm_src_detach(port);
3911 if (port->debouncing) {
3912 port->debouncing = false;
3913 if (port->tcpc->check_contaminant &&
3914 port->tcpc->check_contaminant(port->tcpc)) {
3915 /* Contaminant detection would handle toggling */
3916 tcpm_set_state(port, TOGGLING, 0);
3917 break;
3918 }
3919 }
3920 if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
3921 tcpm_set_state(port, TOGGLING, 0);
3922 break;
3923 }
3924 tcpm_set_cc(port, tcpm_rp_cc(port));
3925 if (port->port_type == TYPEC_PORT_DRP)
3926 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
3927 break;
3928 case SRC_ATTACH_WAIT:
3929 port->debouncing = true;
3930 timer_val_msecs = PD_T_CC_DEBOUNCE;
3931 trace_android_vh_typec_tcpm_get_timer(tcpm_states[SRC_ATTACH_WAIT],
3932 CC_DEBOUNCE, &timer_val_msecs);
3933 if (tcpm_port_is_debug(port))
3934 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
3935 timer_val_msecs);
3936 else if (tcpm_port_is_audio(port))
3937 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
3938 timer_val_msecs);
3939 else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
3940 tcpm_set_state(port,
3941 tcpm_try_snk(port) ? SNK_TRY
3942 : SRC_ATTACHED,
3943 timer_val_msecs);
3944 break;
3945
3946 case SNK_TRY:
3947 port->debouncing = false;
3948 port->try_snk_count++;
3949 /*
3950 * Requirements:
3951 * - Do not drive vconn or vbus
3952 * - Terminate CC pins (both) to Rd
3953 * Action:
3954 * - Wait for tDRPTry (PD_T_DRP_TRY).
3955 * Until then, ignore any state changes.
3956 */
3957 tcpm_set_cc(port, TYPEC_CC_RD);
3958 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
3959 break;
3960 case SNK_TRY_WAIT:
3961 if (tcpm_port_is_sink(port)) {
3962 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
3963 } else {
3964 tcpm_set_state(port, SRC_TRYWAIT, 0);
3965 port->max_wait = 0;
3966 }
3967 break;
3968 case SNK_TRY_WAIT_DEBOUNCE:
3969 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
3970 PD_T_TRY_CC_DEBOUNCE);
3971 break;
3972 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
3973 if (port->vbus_present && tcpm_port_is_sink(port))
3974 tcpm_set_state(port, SNK_ATTACHED, 0);
3975 else
3976 port->max_wait = 0;
3977 break;
3978 case SRC_TRYWAIT:
3979 tcpm_set_cc(port, tcpm_rp_cc(port));
3980 if (port->max_wait == 0) {
3981 port->max_wait = jiffies +
3982 msecs_to_jiffies(PD_T_DRP_TRY);
3983 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
3984 PD_T_DRP_TRY);
3985 } else {
3986 if (time_is_after_jiffies(port->max_wait))
3987 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
3988 jiffies_to_msecs(port->max_wait -
3989 jiffies));
3990 else
3991 tcpm_set_state(port, SNK_UNATTACHED, 0);
3992 }
3993 break;
3994 case SRC_TRYWAIT_DEBOUNCE:
3995 timer_val_msecs = PD_T_CC_DEBOUNCE;
3996 trace_android_vh_typec_tcpm_get_timer(tcpm_states[SRC_TRYWAIT_DEBOUNCE],
3997 CC_DEBOUNCE, &timer_val_msecs);
3998 tcpm_set_state(port, SRC_ATTACHED, timer_val_msecs);
3999 break;
4000 case SRC_TRYWAIT_UNATTACHED:
4001 tcpm_set_state(port, SNK_UNATTACHED, 0);
4002 break;
4003
4004 case SRC_ATTACHED:
4005 ret = tcpm_src_attach(port);
4006 tcpm_set_state(port, SRC_UNATTACHED,
4007 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4008 break;
4009 case SRC_STARTUP:
4010 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4011 typec_set_pwr_opmode(port->typec_port, opmode);
4012 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4013 port->caps_count = 0;
4014 port->negotiated_rev = (((port->typec_caps.pd_revision >> 8) & 0xff) - 1);
4015 port->message_id = 0;
4016 port->rx_msgid = -1;
4017 port->explicit_contract = false;
4018 /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4019 if (port->ams == POWER_ROLE_SWAP ||
4020 port->ams == FAST_ROLE_SWAP)
4021 tcpm_ams_finish(port);
4022 port->upcoming_state = SRC_SEND_CAPABILITIES;
4023 tcpm_ams_start(port, POWER_NEGOTIATION);
4024 break;
4025 case SRC_SEND_CAPABILITIES:
4026 port->caps_count++;
4027 if (port->caps_count > PD_N_CAPS_COUNT) {
4028 tcpm_set_state(port, SRC_READY, 0);
4029 break;
4030 }
4031 ret = tcpm_pd_send_source_caps(port);
4032 if (ret < 0) {
4033 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4034 PD_T_SEND_SOURCE_CAP);
4035 } else {
4036 /*
4037 * Per standard, we should clear the reset counter here.
4038 * However, that can result in state machine hang-ups.
4039 * Reset it only in READY state to improve stability.
4040 */
4041 /* port->hard_reset_count = 0; */
4042 port->caps_count = 0;
4043 port->pd_capable = true;
4044 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4045 PD_T_SEND_SOURCE_CAP);
4046 }
4047 break;
4048 case SRC_SEND_CAPABILITIES_TIMEOUT:
4049 /*
4050 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4051 *
4052 * PD 2.0 sinks are supposed to accept src-capabilities with a
4053 * 3.0 header and simply ignore any src PDOs which the sink does
4054 * not understand such as PPS but some 2.0 sinks instead ignore
4055 * the entire PD_DATA_SOURCE_CAP message, causing contract
4056 * negotiation to fail.
4057 *
4058 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4059 * sending src-capabilities with a lower PD revision to
4060 * make these broken sinks work.
4061 */
4062 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4063 tcpm_set_state(port, HARD_RESET_SEND, 0);
4064 } else if (port->negotiated_rev > PD_REV20) {
4065 port->negotiated_rev--;
4066 port->hard_reset_count = 0;
4067 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4068 } else {
4069 tcpm_set_state(port, hard_reset_state(port), 0);
4070 }
4071 break;
4072 case SRC_NEGOTIATE_CAPABILITIES:
4073 ret = tcpm_pd_check_request(port);
4074 if (ret < 0) {
4075 tcpm_pd_send_control(port, PD_CTRL_REJECT);
4076 if (!port->explicit_contract) {
4077 tcpm_set_state(port,
4078 SRC_WAIT_NEW_CAPABILITIES, 0);
4079 } else {
4080 tcpm_set_state(port, SRC_READY, 0);
4081 }
4082 } else {
4083 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4084 tcpm_set_partner_usb_comm_capable(port,
4085 !!(port->sink_request & RDO_USB_COMM));
4086 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4087 PD_T_SRC_TRANSITION);
4088 }
4089 break;
4090 case SRC_TRANSITION_SUPPLY:
4091 /* XXX: regulator_set_voltage(vbus, ...) */
4092 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4093 port->explicit_contract = true;
4094 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4095 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4096 tcpm_set_state_cond(port, SRC_READY, 0);
4097 break;
4098 case SRC_READY:
4099 #if 1
4100 port->hard_reset_count = 0;
4101 #endif
4102 port->try_src_count = 0;
4103
4104 tcpm_swap_complete(port, 0);
4105 tcpm_typec_connect(port);
4106
4107 if (port->ams != NONE_AMS)
4108 tcpm_ams_finish(port);
4109 if (port->next_ams != NONE_AMS) {
4110 port->ams = port->next_ams;
4111 port->next_ams = NONE_AMS;
4112 }
4113
4114 /*
4115 * If previous AMS is interrupted, switch to the upcoming
4116 * state.
4117 */
4118 if (port->upcoming_state != INVALID_STATE) {
4119 upcoming_state = port->upcoming_state;
4120 port->upcoming_state = INVALID_STATE;
4121 tcpm_set_state(port, upcoming_state, 0);
4122 break;
4123 }
4124
4125 /*
4126 * 6.4.4.3.1 Discover Identity
4127 * "The Discover Identity Command Shall only be sent to SOP when there is an
4128 * Explicit Contract."
4129 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4130 * port->explicit_contract to decide whether to send the command.
4131 */
4132 if (port->explicit_contract)
4133 mod_send_discover_delayed_work(port, 0);
4134 else
4135 port->send_discover = false;
4136
4137 /*
4138 * 6.3.5
4139 * Sending ping messages is not necessary if
4140 * - the source operates at vSafe5V
4141 * or
4142 * - The system is not operating in PD mode
4143 * or
4144 * - Both partners are connected using a Type-C connector
4145 *
4146 * There is no actual need to send PD messages since the local
4147 * port type-c and the spec does not clearly say whether PD is
4148 * possible when type-c is connected to Type-A/B
4149 */
4150 break;
4151 case SRC_WAIT_NEW_CAPABILITIES:
4152 /* Nothing to do... */
4153 break;
4154
4155 /* SNK states */
4156 case SNK_UNATTACHED:
4157 if (!port->non_pd_role_swap)
4158 tcpm_swap_complete(port, -ENOTCONN);
4159 tcpm_pps_complete(port, -ENOTCONN);
4160 tcpm_snk_detach(port);
4161 if (port->debouncing) {
4162 port->debouncing = false;
4163 if (port->tcpc->check_contaminant &&
4164 port->tcpc->check_contaminant(port->tcpc)) {
4165 /* Contaminant detection would handle toggling */
4166 tcpm_set_state(port, TOGGLING, 0);
4167 break;
4168 }
4169 }
4170 if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
4171 tcpm_set_state(port, TOGGLING, 0);
4172 break;
4173 }
4174 tcpm_set_cc(port, TYPEC_CC_RD);
4175 if (port->port_type == TYPEC_PORT_DRP)
4176 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
4177 break;
4178 case SNK_ATTACH_WAIT:
4179 port->debouncing = true;
4180 timer_val_msecs = PD_T_CC_DEBOUNCE;
4181 trace_android_vh_typec_tcpm_get_timer(tcpm_states[SNK_ATTACH_WAIT],
4182 CC_DEBOUNCE, &timer_val_msecs);
4183 if ((port->cc1 == TYPEC_CC_OPEN &&
4184 port->cc2 != TYPEC_CC_OPEN) ||
4185 (port->cc1 != TYPEC_CC_OPEN &&
4186 port->cc2 == TYPEC_CC_OPEN))
4187 tcpm_set_state(port, SNK_DEBOUNCED,
4188 timer_val_msecs);
4189 else if (tcpm_port_is_disconnected(port))
4190 tcpm_set_state(port, SNK_UNATTACHED,
4191 timer_val_msecs);
4192 break;
4193 case SNK_DEBOUNCED:
4194 if (tcpm_port_is_disconnected(port)) {
4195 tcpm_set_state(port, SNK_UNATTACHED,
4196 PD_T_PD_DEBOUNCE);
4197 } else if (port->vbus_present) {
4198 tcpm_set_state(port,
4199 tcpm_try_src(port) ? SRC_TRY
4200 : SNK_ATTACHED,
4201 0);
4202 port->debouncing = false;
4203 } else {
4204 port->debouncing = false;
4205 }
4206 break;
4207 case SRC_TRY:
4208 port->try_src_count++;
4209 tcpm_set_cc(port, tcpm_rp_cc(port));
4210 port->max_wait = 0;
4211 tcpm_set_state(port, SRC_TRY_WAIT, 0);
4212 break;
4213 case SRC_TRY_WAIT:
4214 if (port->max_wait == 0) {
4215 port->max_wait = jiffies +
4216 msecs_to_jiffies(PD_T_DRP_TRY);
4217 msecs = PD_T_DRP_TRY;
4218 } else {
4219 if (time_is_after_jiffies(port->max_wait))
4220 msecs = jiffies_to_msecs(port->max_wait -
4221 jiffies);
4222 else
4223 msecs = 0;
4224 }
4225 tcpm_set_state(port, SNK_TRYWAIT, msecs);
4226 break;
4227 case SRC_TRY_DEBOUNCE:
4228 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
4229 break;
4230 case SNK_TRYWAIT:
4231 timer_val_msecs = PD_T_CC_DEBOUNCE;
4232 trace_android_vh_typec_tcpm_get_timer(tcpm_states[SNK_TRYWAIT],
4233 CC_DEBOUNCE, &timer_val_msecs);
4234 tcpm_set_cc(port, TYPEC_CC_RD);
4235 tcpm_set_state(port, SNK_TRYWAIT_VBUS, timer_val_msecs);
4236 break;
4237 case SNK_TRYWAIT_VBUS:
4238 /*
4239 * TCPM stays in this state indefinitely until VBUS
4240 * is detected as long as Rp is not detected for
4241 * more than a time period of tPDDebounce.
4242 */
4243 if (port->vbus_present && tcpm_port_is_sink(port)) {
4244 tcpm_set_state(port, SNK_ATTACHED, 0);
4245 break;
4246 }
4247 if (!tcpm_port_is_sink(port))
4248 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
4249 break;
4250 case SNK_TRYWAIT_DEBOUNCE:
4251 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
4252 break;
4253 case SNK_ATTACHED:
4254 ret = tcpm_snk_attach(port);
4255 if (ret < 0)
4256 tcpm_set_state(port, SNK_UNATTACHED, 0);
4257 else
4258 tcpm_set_state(port, SNK_STARTUP, 0);
4259 break;
4260 case SNK_STARTUP:
4261 opmode = tcpm_get_pwr_opmode(port->polarity ?
4262 port->cc2 : port->cc1);
4263 typec_set_pwr_opmode(port->typec_port, opmode);
4264 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4265 port->negotiated_rev = (((port->typec_caps.pd_revision >> 8) & 0xff) - 1);
4266 port->message_id = 0;
4267 port->rx_msgid = -1;
4268 port->explicit_contract = false;
4269
4270 if (port->ams == POWER_ROLE_SWAP ||
4271 port->ams == FAST_ROLE_SWAP)
4272 /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
4273 tcpm_ams_finish(port);
4274
4275 timer_val_msecs = 0;
4276 trace_android_vh_typec_tcpm_get_timer(tcpm_states[SNK_STARTUP],
4277 SINK_DISCOVERY_BC12, &timer_val_msecs);
4278 tcpm_set_state(port, SNK_DISCOVERY, timer_val_msecs);
4279 break;
4280 case SNK_DISCOVERY:
4281 if (port->vbus_present) {
4282 current_limit = tcpm_get_current_limit(port);
4283 trace_android_vh_typec_tcpm_adj_current_limit(tcpm_states[SNK_DISCOVERY],
4284 port->current_limit,
4285 port->supply_voltage,
4286 port->pd_capable,
							       &current_limit, &adjust);
4288 if (port->slow_charger_loop && (current_limit > PD_P_SNK_STDBY_MW / 5))
4289 current_limit = PD_P_SNK_STDBY_MW / 5;
4290 tcpm_set_current_limit(port, current_limit, 5000);
4291 tcpm_set_charge(port, true);
4292 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4293 break;
4294 }
4295 /*
4296 * For DRP, timeouts differ. Also, handling is supposed to be
4297 * different and much more complex (dead battery detection;
4298 * see USB power delivery specification, section 8.3.3.6.1.5.1).
4299 */
4300 tcpm_set_state(port, hard_reset_state(port),
4301 port->port_type == TYPEC_PORT_DRP ?
4302 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
4303 break;
4304 case SNK_DISCOVERY_DEBOUNCE:
4305 timer_val_msecs = PD_T_CC_DEBOUNCE;
4306 trace_android_vh_typec_tcpm_get_timer(tcpm_states[SNK_DISCOVERY_DEBOUNCE],
4307 CC_DEBOUNCE, &timer_val_msecs);
4308 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE, timer_val_msecs);
4309 break;
4310 case SNK_DISCOVERY_DEBOUNCE_DONE:
4311 if (!tcpm_port_is_disconnected(port) &&
4312 tcpm_port_is_sink(port) &&
4313 ktime_after(port->delayed_runtime, ktime_get())) {
4314 tcpm_set_state(port, SNK_DISCOVERY,
4315 ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
4316 break;
4317 }
4318 tcpm_set_state(port, unattached_state(port), 0);
4319 break;
4320 case SNK_WAIT_CAPABILITIES:
4321 if (port->prev_state != SOFT_RESET_SEND) {
4322 ret = port->tcpc->set_pd_rx(port->tcpc, true);
4323 if (ret < 0) {
4324 tcpm_set_state(port, SNK_READY, 0);
4325 break;
4326 }
4327 }
4328 timer_val_msecs = PD_T_SINK_WAIT_CAP;
4329 trace_android_vh_typec_tcpm_get_timer(tcpm_states[SNK_WAIT_CAPABILITIES],
4330 SINK_WAIT_CAP, &timer_val_msecs);
4331 /*
4332 * If VBUS has never been low, and we time out waiting
4333 * for source cap, try a soft reset first, in case we
4334 * were already in a stable contract before this boot.
4335 * Do this only once.
4336 */
4337 if (port->vbus_never_low) {
4338 port->vbus_never_low = false;
4339 tcpm_set_state(port, SNK_SOFT_RESET,
4340 timer_val_msecs);
4341 } else {
4342 tcpm_set_state(port, hard_reset_state(port),
4343 timer_val_msecs);
4344 }
4345 break;
4346 case SNK_NEGOTIATE_CAPABILITIES:
4347 port->pd_capable = true;
4348 tcpm_set_partner_usb_comm_capable(port,
4349 !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
4350 port->hard_reset_count = 0;
4351 ret = tcpm_pd_send_request(port);
4352 if (ret < 0) {
4353 /* Restore back to the original state */
4354 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4355 port->pps_data.active,
4356 port->supply_voltage);
4357 /* Let the Source send capabilities again. */
4358 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4359 } else {
4360 tcpm_set_state_cond(port, hard_reset_state(port),
4361 PD_T_SENDER_RESPONSE);
4362 }
4363 break;
4364 case SNK_NEGOTIATE_PPS_CAPABILITIES:
4365 ret = tcpm_pd_send_pps_request(port);
4366 if (ret < 0) {
4367 /* Restore back to the original state */
4368 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4369 port->pps_data.active,
4370 port->supply_voltage);
4371 port->pps_status = ret;
4372 /*
4373 * If this was called due to updates to sink
4374 * capabilities, and pps is no longer valid, we should
4375 * safely fall back to a standard PDO.
4376 */
4377 if (port->update_sink_caps)
4378 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
4379 else
4380 tcpm_set_state(port, SNK_READY, 0);
4381 } else {
4382 tcpm_set_state_cond(port, hard_reset_state(port),
4383 PD_T_SENDER_RESPONSE);
4384 }
4385 break;
4386 case SNK_TRANSITION_SINK:
4387 /* From the USB PD spec:
4388 * "The Sink Shall transition to Sink Standby before a positive or
4389 * negative voltage transition of VBUS. During Sink Standby
4390 * the Sink Shall reduce its power draw to pSnkStdby."
4391 *
4392 * This is not applicable to PPS though as the port can continue
4393 * to draw negotiated power without switching to standby.
4394 */
4395 if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
4396 port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
4397 u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
4398
4399 tcpm_log(port, "Setting standby current %u mV @ %u mA",
4400 port->supply_voltage, stdby_ma);
4401 tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
4402 }
4403 fallthrough;
4404 case SNK_TRANSITION_SINK_VBUS:
4405 tcpm_set_state(port, hard_reset_state(port),
4406 PD_T_PS_TRANSITION);
4407 break;
4408 case SNK_READY:
4409 port->try_snk_count = 0;
4410 port->update_sink_caps = false;
4411 if (port->explicit_contract) {
4412 typec_set_pwr_opmode(port->typec_port,
4413 TYPEC_PWR_MODE_PD);
4414 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4415 }
4416
4417 current_limit = tcpm_get_current_limit(port);
4418 adjust = false;
4419 trace_android_vh_typec_tcpm_adj_current_limit(tcpm_states[SNK_READY],
4420 port->current_limit,
4421 port->supply_voltage,
4422 port->pd_capable,
							      &current_limit,
4424 &adjust);
4425 if (adjust)
4426 tcpm_set_current_limit(port, current_limit, 5000);
4427
4428 if (!port->pd_capable && port->slow_charger_loop)
4429 tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
4430 tcpm_swap_complete(port, 0);
4431 tcpm_typec_connect(port);
4432 mod_enable_frs_delayed_work(port, 0);
4433 tcpm_pps_complete(port, port->pps_status);
4434
4435 if (port->ams != NONE_AMS)
4436 tcpm_ams_finish(port);
4437 if (port->next_ams != NONE_AMS) {
4438 port->ams = port->next_ams;
4439 port->next_ams = NONE_AMS;
4440 }
4441
4442 /*
4443 * If previous AMS is interrupted, switch to the upcoming
4444 * state.
4445 */
4446 if (port->upcoming_state != INVALID_STATE) {
4447 upcoming_state = port->upcoming_state;
4448 port->upcoming_state = INVALID_STATE;
4449 tcpm_set_state(port, upcoming_state, 0);
4450 break;
4451 }
4452
4453 /*
4454 * 6.4.4.3.1 Discover Identity
4455 * "The Discover Identity Command Shall only be sent to SOP when there is an
4456 * Explicit Contract."
4457 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4458 * port->explicit_contract.
4459 */
4460 if (port->explicit_contract)
4461 mod_send_discover_delayed_work(port, 0);
4462 else
4463 port->send_discover = false;
4464
4465 power_supply_changed(port->psy);
4466 break;
4467
4468 /* Accessory states */
4469 case ACC_UNATTACHED:
4470 tcpm_acc_detach(port);
4471 tcpm_set_state(port, SRC_UNATTACHED, 0);
4472 break;
4473 case DEBUG_ACC_ATTACHED:
4474 case AUDIO_ACC_ATTACHED:
4475 ret = tcpm_acc_attach(port);
4476 if (ret < 0)
4477 tcpm_set_state(port, ACC_UNATTACHED, 0);
4478 break;
4479 case AUDIO_ACC_DEBOUNCE:
4480 timer_val_msecs = PD_T_CC_DEBOUNCE;
4481 trace_android_vh_typec_tcpm_get_timer(tcpm_states[AUDIO_ACC_DEBOUNCE],
4482 CC_DEBOUNCE, &timer_val_msecs);
4483 tcpm_set_state(port, ACC_UNATTACHED, timer_val_msecs);
4484 break;
4485
4486 /* Hard_Reset states */
4487 case HARD_RESET_SEND:
4488 if (port->ams != NONE_AMS)
4489 tcpm_ams_finish(port);
4490 /*
4491 * State machine will be directed to HARD_RESET_START,
4492 * thus set upcoming_state to INVALID_STATE.
4493 */
4494 port->upcoming_state = INVALID_STATE;
4495 tcpm_ams_start(port, HARD_RESET);
4496 break;
4497 case HARD_RESET_START:
4498 port->sink_cap_done = false;
4499 if (port->tcpc->enable_frs)
4500 port->tcpc->enable_frs(port->tcpc, false);
4501 port->hard_reset_count++;
4502 port->tcpc->set_pd_rx(port->tcpc, false);
4503 tcpm_unregister_altmodes(port);
4504 port->nr_sink_caps = 0;
4505 port->send_discover = true;
4506 if (port->pwr_role == TYPEC_SOURCE)
4507 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
4508 PD_T_PS_HARD_RESET);
4509 else
4510 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
4511 break;
4512 case SRC_HARD_RESET_VBUS_OFF:
4513 /*
4514 * 7.1.5 Response to Hard Resets
4515 * Hard Reset Signaling indicates a communication failure has occurred and the
4516 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
4517 * drive VBUS to vSafe0V as shown in Figure 7-9.
4518 */
4519 tcpm_set_vconn(port, false);
4520 tcpm_set_vbus(port, false);
4521 tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
4522 tcpm_data_role_for_source(port));
4523 /*
4524 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
4525 * PD_T_SRC_RECOVER before turning vbus back on.
4526 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
4527 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
4528 * tells the Device Policy Manager to instruct the power supply to perform a
4529 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
4530 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
4531 * re-establish communication with the Sink and resume USB Default Operation.
4532 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
4533 */
4534 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
4535 break;
4536 case SRC_HARD_RESET_VBUS_ON:
4537 tcpm_set_vconn(port, true);
4538 tcpm_set_vbus(port, true);
4539 if (port->ams == HARD_RESET)
4540 tcpm_ams_finish(port);
4541 port->tcpc->set_pd_rx(port->tcpc, true);
4542 tcpm_set_attached_state(port, true);
4543 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
4544 break;
4545 case SNK_HARD_RESET_SINK_OFF:
		/* Do not discharge/disconnect during hard reset */
4547 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4548 memset(&port->pps_data, 0, sizeof(port->pps_data));
4549 tcpm_set_vconn(port, false);
4550 if (port->pd_capable)
4551 tcpm_set_charge(port, false);
4552 tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
4553 tcpm_data_role_for_sink(port));
4554 /*
4555 * VBUS may or may not toggle, depending on the adapter.
4556 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
4557 * directly after timeout.
4558 */
4559 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
4560 break;
4561 case SNK_HARD_RESET_WAIT_VBUS:
4562 if (port->ams == HARD_RESET)
4563 tcpm_ams_finish(port);
4564 /* Assume we're disconnected if VBUS doesn't come back. */
4565 tcpm_set_state(port, SNK_UNATTACHED,
4566 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
4567 break;
4568 case SNK_HARD_RESET_SINK_ON:
4569 /* Note: There is no guarantee that VBUS is on in this state */
4570 /*
4571 * XXX:
4572 * The specification suggests that dual mode ports in sink
4573 * mode should transition to state PE_SRC_Transition_to_default.
4574 * See USB power delivery specification chapter 8.3.3.6.1.3.
		 * This would mean to
4576 * - turn off VCONN, reset power supply
4577 * - request hardware reset
4578 * - turn on VCONN
4579 * - Transition to state PE_Src_Startup
4580 * SNK only ports shall transition to state Snk_Startup
4581 * (see chapter 8.3.3.3.8).
4582 * Similar, dual-mode ports in source mode should transition
4583 * to PE_SNK_Transition_to_default.
4584 */
4585 if (port->pd_capable) {
4586 tcpm_set_current_limit(port,
4587 tcpm_get_current_limit(port),
4588 5000);
4589 tcpm_set_charge(port, true);
4590 }
4591 if (port->ams == HARD_RESET)
4592 tcpm_ams_finish(port);
4593 tcpm_set_attached_state(port, true);
4594 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4595 tcpm_set_state(port, SNK_STARTUP, 0);
4596 break;
4597
4598 /* Soft_Reset states */
4599 case SOFT_RESET:
4600 port->message_id = 0;
4601 port->rx_msgid = -1;
4602 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4603 tcpm_ams_finish(port);
4604 if (port->pwr_role == TYPEC_SOURCE) {
4605 port->upcoming_state = SRC_SEND_CAPABILITIES;
4606 tcpm_ams_start(port, POWER_NEGOTIATION);
4607 } else {
4608 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4609 }
4610 break;
4611 case SRC_SOFT_RESET_WAIT_SNK_TX:
4612 case SNK_SOFT_RESET:
4613 if (port->ams != NONE_AMS)
4614 tcpm_ams_finish(port);
4615 port->upcoming_state = SOFT_RESET_SEND;
4616 tcpm_ams_start(port, SOFT_RESET_AMS);
4617 break;
4618 case SOFT_RESET_SEND:
4619 port->message_id = 0;
4620 port->rx_msgid = -1;
4621 port->tcpc->set_pd_rx(port->tcpc, true);
4622 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
4623 tcpm_set_state_cond(port, hard_reset_state(port), 0);
4624 else
4625 tcpm_set_state_cond(port, hard_reset_state(port),
4626 PD_T_SENDER_RESPONSE);
4627 break;
4628
4629 /* DR_Swap states */
4630 case DR_SWAP_SEND:
4631 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
4632 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4633 port->send_discover = true;
4634 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
4635 PD_T_SENDER_RESPONSE);
4636 break;
4637 case DR_SWAP_ACCEPT:
4638 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4639 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4640 port->send_discover = true;
4641 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
4642 break;
4643 case DR_SWAP_SEND_TIMEOUT:
4644 tcpm_swap_complete(port, -ETIMEDOUT);
4645 port->send_discover = false;
4646 tcpm_ams_finish(port);
4647 tcpm_set_state(port, ready_state(port), 0);
4648 break;
4649 case DR_SWAP_CHANGE_DR:
4650 if (port->data_role == TYPEC_HOST) {
4651 tcpm_unregister_altmodes(port);
4652 tcpm_set_roles(port, true, port->pwr_role,
4653 TYPEC_DEVICE);
4654 } else {
4655 tcpm_set_roles(port, true, port->pwr_role,
4656 TYPEC_HOST);
4657 }
4658 tcpm_ams_finish(port);
4659 tcpm_set_state(port, ready_state(port), 0);
4660 break;
4661
4662 case FR_SWAP_SEND:
4663 if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
4664 tcpm_set_state(port, ERROR_RECOVERY, 0);
4665 break;
4666 }
4667 tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
4668 break;
4669 case FR_SWAP_SEND_TIMEOUT:
4670 tcpm_set_state(port, ERROR_RECOVERY, 0);
4671 break;
4672 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
4673 timer_val_msecs = PD_T_PS_SOURCE_OFF;
4674 state_name = tcpm_states[FR_SWAP_SNK_SRC_TRANSITION_TO_OFF];
4675 trace_android_vh_typec_tcpm_get_timer(state_name, SOURCE_OFF, &timer_val_msecs);
4676 tcpm_set_state(port, ERROR_RECOVERY, timer_val_msecs);
4677 break;
4678 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
4679 if (port->vbus_source)
4680 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
4681 else
4682 tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
4683 break;
4684 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
4685 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4686 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4687 tcpm_set_state(port, ERROR_RECOVERY, 0);
4688 break;
4689 }
4690 tcpm_set_cc(port, tcpm_rp_cc(port));
4691 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4692 break;
4693
4694 /* PR_Swap states */
4695 case PR_SWAP_ACCEPT:
4696 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4697 tcpm_set_state(port, PR_SWAP_START, 0);
4698 break;
4699 case PR_SWAP_SEND:
4700 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
4701 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
4702 PD_T_SENDER_RESPONSE);
4703 break;
4704 case PR_SWAP_SEND_TIMEOUT:
4705 tcpm_swap_complete(port, -ETIMEDOUT);
4706 tcpm_set_state(port, ready_state(port), 0);
4707 break;
4708 case PR_SWAP_START:
4709 tcpm_apply_rc(port);
4710 if (port->pwr_role == TYPEC_SOURCE)
4711 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
4712 PD_T_SRC_TRANSITION);
4713 else
4714 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
4715 break;
4716 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
4717 /*
4718 * Prevent vbus discharge circuit from turning on during PR_SWAP
4719 * as this is not a disconnect.
4720 */
4721 tcpm_set_vbus(port, false);
4722 port->explicit_contract = false;
4723 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
4724 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
4725 PD_T_SRCSWAPSTDBY);
4726 break;
4727 case PR_SWAP_SRC_SNK_SOURCE_OFF:
4728 timer_val_msecs = PD_T_CC_DEBOUNCE;
4729 trace_android_vh_typec_tcpm_get_timer(tcpm_states[PR_SWAP_SRC_SNK_SOURCE_OFF],
4730 CC_DEBOUNCE, &timer_val_msecs);
4731 tcpm_set_cc(port, TYPEC_CC_RD);
4732 /* allow CC debounce */
4733 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
4734 timer_val_msecs);
4735 break;
4736 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
4737 /*
4738 * USB-PD standard, 6.2.1.4, Port Power Role:
4739 * "During the Power Role Swap Sequence, for the initial Source
4740 * Port, the Port Power Role field shall be set to Sink in the
4741 * PS_RDY Message indicating that the initial Source’s power
4742 * supply is turned off"
4743 */
4744 tcpm_set_pwr_role(port, TYPEC_SINK);
4745 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4746 tcpm_set_state(port, ERROR_RECOVERY, 0);
4747 break;
4748 }
4749 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
4750 break;
4751 case PR_SWAP_SRC_SNK_SINK_ON:
4752 tcpm_enable_auto_vbus_discharge(port, true);
4753 /* Set the vbus disconnect threshold for implicit contract */
4754 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4755 tcpm_set_state(port, SNK_STARTUP, 0);
4756 break;
4757 case PR_SWAP_SNK_SRC_SINK_OFF:
4758 timer_val_msecs = PD_T_PS_SOURCE_OFF;
4759 trace_android_vh_typec_tcpm_get_timer(tcpm_states[PR_SWAP_SNK_SRC_SINK_OFF],
4760 SOURCE_OFF, &timer_val_msecs);
4761 /*
4762 * Prevent vbus discharge circuit from turning on during PR_SWAP
4763 * as this is not a disconnect.
4764 */
4765 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
4766 port->pps_data.active, 0);
4767 tcpm_set_charge(port, false);
4768 tcpm_set_state(port, hard_reset_state(port), timer_val_msecs);
4769 break;
4770 case PR_SWAP_SNK_SRC_SOURCE_ON:
4771 tcpm_enable_auto_vbus_discharge(port, true);
4772 tcpm_set_cc(port, tcpm_rp_cc(port));
4773 tcpm_set_vbus(port, true);
4774 /*
4775 * allow time VBUS ramp-up, must be < tNewSrc
4776 * Also, this window overlaps with CC debounce as well.
4777 * So, Wait for the max of two which is PD_T_NEWSRC
4778 */
4779 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
4780 PD_T_NEWSRC);
4781 break;
4782 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
4783 /*
4784 * USB PD standard, 6.2.1.4:
4785 * "Subsequent Messages initiated by the Policy Engine,
4786 * such as the PS_RDY Message sent to indicate that Vbus
4787 * is ready, will have the Port Power Role field set to
4788 * Source."
4789 */
4790 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4791 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4792 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4793 break;
4794
4795 case VCONN_SWAP_ACCEPT:
4796 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4797 tcpm_ams_finish(port);
4798 tcpm_set_state(port, VCONN_SWAP_START, 0);
4799 break;
4800 case VCONN_SWAP_SEND:
4801 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
4802 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
4803 PD_T_SENDER_RESPONSE);
4804 break;
4805 case VCONN_SWAP_SEND_TIMEOUT:
4806 tcpm_swap_complete(port, -ETIMEDOUT);
4807 tcpm_set_state(port, ready_state(port), 0);
4808 break;
4809 case VCONN_SWAP_START:
4810 if (port->vconn_role == TYPEC_SOURCE)
4811 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
4812 else
4813 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
4814 break;
4815 case VCONN_SWAP_WAIT_FOR_VCONN:
4816 tcpm_set_state(port, hard_reset_state(port),
4817 PD_T_VCONN_SOURCE_ON);
4818 break;
4819 case VCONN_SWAP_TURN_ON_VCONN:
4820 tcpm_set_vconn(port, true);
4821 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4822 tcpm_set_state(port, ready_state(port), 0);
4823 break;
4824 case VCONN_SWAP_TURN_OFF_VCONN:
4825 tcpm_set_vconn(port, false);
4826 tcpm_set_state(port, ready_state(port), 0);
4827 break;
4828
4829 case DR_SWAP_CANCEL:
4830 case PR_SWAP_CANCEL:
4831 case VCONN_SWAP_CANCEL:
4832 tcpm_swap_complete(port, port->swap_status);
4833 if (port->pwr_role == TYPEC_SOURCE)
4834 tcpm_set_state(port, SRC_READY, 0);
4835 else
4836 tcpm_set_state(port, SNK_READY, 0);
4837 break;
4838 case FR_SWAP_CANCEL:
4839 if (port->pwr_role == TYPEC_SOURCE)
4840 tcpm_set_state(port, SRC_READY, 0);
4841 else
4842 tcpm_set_state(port, SNK_READY, 0);
4843 break;
4844
4845 case BIST_RX:
4846 switch (BDO_MODE_MASK(port->bist_request)) {
4847 case BDO_MODE_CARRIER2:
4848 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
4849 tcpm_set_state(port, unattached_state(port),
4850 PD_T_BIST_CONT_MODE);
4851 break;
4852 case BDO_MODE_TESTDATA:
4853 if (port->tcpc->set_bist_data) {
4854 tcpm_log(port, "Enable BIST MODE TESTDATA");
4855 port->tcpc->set_bist_data(port->tcpc, true);
4856 }
4857 break;
4858 default:
4859 break;
4860 }
4861 break;
4862 case GET_STATUS_SEND:
4863 tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
4864 tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
4865 PD_T_SENDER_RESPONSE);
4866 break;
4867 case GET_STATUS_SEND_TIMEOUT:
4868 tcpm_set_state(port, ready_state(port), 0);
4869 break;
4870 case GET_PPS_STATUS_SEND:
4871 tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
4872 tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
4873 PD_T_SENDER_RESPONSE);
4874 break;
4875 case GET_PPS_STATUS_SEND_TIMEOUT:
4876 tcpm_set_state(port, ready_state(port), 0);
4877 break;
4878 case GET_SINK_CAP:
4879 tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
4880 tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
4881 break;
4882 case GET_SINK_CAP_TIMEOUT:
4883 port->sink_cap_done = true;
4884 tcpm_set_state(port, ready_state(port), 0);
4885 break;
4886 case ERROR_RECOVERY:
4887 tcpm_swap_complete(port, -EPROTO);
4888 tcpm_pps_complete(port, -EPROTO);
4889 tcpm_set_state(port, PORT_RESET, 0);
4890 break;
4891 case PORT_RESET:
4892 tcpm_reset_port(port);
4893 tcpm_set_cc(port, TYPEC_CC_RD);
4894 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
4895 PD_T_ERROR_RECOVERY);
4896 break;
4897 case PORT_RESET_WAIT_OFF:
4898 timer_val_msecs = PD_T_PS_SOURCE_OFF;
4899 trace_android_vh_typec_tcpm_get_timer(tcpm_states[PORT_RESET_WAIT_OFF],
4900 SOURCE_OFF, &timer_val_msecs);
4901 tcpm_set_state(port,
4902 tcpm_default_state(port),
4903 port->vbus_present ? timer_val_msecs : 0);
4904 break;
4905
4906 /* AMS intermediate state */
4907 case AMS_START:
4908 if (port->upcoming_state == INVALID_STATE) {
4909 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
4910 SRC_READY : SNK_READY, 0);
4911 break;
4912 }
4913
4914 upcoming_state = port->upcoming_state;
4915 port->upcoming_state = INVALID_STATE;
4916 tcpm_set_state(port, upcoming_state, 0);
4917 break;
4918
4919 /* Chunk state */
4920 case CHUNK_NOT_SUPP:
4921 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
4922 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
4923 break;
4924 default:
4925 WARN(1, "Unexpected port state %d\n", port->state);
4926 break;
4927 }
4928 }
4929
tcpm_state_machine_work(struct kthread_work * work)4930 static void tcpm_state_machine_work(struct kthread_work *work)
4931 {
4932 struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
4933 enum tcpm_state prev_state;
4934
4935 mutex_lock(&port->lock);
4936 port->state_machine_running = true;
4937
4938 if (port->queued_message && tcpm_send_queued_message(port))
4939 goto done;
4940
4941 /* If we were queued due to a delayed state change, update it now */
4942 if (port->delayed_state) {
4943 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
4944 tcpm_states[port->state],
4945 tcpm_states[port->delayed_state], port->delay_ms);
4946 port->prev_state = port->state;
4947 port->state = port->delayed_state;
4948 port->delayed_state = INVALID_STATE;
4949 }
4950
4951 /*
4952 * Continue running as long as we have (non-delayed) state changes
4953 * to make.
4954 */
4955 do {
4956 prev_state = port->state;
4957 run_state_machine(port);
4958 if (port->queued_message)
4959 tcpm_send_queued_message(port);
4960 } while (port->state != prev_state && !port->delayed_state);
4961
4962 done:
4963 port->state_machine_running = false;
4964 mutex_unlock(&port->lock);
4965 }
4966
/*
 * Process a CC1/CC2 line status change reported by the low-level TCPC
 * driver. The new values are cached in the port structure and, depending
 * on the current state machine state, the appropriate transition is
 * scheduled: attach/detach detection, Try.SRC/TryWait.SNK sequencing,
 * accessory debounce, or deliberate no-ops during swaps and resets.
 *
 * NOTE(review): callers are outside this chunk; presumably invoked with
 * port->lock held like the rest of the state machine — confirm at call
 * sites.
 */
static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
			    enum typec_cc_status cc2)
{
	enum typec_cc_status old_cc1, old_cc2;
	enum tcpm_state new_state;

	/* Remember previous CC values so changes can be detected below. */
	old_cc1 = port->cc1;
	old_cc2 = port->cc2;
	port->cc1 = cc1;
	port->cc2 = cc2;

	tcpm_log_force(port,
		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
		       port->polarity,
		       tcpm_port_is_disconnected(port) ? "disconnected"
						       : "connected");

	switch (port->state) {
	case TOGGLING:
		/* DRP toggling: pick attach direction from the CC state. */
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		else if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SRC_UNATTACHED:
	case ACC_UNATTACHED:
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACH_WAIT:
		if (tcpm_port_is_disconnected(port) ||
		    tcpm_port_is_audio_detached(port))
			tcpm_set_state(port, SRC_UNATTACHED, 0);
		else if (cc1 != old_cc1 || cc2 != old_cc2)
			/* CC changed mid-debounce: restart the wait. */
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACHED:
	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_READY:
		/* Partner no longer presents Rd: treat as a detach. */
		if (tcpm_port_is_disconnected(port) ||
		    !tcpm_port_is_source(port)) {
			if (port->port_type == TYPEC_PORT_SRC)
				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
			else
				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		}
		break;
	case SNK_UNATTACHED:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_ATTACH_WAIT:
		/*
		 * Exactly one CC pin terminated -> candidate attach;
		 * both open -> disconnected.
		 */
		if ((port->cc1 == TYPEC_CC_OPEN &&
		     port->cc2 != TYPEC_CC_OPEN) ||
		    (port->cc1 != TYPEC_CC_OPEN &&
		     port->cc2 == TYPEC_CC_OPEN))
			new_state = SNK_DEBOUNCED;
		else if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else
			break;
		/*
		 * Re-enter SNK_ATTACH_WAIT only when the pending target
		 * changed, so the debounce timer is restarted toward the
		 * new target rather than reset needlessly.
		 */
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_DEBOUNCED:
		if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else if (port->vbus_present)
			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
		else
			new_state = SNK_UNATTACHED;
		/* Same restart-only-on-target-change trick as above. */
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_DEBOUNCED, 0);
		break;
	case SNK_READY:
		/*
		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
		 * "A port that has entered into USB PD communications with the Source and
		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
		 * cable disconnect in addition to monitoring VBUS.
		 *
		 * A port that is monitoring the CC voltage for disconnect (but is not in
		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
		 * vRd-USB for tPDDebounce."
		 *
		 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
		 * away before vbus decays to disconnect threshold. Allow
		 * disconnect to be driven by vbus disconnect when auto vbus
		 * discharge is enabled.
		 */
		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
			tcpm_set_state(port, unattached_state(port), 0);
		else if (!port->pd_capable &&
			 (cc1 != old_cc1 || cc2 != old_cc2))
			/*
			 * Non-PD contract: the partner's Rp advertisement
			 * may have changed, so refresh the current limit.
			 */
			tcpm_set_current_limit(port,
					       tcpm_get_current_limit(port),
					       5000);
		break;

	case AUDIO_ACC_ATTACHED:
		/* One CC opening may just be noise: debounce before detach. */
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
		break;
	case AUDIO_ACC_DEBOUNCE:
		if (tcpm_port_is_audio(port))
			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
		break;

	case DEBUG_ACC_ATTACHED:
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, ACC_UNATTACHED, 0);
		break;

	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;

	case SNK_DISCOVERY:
		/* CC line is unstable, wait for debounce */
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
		break;
	case SNK_DISCOVERY_DEBOUNCE:
		break;

	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (!port->vbus_present && tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		if (port->vbus_present || !tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		if (!tcpm_port_is_sink(port)) {
			/* Reset the Try.SRC wait window before switching roles. */
			port->max_wait = 0;
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		}
		break;
	case SRC_TRY_WAIT:
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
		break;
	case SRC_TRY_DEBOUNCE:
		tcpm_set_state(port, SRC_TRY_WAIT, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
		break;
	case SNK_TRYWAIT_VBUS:
		if (!tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
		if (!tcpm_port_is_sink(port))
			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
		else
			/* Re-enter the same state to re-evaluate vbus. */
			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * CC state change is expected in PR_SWAP
		 * Ignore it.
		 */
		break;
	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* Do nothing, CC change expected */
		break;

	case PORT_RESET:
	case PORT_RESET_WAIT_OFF:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore CC changes here.
		 */
		break;
	default:
		/*
		 * While acting as sink and auto vbus discharge is enabled, Allow disconnect
		 * to be driven by vbus disconnect.
		 */
		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
							 port->auto_vbus_discharge_enabled))
			tcpm_set_state(port, unattached_state(port), 0);
		break;
	}
}
5172
/*
 * Handle a "VBUS present" notification from the TCPC.
 *
 * Records that VBUS is up (and therefore no longer at vSafe0V) and advances
 * the connection state machine for every state that was waiting on VBUS.
 * Runs from tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_pd_vbus_on(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS on");
	port->vbus_present = true;
	/*
	 * When vbus_present is true i.e. Voltage at VBUS is greater than VSAFE5V implicitly
	 * states that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
	 */
	port->vbus_vsafe0v = false;

	switch (port->state) {
	case SNK_TRANSITION_SINK_VBUS:
		/* New source voltage reached: the explicit contract is now in place. */
		port->explicit_contract = true;
		tcpm_set_state(port, SNK_READY, 0);
		break;
	case SNK_DISCOVERY:
		/* Re-enter SNK_DISCOVERY so its entry actions run now that VBUS is up. */
		tcpm_set_state(port, SNK_DISCOVERY, 0);
		break;

	case SNK_DEBOUNCED:
		/* Optionally attempt Try.SRC before attaching as sink. */
		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
							: SNK_ATTACHED,
			       0);
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
		break;
	case SRC_ATTACHED:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case SRC_HARD_RESET_VBUS_ON:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;

	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Do nothing, Waiting for Rd to be detected */
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to do be done */
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case SNK_TRYWAIT_VBUS:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		/* Do nothing, waiting for Rp */
		break;
	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
		if (port->vbus_present && tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* During fast role swap the TCPC sources VBUS autonomously; inform it. */
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
		break;
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
		break;

	case PORT_RESET:
	case PORT_RESET_WAIT_OFF:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
		break;

	default:
		break;
	}
}
5262
/*
 * Handle a "VBUS absent" notification from the TCPC.
 *
 * Clears the cached VBUS flags and advances the state machine: depending on
 * the current state the loss of VBUS is either expected (swaps, resets,
 * try-states), triggers the next step of a hard reset / power-role swap, or
 * forces a transition back to the unattached state.
 * Runs from tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_pd_vbus_off(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS off");
	port->vbus_present = false;
	port->vbus_never_low = false;
	switch (port->state) {
	case SNK_HARD_RESET_SINK_OFF:
		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
		break;
	case HARD_RESET_SEND:
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		break;
	case SNK_TRYWAIT:
	case SNK_TRYWAIT_VBUS:
	case SNK_TRYWAIT_DEBOUNCE:
		break;
	case SNK_ATTACH_WAIT:
	case SNK_DEBOUNCED:
		port->debouncing = false;
		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
		break;

	case SNK_NEGOTIATE_CAPABILITIES:
		break;

	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
		/* Our supply has dropped; move to the next power-role-swap step. */
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
		break;

	case PR_SWAP_SNK_SRC_SINK_OFF:
		/* Do nothing, expected */
		break;

	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * Do nothing when vbus off notification is received.
		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
		 * for the vbus source to ramp up.
		 */
		break;

	case PORT_RESET_WAIT_OFF:
		/* VBUS dropped as requested by the reset; resume default operation. */
		tcpm_set_state(port, tcpm_default_state(port), 0);
		break;

	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;

	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/*
		 * Force to unattached state to re-initiate connection.
		 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
		 * sink removed. Although sink removal here is due to source's vbus collapse,
		 * treat it the same way for consistency.
		 */
		if (port->port_type == TYPEC_PORT_SRC)
			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
		else
			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		break;

	case PORT_RESET:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
		break;

	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* Do nothing, vbus drop expected */
		break;

	default:
		/* Any other attached sink state: VBUS loss means detach. */
		if (port->pwr_role == TYPEC_SINK && port->attached)
			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		break;
	}
}
5363
/*
 * Handle a "VBUS discharged to vSafe0V" notification from the TCPC.
 *
 * Sets the vbus_vsafe0v flag and advances states that explicitly wait for
 * vSafe0V (hard-reset recovery, attach, and auto-VBUS-discharge driven
 * disconnect).  Runs from tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
{
	unsigned int timer_val_msecs;

	tcpm_log_force(port, "VBUS VSAFE0V");
	port->vbus_vsafe0v = true;
	switch (port->state) {
	case SRC_HARD_RESET_VBUS_OFF:
		/*
		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
		 */
		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
		break;
	case SRC_ATTACH_WAIT:
		/* Debounce period may be overridden by the vendor hook below. */
		timer_val_msecs = PD_T_CC_DEBOUNCE;
		trace_android_vh_typec_tcpm_get_timer(tcpm_states[SRC_ATTACH_WAIT],
						      CC_DEBOUNCE, &timer_val_msecs);
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
				       timer_val_msecs);
		break;
	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/* With auto discharge, vSafe0V while sourcing signals a detach. */
		if (port->auto_vbus_discharge_enabled) {
			if (port->port_type == TYPEC_PORT_SRC)
				tcpm_set_state(port, SRC_UNATTACHED, 0);
			else
				tcpm_set_state(port, SNK_UNATTACHED, 0);
		}
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/* Do nothing, vsafe0v is expected during transition */
		break;
	case SNK_ATTACH_WAIT:
	case SNK_DEBOUNCED:
		/* Do nothing, still waiting for VSAFE5V for connect */
		break;
	default:
		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
			tcpm_set_state(port, SNK_UNATTACHED, 0);
		break;
	}
}
5414
_tcpm_pd_hard_reset(struct tcpm_port * port)5415 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
5416 {
5417 tcpm_log_force(port, "Received hard reset");
5418 if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
5419 port->tcpc->set_bist_data(port->tcpc, false);
5420
5421 if (port->ams != NONE_AMS)
5422 port->ams = NONE_AMS;
5423 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
5424 port->ams = HARD_RESET;
5425 /*
5426 * If we keep receiving hard reset requests, executing the hard reset
5427 * must have failed. Revert to error recovery if that happens.
5428 */
5429 tcpm_set_state(port,
5430 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
5431 HARD_RESET_START : ERROR_RECOVERY,
5432 0);
5433 }
5434
/*
 * Bottom-half handler for all low-level TCPC events (hard reset, VBUS, CC,
 * FRS signal, autonomous VBUS sourcing).
 *
 * Events are latched into the port->pd_events bitmask under pd_event_lock
 * by the tcpm_*_change()/tcpm_sink_frs()/tcpm_sourcing_vbus() helpers and
 * drained here in a loop, so events raised while earlier ones are being
 * handled are not lost.  All processing happens with port->lock held.
 */
static void tcpm_pd_event_handler(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      event_work);
	u32 events;

#ifdef CONFIG_NO_GKI
	mutex_lock(&port->pd_handler_lock);
#endif
	mutex_lock(&port->lock);

	spin_lock(&port->pd_event_lock);
	while (port->pd_events) {
		/*
		 * Snapshot and clear the pending events, then drop the
		 * spinlock so the handlers below may sleep.
		 */
		events = port->pd_events;
		port->pd_events = 0;
		spin_unlock(&port->pd_event_lock);
		if (events & TCPM_RESET_EVENT)
			_tcpm_pd_hard_reset(port);
		if (events & TCPM_VBUS_EVENT) {
			bool vbus;

			vbus = port->tcpc->get_vbus(port->tcpc);
			if (vbus) {
				_tcpm_pd_vbus_on(port);
			} else {
				_tcpm_pd_vbus_off(port);
				/*
				 * When TCPC does not support detecting vsafe0v voltage level,
				 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
				 * to see if vbus has discharge to VSAFE0V.
				 */
				if (!port->tcpc->is_vbus_vsafe0v ||
				    port->tcpc->is_vbus_vsafe0v(port->tcpc))
					_tcpm_pd_vbus_vsafe0v(port);
			}
		}
		if (events & TCPM_CC_EVENT) {
			enum typec_cc_status cc1, cc2;

			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
				_tcpm_cc_change(port, cc1, cc2);
		}
		if (events & TCPM_FRS_EVENT) {
			/* FRS signal is only honored while idle in SNK_READY. */
			if (port->state == SNK_READY) {
				int ret;

				port->upcoming_state = FR_SWAP_SEND;
				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
				if (ret == -EAGAIN)
					port->upcoming_state = INVALID_STATE;
			} else {
				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
			}
		}
		if (events & TCPM_SOURCING_VBUS) {
			tcpm_log(port, "sourcing vbus");
			/*
			 * In fast role swap case TCPC autonomously sources vbus. Set vbus_source
			 * true as TCPM wouldn't have called tcpm_set_vbus.
			 *
			 * When vbus is sourced on the command on TCPM i.e. TCPM called
			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
			 */
			port->vbus_source = true;
			_tcpm_pd_vbus_on(port);
		}

		/* Re-take the spinlock before re-checking for new events. */
		spin_lock(&port->pd_event_lock);
	}
	spin_unlock(&port->pd_event_lock);
	mutex_unlock(&port->lock);
#ifdef CONFIG_NO_GKI
	mutex_unlock(&port->pd_handler_lock);
#endif
}
5510
/* Latch a CC line change and defer handling to tcpm_pd_event_handler(). */
void tcpm_cc_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_CC_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_cc_change);
5519
/* Latch a VBUS level change and defer handling to tcpm_pd_event_handler(). */
void tcpm_vbus_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_VBUS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_vbus_change);
5528
/*
 * Queue handling of a received Hard Reset signal.
 * Note the deliberate '=' (not '|='): a hard reset supersedes and discards
 * any other events still pending.
 */
void tcpm_pd_hard_reset(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events = TCPM_RESET_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
5537
/* Latch a Fast Role Swap signal and defer handling to tcpm_pd_event_handler(). */
void tcpm_sink_frs(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_FRS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_sink_frs);
5546
/*
 * Latch a "TCPC started sourcing VBUS autonomously" notification (used during
 * fast role swap) and defer handling to tcpm_pd_event_handler().
 */
void tcpm_sourcing_vbus(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_SOURCING_VBUS;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
5555
/*
 * Delayed work: query the partner's sink capabilities (Get_Sink_Cap AMS) as
 * a prerequisite for enabling Fast Role Swap.
 *
 * Only applies to connected PD 3.0 DRP ports whose TCPC implements
 * enable_frs.  Reschedules itself while the state machine is busy and stops
 * for good once sink_cap_done is set.
 */
static void tcpm_enable_frs_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
	int ret;

	mutex_lock(&port->lock);
	/* Not FRS capable */
	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
	    !port->tcpc->enable_frs ||
	    /* Sink caps queried */
	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
		goto unlock;

	/* Send when the state machine is idle */
	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
		goto resched;

	port->upcoming_state = GET_SINK_CAP;
	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
	if (ret == -EAGAIN) {
		/* AMS could not start yet; retry later. */
		port->upcoming_state = INVALID_STATE;
	} else {
		port->sink_cap_done = true;
		goto unlock;
	}
resched:
	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
unlock:
	mutex_unlock(&port->lock);
}
5587
tcpm_send_discover_work(struct kthread_work * work)5588 static void tcpm_send_discover_work(struct kthread_work *work)
5589 {
5590 struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
5591
5592 mutex_lock(&port->lock);
5593 /* No need to send DISCOVER_IDENTITY anymore */
5594 if (!port->send_discover)
5595 goto unlock;
5596
5597 if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
5598 port->send_discover = false;
5599 goto unlock;
5600 }
5601
5602 /* Retry if the port is not idle */
5603 if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
5604 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
5605 goto unlock;
5606 }
5607
5608 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
5609
5610 unlock:
5611 mutex_unlock(&port->lock);
5612 }
5613
/*
 * typec_operations.dr_set callback: request a data role (host/device) swap.
 *
 * For PD-capable partners this starts a DR_Swap AMS and then blocks with
 * port->lock released until the state machine fires swap_complete or the
 * PD_ROLE_SWAP_TIMEOUT expires.  For non-PD partners the port is reset to
 * force a role change, which only works if a preferred role is configured
 * and matches the requested role.  swap_lock serializes concurrent swap
 * requests.  Returns 0 on success or a negative errno.
 */
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->typec_caps.data != TYPEC_PORT_DRD) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested role; nothing to do. */
	if (port->data_role == data) {
		ret = 0;
		goto port_unlock;
	}

	/*
	 * XXX
	 * 6.3.9: If an alternate mode is active, a request to swap
	 * alternate modes shall trigger a port reset.
	 * Reject data role swap request in this case.
	 */

	if (!port->pd_capable) {
		/*
		 * If the partner is not PD capable, reset the port to
		 * trigger a role change. This can only work if a preferred
		 * role is configured, and if it matches the requested role.
		 */
		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
		    port->try_role == port->pwr_role) {
			ret = -EINVAL;
			goto port_unlock;
		}
		port->non_pd_role_swap = true;
		tcpm_set_state(port, PORT_RESET, 0);
	} else {
		port->upcoming_state = DR_SWAP_SEND;
		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto port_unlock;
		}
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can run while we wait. */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	port->non_pd_role_swap = false;
	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
5685
/*
 * typec_operations.pr_set callback: request a power role (source/sink) swap
 * via a PD PR_Swap AMS.
 *
 * Only valid on DRP ports in the SRC_READY/SNK_READY states.  Starts the
 * swap, then waits with port->lock released until the state machine
 * completes it or PD_ROLE_SWAP_TIMEOUT expires.  Returns 0 on success or a
 * negative errno.
 */
static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->port_type != TYPEC_PORT_DRP) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested role; nothing to do. */
	if (role == port->pwr_role) {
		ret = 0;
		goto port_unlock;
	}

	port->upcoming_state = PR_SWAP_SEND;
	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can run while we wait. */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
5734
/*
 * typec_operations.vconn_set callback: request a VCONN swap via a PD
 * VCONN_Swap AMS.
 *
 * Only valid in the SRC_READY/SNK_READY states.  Starts the swap, then
 * waits with port->lock released until the state machine completes it or
 * PD_ROLE_SWAP_TIMEOUT expires.  Returns 0 on success or a negative errno.
 */
static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested VCONN role; nothing to do. */
	if (role == port->vconn_role) {
		ret = 0;
		goto port_unlock;
	}

	port->upcoming_state = VCONN_SWAP_SEND;
	ret = tcpm_ams_start(port, VCONN_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can run while we wait. */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
5779
tcpm_try_role(struct typec_port * p,int role)5780 static int tcpm_try_role(struct typec_port *p, int role)
5781 {
5782 struct tcpm_port *port = typec_get_drvdata(p);
5783 struct tcpc_dev *tcpc = port->tcpc;
5784 int ret = 0;
5785
5786 mutex_lock(&port->lock);
5787 if (tcpc->try_role)
5788 ret = tcpc->try_role(tcpc, role);
5789 if (!ret)
5790 port->try_role = role;
5791 port->try_src_count = 0;
5792 port->try_snk_count = 0;
5793 mutex_unlock(&port->lock);
5794
5795 return ret;
5796 }
5797
/*
 * Request a new PPS operating current (in mA) from the attached source.
 *
 * Validates that a PPS contract is active, the requested current is within
 * the source's advertised maximum, and the resulting power still covers the
 * port's operating requirement.  Triggers a renegotiation AMS and waits,
 * with port->lock released, for completion or PD_PPS_CTRL_TIMEOUT.
 * Returns 0 on success or a negative errno.
 */
static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (req_op_curr > port->pps_data.max_curr) {
		ret = -EINVAL;
		goto port_unlock;
	}

	/* mA * mV / 1000 yields mW; must still satisfy operating_snk_mw. */
	target_mw = (req_op_curr * port->supply_voltage) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down operating current to align with PPS valid steps */
	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);

	reinit_completion(&port->pps_complete);
	port->pps_data.req_op_curr = req_op_curr;
	port->pps_status = 0;
	port->pps_pending = true;
	/* Drop port->lock so the state machine can run while we wait. */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
5858
/*
 * Request a new PPS output voltage (in mV) from the attached source.
 *
 * Validates that a PPS contract is active, the requested voltage is within
 * the source's advertised range, and the resulting power still covers the
 * port's operating requirement.  Triggers a renegotiation AMS and waits,
 * with port->lock released, for completion or PD_PPS_CTRL_TIMEOUT.
 * Returns 0 on success or a negative errno.
 */
static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (req_out_volt < port->pps_data.min_volt ||
	    req_out_volt > port->pps_data.max_volt) {
		ret = -EINVAL;
		goto port_unlock;
	}

	/* mA * mV / 1000 yields mW; must still satisfy operating_snk_mw. */
	target_mw = (port->current_limit * req_out_volt) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down output voltage to align with PPS valid steps */
	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);

	reinit_completion(&port->pps_complete);
	port->pps_data.req_out_volt = req_out_volt;
	port->pps_status = 0;
	port->pps_pending = true;
	/* Drop port->lock so the state machine can run while we wait. */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
5920
/*
 * Activate or deactivate a PPS (programmable power supply) contract.
 *
 * Activation renegotiates into SNK_NEGOTIATE_PPS_CAPABILITIES seeded with
 * the present voltage/current; deactivation renegotiates back to a standard
 * fixed-PDO contract.  Waits, with port->lock released, for completion or
 * PD_PPS_CTRL_TIMEOUT.  Returns 0 on success or a negative errno.
 */
static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
{
	int ret = 0;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.supported) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	/* Trying to deactivate PPS when already deactivated so just bail */
	if (!port->pps_data.active && !activate)
		goto port_unlock;

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (activate)
		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	else
		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	reinit_completion(&port->pps_complete);
	port->pps_status = 0;
	port->pps_pending = true;

	/* Trigger PPS request or move back to standard PDO contract */
	if (activate) {
		port->pps_data.req_out_volt = port->supply_voltage;
		port->pps_data.req_op_curr = port->current_limit;
	}
	/* Drop port->lock so the state machine can run while we wait. */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
5978
/*
 * (Re)initialize the port: reset the TCPC hardware and the TCPM state,
 * resynchronize the cached VBUS / vSafe0V / CC status with the hardware,
 * then force a PORT_RESET so the partner sees a clean disconnect.
 * Called with port->lock held (see tcpm_tcpc_reset()).
 */
static void tcpm_init(struct tcpm_port *port)
{
	enum typec_cc_status cc1, cc2;

	port->tcpc->init(port->tcpc);

	tcpm_reset_port(port);

	/*
	 * XXX
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;

	/*
	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
	 * So implicitly vbus_vsafe0v = false.
	 *
	 * 2. When vbus_present is false and TCPC does NOT support querying
	 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
	 * vbus_vsafe0v is true.
	 *
	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
	 * then, query tcpc for vsafe0v status.
	 */
	if (port->vbus_present)
		port->vbus_vsafe0v = false;
	else if (!port->tcpc->is_vbus_vsafe0v)
		port->vbus_vsafe0v = true;
	else
		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);

	tcpm_set_state(port, tcpm_default_state(port), 0);

	/* Seed the state machine with the current CC status. */
	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);

	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, PORT_RESET, 0);
}
6025
tcpm_port_type_set(struct typec_port * p,enum typec_port_type type)6026 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
6027 {
6028 struct tcpm_port *port = typec_get_drvdata(p);
6029
6030 mutex_lock(&port->lock);
6031 if (type == port->port_type)
6032 goto port_unlock;
6033
6034 port->port_type = type;
6035
6036 if (!port->connected) {
6037 tcpm_set_state(port, PORT_RESET, 0);
6038 } else if (type == TYPEC_PORT_SNK) {
6039 if (!(port->pwr_role == TYPEC_SINK &&
6040 port->data_role == TYPEC_DEVICE))
6041 tcpm_set_state(port, PORT_RESET, 0);
6042 } else if (type == TYPEC_PORT_SRC) {
6043 if (!(port->pwr_role == TYPEC_SOURCE &&
6044 port->data_role == TYPEC_HOST))
6045 tcpm_set_state(port, PORT_RESET, 0);
6046 }
6047
6048 port_unlock:
6049 mutex_unlock(&port->lock);
6050 return 0;
6051 }
6052
/* Callbacks the Type-C class uses to drive role/type changes on this port. */
static const struct typec_operations tcpm_ops = {
	.try_role = tcpm_try_role,
	.dr_set = tcpm_dr_set,
	.pr_set = tcpm_pr_set,
	.vconn_set = tcpm_vconn_set,
	.port_type_set = tcpm_port_type_set
};
6060
/*
 * Re-run full port initialization after the low-level TCPC was reset.
 * Tears down any active connection (see tcpm_init()).
 */
void tcpm_tcpc_reset(struct tcpm_port *port)
{
	mutex_lock(&port->lock);
	/* XXX: Maintain PD connection if possible? */
	tcpm_init(port);
	mutex_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
6069
/*
 * Parse the port's capabilities from firmware (device tree / ACPI)
 * properties: PD revision, data and power roles, source/sink PDOs,
 * preferred role, operating power, FRS current and sink VDOs.
 *
 * Returns 0 on success or a negative errno when a mandatory property is
 * missing or invalid.
 */
static int tcpm_fw_get_caps(struct tcpm_port *port,
			    struct fwnode_handle *fwnode)
{
	const char *cap_str;
	int ret;
	u32 mw, frs_current, pd_revision;

	if (!fwnode)
		return -EINVAL;

	/* "pd-revision" is optional; default to PD 3.0 when absent. */
	ret = fwnode_property_read_u32(fwnode, "pd-revision",
				       &pd_revision);
	if (ret < 0)
		port->typec_caps.pd_revision = 0x0300;
	else
		port->typec_caps.pd_revision = pd_revision & 0xffff;

	/* USB data support is optional */
	ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
	if (ret == 0) {
		ret = typec_find_port_data_role(cap_str);
		if (ret < 0)
			return ret;
		port->typec_caps.data = ret;
	}

	/* "power-role" is mandatory. */
	ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
	if (ret < 0)
		return ret;

	ret = typec_find_port_power_role(cap_str);
	if (ret < 0)
		return ret;
	port->typec_caps.type = ret;
	port->port_type = port->typec_caps.type;

	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
	/* A sink-only port needs no source PDOs or preferred role. */
	if (port->port_type == TYPEC_PORT_SNK)
		goto sink;

	/* Get source pdos */
	ret = fwnode_property_count_u32(fwnode, "source-pdos");
	if (ret <= 0)
		return -EINVAL;

	port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
	ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
					     port->src_pdo, port->nr_src_pdo);
	if ((ret < 0) || tcpm_validate_caps(port, port->src_pdo,
					    port->nr_src_pdo))
		return -EINVAL;

	/* A source-only port needs no sink configuration. */
	if (port->port_type == TYPEC_PORT_SRC)
		return 0;

	/* Get the preferred power role for DRP */
	ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str);
	if (ret < 0)
		return ret;

	port->typec_caps.prefer_role = typec_find_power_role(cap_str);
	if (port->typec_caps.prefer_role < 0)
		return -EINVAL;
sink:
	/* Get sink pdos */
	ret = fwnode_property_count_u32(fwnode, "sink-pdos");
	if (ret <= 0)
		return -EINVAL;

	port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
	ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
					     port->snk_pdo, port->nr_snk_pdo);
	if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
					    port->nr_snk_pdo))
		return -EINVAL;

	if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
		return -EINVAL;
	port->operating_snk_mw = mw / 1000;

	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");

	/* FRS can only be supported by DRP ports */
	if (port->port_type == TYPEC_PORT_DRP) {
		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
					       &frs_current);
		if (ret >= 0 && frs_current <= FRS_5V_3A)
			port->new_source_frs_current = frs_current;
	}

	/* sink-vdos is optional */
	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
	if (ret < 0)
		ret = 0;

	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
	if (port->nr_snk_vdo) {
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
						     port->snk_vdo,
						     port->nr_snk_vdo);
		if (ret < 0)
			return ret;
	}

	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
	if (port->nr_snk_vdo) {
		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
		if (ret < 0)
			return ret;
		else if (ret == 0)
			return -ENODATA;

		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
						     port->snk_vdo_v1,
						     port->nr_snk_vdo_v1);
		if (ret < 0)
			return ret;
	}

	return 0;
}
6192
/*
 * Copy up to PDO_MAX_OBJECTS power data objects from src_pdo into dest_pdo.
 * Returns the number of PDOs actually copied (nr_pdo clamped to the
 * destination capacity).
 */
static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo, unsigned int nr_pdo)
{
	unsigned int count = nr_pdo;
	unsigned int i;

	if (count > PDO_MAX_OBJECTS)
		count = PDO_MAX_OBJECTS;

	for (i = 0; i < count; i++)
		dest_pdo[i] = src_pdo[i];

	return count;
}
6205
/*
 * tcpm_update_sink_capabilities - Replace the port's sink PDO set at runtime.
 * @port: the TCPM port
 * @pdo: new sink PDO array
 * @nr_pdo: number of entries in @pdo (clamped to PDO_MAX_OBJECTS by the copy)
 * @operating_snk_mw: new operating sink power in mW
 *
 * If the port is currently in (or transitioning to) an explicit sink
 * contract, a power-negotiation AMS is started so the new capabilities
 * are renegotiated with the source.
 *
 * Returns 0 on success, -EINVAL for invalid capabilities, or the error
 * from tcpm_ams_start().
 */
int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, unsigned int nr_pdo,
				  unsigned int operating_snk_mw)
{
	int ret = 0;

	if (tcpm_validate_caps(port, pdo, nr_pdo))
		return -EINVAL;

	mutex_lock(&port->lock);
	port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
	port->operating_snk_mw = operating_snk_mw;

	switch (port->state) {
	case SNK_NEGOTIATE_CAPABILITIES:
	case SNK_NEGOTIATE_PPS_CAPABILITIES:
	case SNK_READY:
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
		/* Pick the renegotiation state matching the active contract type. */
		if (port->pps_data.active)
			port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
		else if (port->pd_capable)
			port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
		else
			break;

		port->update_sink_caps = true;

		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
		if (ret == -EAGAIN) {
			/* AMS could not start now; drop the pending transition. */
			port->upcoming_state = INVALID_STATE;
			break;
		}
		break;
	default:
		break;
	}
	mutex_unlock(&port->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
6246
6247 /* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
	TCPM_PSY_OFFLINE = 0,	/* not charging from VBUS */
	TCPM_PSY_FIXED_ONLINE,	/* charging under a fixed contract */
	TCPM_PSY_PROG_ONLINE,	/* charging under a PPS (programmable) contract */
};
6253
/* Properties exposed by the per-port source power supply. */
static enum power_supply_property tcpm_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};
6263
tcpm_psy_get_online(struct tcpm_port * port,union power_supply_propval * val)6264 static int tcpm_psy_get_online(struct tcpm_port *port,
6265 union power_supply_propval *val)
6266 {
6267 if (port->vbus_charge) {
6268 if (port->pps_data.active)
6269 val->intval = TCPM_PSY_PROG_ONLINE;
6270 else
6271 val->intval = TCPM_PSY_FIXED_ONLINE;
6272 } else {
6273 val->intval = TCPM_PSY_OFFLINE;
6274 }
6275
6276 return 0;
6277 }
6278
tcpm_psy_get_voltage_min(struct tcpm_port * port,union power_supply_propval * val)6279 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
6280 union power_supply_propval *val)
6281 {
6282 if (port->pps_data.active)
6283 val->intval = port->pps_data.min_volt * 1000;
6284 else
6285 val->intval = port->supply_voltage * 1000;
6286
6287 return 0;
6288 }
6289
tcpm_psy_get_voltage_max(struct tcpm_port * port,union power_supply_propval * val)6290 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
6291 union power_supply_propval *val)
6292 {
6293 if (port->pps_data.active)
6294 val->intval = port->pps_data.max_volt * 1000;
6295 else
6296 val->intval = port->supply_voltage * 1000;
6297
6298 return 0;
6299 }
6300
/* Report the present supply voltage, scaled to uV for the psy ABI. */
static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->supply_voltage * 1000;

	return 0;
}
6308
tcpm_psy_get_current_max(struct tcpm_port * port,union power_supply_propval * val)6309 static int tcpm_psy_get_current_max(struct tcpm_port *port,
6310 union power_supply_propval *val)
6311 {
6312 if (port->pps_data.active)
6313 val->intval = port->pps_data.max_curr * 1000;
6314 else
6315 val->intval = port->current_limit * 1000;
6316
6317 return 0;
6318 }
6319
/* Report the present current limit, scaled to uA for the psy ABI. */
static int tcpm_psy_get_current_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->current_limit * 1000;

	return 0;
}
6327
tcpm_psy_get_input_power_limit(struct tcpm_port * port,union power_supply_propval * val)6328 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
6329 union power_supply_propval *val)
6330 {
6331 unsigned int src_mv, src_ma, max_src_mw = 0;
6332 unsigned int i, tmp;
6333
6334 for (i = 0; i < port->nr_source_caps; i++) {
6335 u32 pdo = port->source_caps[i];
6336
6337 if (pdo_type(pdo) == PDO_TYPE_FIXED) {
6338 src_mv = pdo_fixed_voltage(pdo);
6339 src_ma = pdo_max_current(pdo);
6340 tmp = src_mv * src_ma / 1000;
6341 max_src_mw = tmp > max_src_mw ? tmp : max_src_mw;
6342 }
6343 }
6344
6345 val->intval = max_src_mw;
6346 return 0;
6347 }
6348
tcpm_psy_get_prop(struct power_supply * psy,enum power_supply_property psp,union power_supply_propval * val)6349 static int tcpm_psy_get_prop(struct power_supply *psy,
6350 enum power_supply_property psp,
6351 union power_supply_propval *val)
6352 {
6353 struct tcpm_port *port = power_supply_get_drvdata(psy);
6354 int ret = 0;
6355
6356 switch (psp) {
6357 case POWER_SUPPLY_PROP_USB_TYPE:
6358 val->intval = port->usb_type;
6359 break;
6360 case POWER_SUPPLY_PROP_ONLINE:
6361 ret = tcpm_psy_get_online(port, val);
6362 break;
6363 case POWER_SUPPLY_PROP_VOLTAGE_MIN:
6364 ret = tcpm_psy_get_voltage_min(port, val);
6365 break;
6366 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
6367 ret = tcpm_psy_get_voltage_max(port, val);
6368 break;
6369 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6370 ret = tcpm_psy_get_voltage_now(port, val);
6371 break;
6372 case POWER_SUPPLY_PROP_CURRENT_MAX:
6373 ret = tcpm_psy_get_current_max(port, val);
6374 break;
6375 case POWER_SUPPLY_PROP_CURRENT_NOW:
6376 ret = tcpm_psy_get_current_now(port, val);
6377 break;
6378 case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
6379 tcpm_psy_get_input_power_limit(port, val);
6380 break;
6381 default:
6382 ret = -EINVAL;
6383 break;
6384 }
6385 return ret;
6386 }
6387
tcpm_psy_set_online(struct tcpm_port * port,const union power_supply_propval * val)6388 static int tcpm_psy_set_online(struct tcpm_port *port,
6389 const union power_supply_propval *val)
6390 {
6391 int ret;
6392
6393 switch (val->intval) {
6394 case TCPM_PSY_FIXED_ONLINE:
6395 ret = tcpm_pps_activate(port, false);
6396 break;
6397 case TCPM_PSY_PROG_ONLINE:
6398 ret = tcpm_pps_activate(port, true);
6399 break;
6400 default:
6401 ret = -EINVAL;
6402 break;
6403 }
6404
6405 return ret;
6406 }
6407
tcpm_psy_set_prop(struct power_supply * psy,enum power_supply_property psp,const union power_supply_propval * val)6408 static int tcpm_psy_set_prop(struct power_supply *psy,
6409 enum power_supply_property psp,
6410 const union power_supply_propval *val)
6411 {
6412 struct tcpm_port *port = power_supply_get_drvdata(psy);
6413 int ret;
6414
6415 switch (psp) {
6416 case POWER_SUPPLY_PROP_ONLINE:
6417 ret = tcpm_psy_set_online(port, val);
6418 break;
6419 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6420 if (val->intval < port->pps_data.min_volt * 1000 ||
6421 val->intval > port->pps_data.max_volt * 1000)
6422 ret = -EINVAL;
6423 else
6424 ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
6425 break;
6426 case POWER_SUPPLY_PROP_CURRENT_NOW:
6427 if (val->intval > port->pps_data.max_curr * 1000)
6428 ret = -EINVAL;
6429 else
6430 ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
6431 break;
6432 default:
6433 ret = -EINVAL;
6434 break;
6435 }
6436 power_supply_changed(port->psy);
6437 return ret;
6438 }
6439
tcpm_psy_prop_writeable(struct power_supply * psy,enum power_supply_property psp)6440 static int tcpm_psy_prop_writeable(struct power_supply *psy,
6441 enum power_supply_property psp)
6442 {
6443 switch (psp) {
6444 case POWER_SUPPLY_PROP_ONLINE:
6445 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6446 case POWER_SUPPLY_PROP_CURRENT_NOW:
6447 return 1;
6448 default:
6449 return 0;
6450 }
6451 }
6452
/* USB supply types the port can report (see port->usb_type updates). */
static enum power_supply_usb_type tcpm_psy_usb_types[] = {
	POWER_SUPPLY_USB_TYPE_C,
	POWER_SUPPLY_USB_TYPE_PD,
	POWER_SUPPLY_USB_TYPE_PD_PPS,
};

/* Registered psy names look like "tcpm-source-psy-<device name>". */
static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
6460
devm_tcpm_psy_register(struct tcpm_port * port)6461 static int devm_tcpm_psy_register(struct tcpm_port *port)
6462 {
6463 struct power_supply_config psy_cfg = {};
6464 const char *port_dev_name = dev_name(port->dev);
6465 size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
6466 strlen(port_dev_name) + 1;
6467 char *psy_name;
6468
6469 psy_cfg.drv_data = port;
6470 psy_cfg.fwnode = dev_fwnode(port->dev);
6471 psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
6472 if (!psy_name)
6473 return -ENOMEM;
6474
6475 snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
6476 port_dev_name);
6477 port->psy_desc.name = psy_name;
6478 port->psy_desc.type = POWER_SUPPLY_TYPE_USB,
6479 port->psy_desc.usb_types = tcpm_psy_usb_types;
6480 port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
6481 port->psy_desc.properties = tcpm_psy_props,
6482 port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props),
6483 port->psy_desc.get_property = tcpm_psy_get_prop,
6484 port->psy_desc.set_property = tcpm_psy_set_prop,
6485 port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable,
6486
6487 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
6488
6489 port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
6490 &psy_cfg);
6491
6492 return PTR_ERR_OR_ZERO(port->psy);
6493 }
6494
/* State-machine delay expired: defer the work to the port's kthread worker. */
static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);

	kthread_queue_work(port->wq, &port->state_machine);
	return HRTIMER_NORESTART;
}
6502
/* VDM state-machine delay expired: defer the work to the kthread worker. */
static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);

	kthread_queue_work(port->wq, &port->vdm_state_machine);
	return HRTIMER_NORESTART;
}
6510
/* FRS enable delay expired: defer the enable work to the kthread worker. */
static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);

	kthread_queue_work(port->wq, &port->enable_frs);
	return HRTIMER_NORESTART;
}
6518
/* Discover-identity retry delay expired: queue the send-discover work. */
static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);

	kthread_queue_work(port->wq, &port->send_discover_work);
	return HRTIMER_NORESTART;
}
6526
/*
 * tcpm_register_port - Allocate and initialize a TCPM port.
 * @dev: parent device
 * @tcpc: low-level TCPC operations; all callbacks checked below are mandatory
 *
 * Sets up locks, the dedicated kthread worker, work items and their
 * hrtimers, reads capabilities from firmware, and registers the power
 * supply and type-C port before starting the state machine.
 *
 * Returns the new port, or an ERR_PTR() on failure.
 */
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
	struct tcpm_port *port;
	int err;

	/* Every one of these TCPC callbacks is required. */
	if (!dev || !tcpc ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(-EINVAL);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->dev = dev;
	port->tcpc = tcpc;

	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);
#ifdef CONFIG_NO_GKI
	mutex_init(&port->pd_handler_lock);
#endif

	/* Dedicated worker; its task runs at FIFO priority for PD timing. */
	port->wq = kthread_create_worker(0, dev_name(dev));
	if (IS_ERR(port->wq))
		return ERR_CAST(port->wq);
	sched_set_fifo(port->wq->task);

	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
	/* Each hrtimer just queues its matching work item on the worker. */
	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->state_machine_timer.function = state_machine_timer_handler;
	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->enable_frs_timer.function = enable_frs_timer_handler;
	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->send_discover_timer.function = send_discover_timer_handler;

	spin_lock_init(&port->pd_event_lock);

	init_completion(&port->tx_complete);
	init_completion(&port->swap_complete);
	init_completion(&port->pps_complete);
	tcpm_debugfs_init(port);

	/* Parse firmware-described capabilities (PDOs, roles, FRS, VDOs). */
	err = tcpm_fw_get_caps(port, tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;

	port->try_role = port->typec_caps.prefer_role;

	port->typec_caps.fwnode = tcpc->fwnode;
	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
	port->typec_caps.svdm_version = SVDM_VER_2_0;
	port->typec_caps.driver_data = port;
	port->typec_caps.ops = &tcpm_ops;
	port->typec_caps.orientation_aware = 1;

	port->partner_desc.identity = &port->partner_ident;
	port->port_type = port->typec_caps.type;

	port->role_sw = usb_role_switch_get(port->dev);
	if (IS_ERR(port->role_sw)) {
		err = PTR_ERR(port->role_sw);
		goto out_destroy_wq;
	}

	err = devm_tcpm_psy_register(port);
	if (err)
		goto out_role_sw_put;
	power_supply_changed(port->psy);

	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
	if (IS_ERR(port->typec_port)) {
		err = PTR_ERR(port->typec_port);
		goto out_role_sw_put;
	}

	typec_port_register_altmodes(port->typec_port,
				     &tcpm_altmode_ops, port,
				     port->port_altmode, ALTMODE_DISCOVERY_MAX);

	/* Start the state machine under the port lock. */
	mutex_lock(&port->lock);
	tcpm_init(port);
	mutex_unlock(&port->lock);

	tcpm_log(port, "%s: registered", dev_name(dev));
	return port;

out_role_sw_put:
	usb_role_switch_put(port->role_sw);
out_destroy_wq:
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(port->wq);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);
6629
/*
 * tcpm_unregister_port - Tear down a port created by tcpm_register_port().
 *
 * Timers are cancelled first so no further work is queued while the port
 * is reset and its type-C objects are unregistered; the kthread worker is
 * destroyed last.
 */
void tcpm_unregister_port(struct tcpm_port *port)
{
	int i;

	hrtimer_cancel(&port->send_discover_timer);
	hrtimer_cancel(&port->enable_frs_timer);
	hrtimer_cancel(&port->vdm_state_machine_timer);
	hrtimer_cancel(&port->state_machine_timer);

	tcpm_reset_port(port);
	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(port->port_altmode[i]);
	typec_unregister_port(port->typec_port);
	usb_role_switch_put(port->role_sw);
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(port->wq);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
6648
6649 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
6650 MODULE_DESCRIPTION("USB Type-C Port Manager");
6651 MODULE_LICENSE("GPL");
6652