1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Copyright 2015-2017 Google, Inc |
4 | * |
5 | * USB Power Delivery protocol stack. |
6 | */ |
7 | |
8 | #include <linux/completion.h> |
9 | #include <linux/debugfs.h> |
10 | #include <linux/device.h> |
11 | #include <linux/hrtimer.h> |
12 | #include <linux/jiffies.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/kthread.h> |
15 | #include <linux/module.h> |
16 | #include <linux/mutex.h> |
17 | #include <linux/power_supply.h> |
18 | #include <linux/proc_fs.h> |
19 | #include <linux/property.h> |
20 | #include <linux/sched/clock.h> |
21 | #include <linux/seq_file.h> |
22 | #include <linux/slab.h> |
23 | #include <linux/spinlock.h> |
24 | #include <linux/usb.h> |
25 | #include <linux/usb/pd.h> |
26 | #include <linux/usb/pd_ado.h> |
27 | #include <linux/usb/pd_bdo.h> |
28 | #include <linux/usb/pd_ext_sdb.h> |
29 | #include <linux/usb/pd_vdo.h> |
30 | #include <linux/usb/role.h> |
31 | #include <linux/usb/tcpm.h> |
32 | #include <linux/usb/typec_altmode.h> |
33 | |
34 | #include <uapi/linux/sched/types.h> |
35 | |
/*
 * X-macro list of every state of the TCPM port state machine.  It is
 * expanded twice below: with GENERATE_ENUM to build enum tcpm_state and
 * with GENERATE_STRING to build the matching tcpm_states[] name table,
 * so the enum and its printable names stay in sync by construction.
 */
#define FOREACH_STATE(S)			\
	S(INVALID_STATE),			\
	S(TOGGLING),			\
	S(CHECK_CONTAMINANT),			\
	S(SRC_UNATTACHED),			\
	S(SRC_ATTACH_WAIT),			\
	S(SRC_ATTACHED),			\
	S(SRC_STARTUP),				\
	S(SRC_SEND_CAPABILITIES),		\
	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
	S(SRC_NEGOTIATE_CAPABILITIES),		\
	S(SRC_TRANSITION_SUPPLY),		\
	S(SRC_READY),				\
	S(SRC_WAIT_NEW_CAPABILITIES),		\
						\
	S(SNK_UNATTACHED),			\
	S(SNK_ATTACH_WAIT),			\
	S(SNK_DEBOUNCED),			\
	S(SNK_ATTACHED),			\
	S(SNK_STARTUP),				\
	S(SNK_DISCOVERY),			\
	S(SNK_DISCOVERY_DEBOUNCE),		\
	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
	S(SNK_WAIT_CAPABILITIES),		\
	S(SNK_NEGOTIATE_CAPABILITIES),		\
	S(SNK_NEGOTIATE_PPS_CAPABILITIES),	\
	S(SNK_TRANSITION_SINK),			\
	S(SNK_TRANSITION_SINK_VBUS),		\
	S(SNK_READY),				\
						\
	S(ACC_UNATTACHED),			\
	S(DEBUG_ACC_ATTACHED),			\
	S(AUDIO_ACC_ATTACHED),			\
	S(AUDIO_ACC_DEBOUNCE),			\
						\
	S(HARD_RESET_SEND),			\
	S(HARD_RESET_START),			\
	S(SRC_HARD_RESET_VBUS_OFF),		\
	S(SRC_HARD_RESET_VBUS_ON),		\
	S(SNK_HARD_RESET_SINK_OFF),		\
	S(SNK_HARD_RESET_WAIT_VBUS),		\
	S(SNK_HARD_RESET_SINK_ON),		\
						\
	S(SOFT_RESET),				\
	S(SRC_SOFT_RESET_WAIT_SNK_TX),		\
	S(SNK_SOFT_RESET),			\
	S(SOFT_RESET_SEND),			\
						\
	S(DR_SWAP_ACCEPT),			\
	S(DR_SWAP_SEND),			\
	S(DR_SWAP_SEND_TIMEOUT),		\
	S(DR_SWAP_CANCEL),			\
	S(DR_SWAP_CHANGE_DR),			\
						\
	S(PR_SWAP_ACCEPT),			\
	S(PR_SWAP_SEND),			\
	S(PR_SWAP_SEND_TIMEOUT),		\
	S(PR_SWAP_CANCEL),			\
	S(PR_SWAP_START),			\
	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
	S(PR_SWAP_SRC_SNK_SINK_ON),		\
	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP),    \
						\
	S(VCONN_SWAP_ACCEPT),			\
	S(VCONN_SWAP_SEND),			\
	S(VCONN_SWAP_SEND_TIMEOUT),		\
	S(VCONN_SWAP_CANCEL),			\
	S(VCONN_SWAP_START),			\
	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
	S(VCONN_SWAP_TURN_ON_VCONN),		\
	S(VCONN_SWAP_TURN_OFF_VCONN),		\
	S(VCONN_SWAP_SEND_SOFT_RESET),		\
						\
	S(FR_SWAP_SEND),			\
	S(FR_SWAP_SEND_TIMEOUT),		\
	S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF),			\
	S(FR_SWAP_SNK_SRC_NEW_SINK_READY),		\
	S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED),	\
	S(FR_SWAP_CANCEL),			\
						\
	S(SNK_TRY),				\
	S(SNK_TRY_WAIT),			\
	S(SNK_TRY_WAIT_DEBOUNCE),               \
	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),    \
	S(SRC_TRYWAIT),				\
	S(SRC_TRYWAIT_DEBOUNCE),		\
	S(SRC_TRYWAIT_UNATTACHED),		\
						\
	S(SRC_TRY),				\
	S(SRC_TRY_WAIT),                        \
	S(SRC_TRY_DEBOUNCE),			\
	S(SNK_TRYWAIT),				\
	S(SNK_TRYWAIT_DEBOUNCE),		\
	S(SNK_TRYWAIT_VBUS),			\
	S(BIST_RX),				\
						\
	S(GET_STATUS_SEND),			\
	S(GET_STATUS_SEND_TIMEOUT),		\
	S(GET_PPS_STATUS_SEND),			\
	S(GET_PPS_STATUS_SEND_TIMEOUT),		\
						\
	S(GET_SINK_CAP),			\
	S(GET_SINK_CAP_TIMEOUT),		\
						\
	S(ERROR_RECOVERY),			\
	S(PORT_RESET),				\
	S(PORT_RESET_WAIT_OFF),			\
						\
	S(AMS_START),				\
	S(CHUNK_NOT_SUPP),			\
						\
	S(SRC_VDM_IDENTITY_REQUEST)
152 | |
/*
 * X-macro list of the Atomic Message Sequences (AMS) defined by the USB PD
 * specification.  Expanded below into enum tcpm_ams and the matching
 * tcpm_ams_str[] name table.
 */
#define FOREACH_AMS(S)			\
	S(NONE_AMS),			\
	S(POWER_NEGOTIATION),		\
	S(GOTOMIN),			\
	S(SOFT_RESET_AMS),		\
	S(HARD_RESET),			\
	S(CABLE_RESET),			\
	S(GET_SOURCE_CAPABILITIES),	\
	S(GET_SINK_CAPABILITIES),	\
	S(POWER_ROLE_SWAP),		\
	S(FAST_ROLE_SWAP),		\
	S(DATA_ROLE_SWAP),		\
	S(VCONN_SWAP),			\
	S(SOURCE_ALERT),		\
	S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
	S(GETTING_SOURCE_SINK_STATUS),	\
	S(GETTING_BATTERY_CAPABILITIES), \
	S(GETTING_BATTERY_STATUS),	\
	S(GETTING_MANUFACTURER_INFORMATION), \
	S(SECURITY),			\
	S(FIRMWARE_UPDATE),		\
	S(DISCOVER_IDENTITY),		\
	S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY),	\
	S(DISCOVER_SVIDS),		\
	S(DISCOVER_MODES),		\
	S(DFP_TO_UFP_ENTER_MODE),	\
	S(DFP_TO_UFP_EXIT_MODE),	\
	S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
	S(DFP_TO_CABLE_PLUG_EXIT_MODE),	\
	S(ATTENTION),			\
	S(BIST),			\
	S(UNSTRUCTURED_VDMS),		\
	S(STRUCTURED_VDMS),		\
	S(COUNTRY_INFO),		\
	S(COUNTRY_CODES)
188 | |
/* X-macro expanders: bare enumerator vs. stringified name. */
#define GENERATE_ENUM(e)	e
#define GENERATE_STRING(s)	#s

/* State machine states, generated from FOREACH_STATE(). */
enum tcpm_state {
	FOREACH_STATE(GENERATE_ENUM)
};

/* Printable state names, index-matched to enum tcpm_state. */
static const char * const tcpm_states[] = {
	FOREACH_STATE(GENERATE_STRING)
};

/* Atomic Message Sequence identifiers, generated from FOREACH_AMS(). */
enum tcpm_ams {
	FOREACH_AMS(GENERATE_ENUM)
};

/* Printable AMS names, index-matched to enum tcpm_ams. */
static const char * const tcpm_ams_str[] = {
	FOREACH_AMS(GENERATE_STRING)
};
207 | |
/*
 * State of the Vendor Defined Message (VDM) sub-state-machine.
 * Negative values are terminal error states, 0 means idle/done, and
 * anything > 0 represents an active state.
 */
enum vdm_states {
	VDM_STATE_ERR_BUSY = -3,	/* responder stayed Busy after retries */
	VDM_STATE_ERR_SEND = -2,	/* transmit failed */
	VDM_STATE_ERR_TMOUT = -1,	/* no response within the timeout */
	VDM_STATE_DONE = 0,
	/* Anything >0 represents an active state */
	VDM_STATE_READY = 1,		/* ready to process a VDM */
	VDM_STATE_BUSY = 2,		/* waiting for a response */
	VDM_STATE_WAIT_RSP_BUSY = 3,	/* responder replied Busy; retry later */
	VDM_STATE_SEND_MESSAGE = 4,	/* queued VDM pending transmission */
};
219 | |
/*
 * Message queued for transmission by the state machine
 * (see port->queued_message).
 */
enum pd_msg_request {
	PD_MSG_NONE = 0,		/* nothing queued */
	PD_MSG_CTRL_REJECT,		/* Reject control message */
	PD_MSG_CTRL_WAIT,		/* Wait control message */
	PD_MSG_CTRL_NOT_SUPP,		/* Not_Supported control message */
	PD_MSG_DATA_SINK_CAP,		/* Sink_Capabilities data message */
	PD_MSG_DATA_SOURCE_CAP,		/* Source_Capabilities data message */
};
228 | |
/*
 * Action to take on the partner altmode device after processing a VDM.
 */
enum adev_actions {
	ADEV_NONE = 0,
	ADEV_NOTIFY_USB_AND_QUEUE_VDM,
	ADEV_QUEUE_VDM,
	ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
	ADEV_ATTENTION,
};
236 | |
237 | /* |
238 | * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap. |
239 | * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0, |
240 | * Version 1.2" |
241 | */ |
242 | enum frs_typec_current { |
243 | FRS_NOT_SUPPORTED, |
244 | FRS_DEFAULT_POWER, |
245 | FRS_5V_1P5A, |
246 | FRS_5V_3A, |
247 | }; |
248 | |
/* Events from low level driver, accumulated in port->pd_events */

#define TCPM_CC_EVENT		BIT(0)	/* CC line status changed */
#define TCPM_VBUS_EVENT		BIT(1)	/* VBUS presence changed */
#define TCPM_RESET_EVENT	BIT(2)	/* hard reset received */
#define TCPM_FRS_EVENT		BIT(3)	/* fast role swap signal */
#define TCPM_SOURCING_VBUS	BIT(4)	/* TCPC started sourcing VBUS */
#define TCPM_PORT_CLEAN		BIT(5)	/* contaminant check finished clean */
#define TCPM_PORT_ERROR		BIT(6)	/* unrecoverable port error */

/* Debugfs log ring buffer geometry */
#define LOG_BUFFER_ENTRIES	1024
#define LOG_BUFFER_ENTRY_SIZE	128

/* Alternate mode support */

#define SVID_DISCOVERY_MAX	16
#define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)

/* Retry intervals, in milliseconds */
#define GET_SINK_CAP_RETRY_MS	100
#define SEND_DISCOVER_RETRY_MS	100
269 | |
/* Progress of alternate mode discovery for one SOP* level. */
struct pd_mode_data {
	int svid_index;		/* current SVID index		*/
	int nsvids;		/* number of discovered SVIDs	*/
	u16 svids[SVID_DISCOVERY_MAX];
	int altmodes;		/* number of alternate modes	*/
	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
};
277 | |
278 | /* |
279 | * @min_volt: Actual min voltage at the local port |
280 | * @req_min_volt: Requested min voltage to the port partner |
281 | * @max_volt: Actual max voltage at the local port |
282 | * @req_max_volt: Requested max voltage to the port partner |
283 | * @max_curr: Actual max current at the local port |
284 | * @req_max_curr: Requested max current of the port partner |
285 | * @req_out_volt: Requested output voltage to the port partner |
286 | * @req_op_curr: Requested operating current to the port partner |
287 | * @supported: Parter has at least one APDO hence supports PPS |
288 | * @active: PPS mode is active |
289 | */ |
290 | struct pd_pps_data { |
291 | u32 min_volt; |
292 | u32 req_min_volt; |
293 | u32 max_volt; |
294 | u32 req_max_volt; |
295 | u32 max_curr; |
296 | u32 req_max_curr; |
297 | u32 req_out_volt; |
298 | u32 req_op_curr; |
299 | bool supported; |
300 | bool active; |
301 | }; |
302 | |
/*
 * One selectable USB Power Delivery configuration: the PD object itself
 * plus its registered source/sink capabilities and descriptors.
 */
struct pd_data {
	struct usb_power_delivery *pd;
	struct usb_power_delivery_capabilities *source_cap;
	struct usb_power_delivery_capabilities_desc source_desc;
	struct usb_power_delivery_capabilities *sink_cap;
	struct usb_power_delivery_capabilities_desc sink_desc;
	unsigned int operating_snk_mw;	/* operating power as sink, in mW */
};
311 | |
/*
 * struct tcpm_port - runtime state of one USB Type-C port driven by the
 * TCPM state machine.  @lock is the state machine lock (see its inline
 * comment); field groups are annotated inline below.
 */
struct tcpm_port {
	struct device *dev;

	struct mutex lock;		/* tcpm state machine lock */
	struct kthread_worker *wq;

	struct typec_capability typec_caps;
	struct typec_port *typec_port;

	struct tcpc_dev	*tcpc;		/* low level TCPC driver interface */
	struct usb_role_switch *role_sw;

	/* Current roles and power operation mode */
	enum typec_role vconn_role;
	enum typec_role pwr_role;
	enum typec_data_role data_role;
	enum typec_pwr_opmode pwr_opmode;

	/* Port partner identity and registration */
	struct usb_pd_identity partner_ident;
	struct typec_partner_desc partner_desc;
	struct typec_partner *partner;

	/* Cable / cable plug (SOP') identity and registration */
	struct usb_pd_identity cable_ident;
	struct typec_cable_desc cable_desc;
	struct typec_cable *cable;
	struct typec_plug_desc plug_prime_desc;
	struct typec_plug *plug_prime;

	enum typec_cc_status cc_req;	/* last CC value requested via set_cc */
	enum typec_cc_status src_rp;	/* work only if pd_supported == false */

	/* Last observed CC line status and active polarity */
	enum typec_cc_status cc1;
	enum typec_cc_status cc2;
	enum typec_cc_polarity polarity;

	bool attached;
	bool connected;
	bool registered;
	bool pd_supported;
	enum typec_port_type port_type;

	/*
	 * Set to true when vbus is greater than VSAFE5V min.
	 * Set to false when vbus falls below vSinkDisconnect max threshold.
	 */
	bool vbus_present;

	/*
	 * Set to true when vbus is less than VSAFE0V max.
	 * Set to false when vbus is greater than VSAFE0V max.
	 */
	bool vbus_vsafe0v;

	bool vbus_never_low;
	bool vbus_source;
	bool vbus_charge;

	/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
	bool send_discover;
	bool op_vsafe5v;

	/* Try.SRC / Try.SNK configuration and attempt counters */
	int try_role;
	int try_snk_count;
	int try_src_count;

	enum pd_msg_request queued_message;

	/* State machine bookkeeping and delayed-transition support */
	enum tcpm_state enter_state;
	enum tcpm_state prev_state;
	enum tcpm_state state;
	enum tcpm_state delayed_state;
	ktime_t delayed_runtime;
	unsigned long delay_ms;

	spinlock_t pd_event_lock;	/* protects pd_events */
	u32 pd_events;			/* pending TCPM_*_EVENT bits */

	/* Work items and their wakeup timers */
	struct kthread_work event_work;
	struct hrtimer state_machine_timer;
	struct kthread_work state_machine;
	struct hrtimer vdm_state_machine_timer;
	struct kthread_work vdm_state_machine;
	struct hrtimer enable_frs_timer;
	struct kthread_work enable_frs;
	struct hrtimer send_discover_timer;
	struct kthread_work send_discover_work;
	bool state_machine_running;
	/* Set to true when VDM State Machine has following actions. */
	bool vdm_sm_running;

	/* PD message transmit completion, signalled by the TCPC driver */
	struct completion tx_complete;
	enum tcpm_transmit_status tx_status;

	struct mutex swap_lock;		/* swap command lock */
	bool swap_pending;
	bool non_pd_role_swap;
	struct completion swap_complete;
	int swap_status;

	/* Negotiated PD contract state (SOP) */
	unsigned int negotiated_rev;
	unsigned int message_id;
	unsigned int caps_count;
	unsigned int hard_reset_count;
	bool pd_capable;
	bool explicit_contract;
	unsigned int rx_msgid;

	/* USB PD objects */
	struct usb_power_delivery **pds;
	struct pd_data **pd_list;
	struct usb_power_delivery_capabilities *port_source_caps;
	struct usb_power_delivery_capabilities *port_sink_caps;
	struct usb_power_delivery *partner_pd;
	struct usb_power_delivery_capabilities *partner_source_caps;
	struct usb_power_delivery_capabilities *partner_sink_caps;
	struct usb_power_delivery *selected_pd;

	/* Partner capabilities/requests */
	u32 sink_request;
	u32 source_caps[PDO_MAX_OBJECTS];
	unsigned int nr_source_caps;
	u32 sink_caps[PDO_MAX_OBJECTS];
	unsigned int nr_sink_caps;

	/* Local capabilities */
	unsigned int pd_count;
	u32 src_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_src_pdo;
	u32 snk_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_snk_pdo;
	u32 snk_vdo_v1[VDO_MAX_OBJECTS];
	unsigned int nr_snk_vdo_v1;
	u32 snk_vdo[VDO_MAX_OBJECTS];
	unsigned int nr_snk_vdo;

	unsigned int operating_snk_mw;
	bool update_sink_caps;

	/* Requested current / voltage to the port partner */
	u32 req_current_limit;
	u32 req_supply_voltage;
	/* Actual current / voltage limit of the local port */
	u32 current_limit;
	u32 supply_voltage;

	/* Used to export TA voltage and current */
	struct power_supply *psy;
	struct power_supply_desc psy_desc;
	enum power_supply_usb_type usb_type;

	u32 bist_request;

	/* PD state for Vendor Defined Messages */
	enum vdm_states vdm_state;
	u32 vdm_retries;
	/* next Vendor Defined Message to send */
	u32 vdo_data[VDO_MAX_SIZE];
	u8 vdo_count;
	/* VDO to retry if UFP responder replied busy */
	u32 vdo_retry;

	/* PPS */
	struct pd_pps_data pps_data;
	struct completion pps_complete;
	bool pps_pending;
	int pps_status;

	/* Alternate mode data */
	struct pd_mode_data mode_data;
	struct pd_mode_data mode_data_prime;
	struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
	struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
	struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];

	/* Deadline in jiffies to exit src_try_wait state */
	unsigned long max_wait;

	/* port belongs to a self powered device */
	bool self_powered;

	/* Sink FRS */
	enum frs_typec_current new_source_frs_current;

	/* Sink caps have been queried */
	bool sink_cap_done;

	/* Collision Avoidance and Atomic Message Sequence */
	enum tcpm_state upcoming_state;
	enum tcpm_ams ams;
	enum tcpm_ams next_ams;
	bool in_ams;

	/* Auto vbus discharge status */
	bool auto_vbus_discharge_enabled;

	/*
	 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
	 * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
	 * SNK_READY for non-pd link.
	 */
	bool slow_charger_loop;

	/*
	 * When true indicates that the lower level drivers indicate potential presence
	 * of contaminant in the connector pins based on the tcpm state machine
	 * transitions.
	 */
	bool potential_contaminant;

	/* SOP* Related Fields */
	/*
	 * Flag to determine if SOP' Discover Identity is available. The flag
	 * is set if Discover Identity on SOP' does not immediately follow
	 * Discover Identity on SOP.
	 */
	bool send_discover_prime;
	/*
	 * tx_sop_type determines which SOP* a message is being sent on.
	 * For messages that are queued and not sent immediately such as in
	 * tcpm_queue_message or messages that send after state changes,
	 * the tx_sop_type is set accordingly.
	 */
	enum tcpm_transmit_type tx_sop_type;
	/*
	 * Prior to discovering the port partner's Specification Revision, the
	 * Vconn source and cable plug will use the lower of their two revisions.
	 *
	 * When the port partner's Specification Revision is discovered, the following
	 * rules are put in place.
	 * 1. If the cable revision (1) is lower than the revision negotiated
	 * between the port and partner (2), the port and partner will communicate
	 * on revision (2), but the port and cable will communicate on revision (1).
	 * 2. If the cable revision (1) is higher than the revision negotiated
	 * between the port and partner (2), the port and partner will communicate
	 * on revision (2), and the port and cable will communicate on revision (2)
	 * as well.
	 */
	unsigned int negotiated_rev_prime;
	/*
	 * Each SOP* type must maintain their own tx and rx message IDs
	 */
	unsigned int message_id_prime;
	unsigned int rx_msgid_prime;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct mutex logbuffer_lock;	/* log buffer access lock */
	int logbuffer_head;
	int logbuffer_tail;
	u8 *logbuffer[LOG_BUFFER_ENTRIES];
#endif
};
562 | |
/* A received PD message queued for processing on the port's kworker. */
struct pd_rx_event {
	struct kthread_work work;
	struct tcpm_port *port;
	struct pd_message msg;
	enum tcpm_transmit_type rx_sop_type;	/* SOP* the message arrived on */
};
569 | |
/* Printable names for the PD specification revisions, indexed by PD_REV*. */
static const char * const pd_rev[] = {
	[PD_REV10]		= "rev1",
	[PD_REV20]		= "rev2",
	[PD_REV30]		= "rev3",
};
575 | |
/* A pull-up (Rp) of any strength on a CC line means a source is present. */
#define tcpm_cc_is_sink(cc) \
	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
	 (cc) == TYPEC_CC_RP_3_0)

/* As long as cc is pulled up, we can consider it as sink. */
#define tcpm_port_is_sink(port) \
	(tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))

/* Rd on a CC line means a sink is attached there. */
#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)

/* Source attach: exactly one CC line shows Rd (Rd/Rd is a debug accessory). */
#define tcpm_port_is_source(port) \
	((tcpm_cc_is_source((port)->cc1) && \
	 !tcpm_cc_is_source((port)->cc2)) || \
	 (tcpm_cc_is_source((port)->cc2) && \
	  !tcpm_cc_is_source((port)->cc1)))

#define tcpm_port_is_debug(port) \
	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))

#define tcpm_port_is_audio(port) \
	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))

#define tcpm_port_is_audio_detached(port) \
	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))

/* Try.SNK / Try.SRC apply only on the first attempt of a DRP port. */
#define tcpm_try_snk(port) \
	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
	(port)->port_type == TYPEC_PORT_DRP)

#define tcpm_try_src(port) \
	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
	(port)->port_type == TYPEC_PORT_DRP)

/* Default data role implied by the power role, honoring a fixed data port. */
#define tcpm_data_role_for_source(port) \
	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
	TYPEC_DEVICE : TYPEC_HOST)

#define tcpm_data_role_for_sink(port) \
	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
	TYPEC_HOST : TYPEC_DEVICE)

/* PD3.0 collision avoidance: sink may transmit only when Rp is SinkTxOk (3A). */
#define tcpm_sink_tx_ok(port) \
	(tcpm_port_is_sink(port) && \
	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))

#define tcpm_wait_for_discharge(port) \
	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
626 | |
627 | static enum tcpm_state tcpm_default_state(struct tcpm_port *port) |
628 | { |
629 | if (port->port_type == TYPEC_PORT_DRP) { |
630 | if (port->try_role == TYPEC_SINK) |
631 | return SNK_UNATTACHED; |
632 | else if (port->try_role == TYPEC_SOURCE) |
633 | return SRC_UNATTACHED; |
634 | /* Fall through to return SRC_UNATTACHED */ |
635 | } else if (port->port_type == TYPEC_PORT_SNK) { |
636 | return SNK_UNATTACHED; |
637 | } |
638 | return SRC_UNATTACHED; |
639 | } |
640 | |
641 | static bool tcpm_port_is_disconnected(struct tcpm_port *port) |
642 | { |
643 | return (!port->attached && port->cc1 == TYPEC_CC_OPEN && |
644 | port->cc2 == TYPEC_CC_OPEN) || |
645 | (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 && |
646 | port->cc1 == TYPEC_CC_OPEN) || |
647 | (port->polarity == TYPEC_POLARITY_CC2 && |
648 | port->cc2 == TYPEC_CC_OPEN))); |
649 | } |
650 | |
651 | /* |
652 | * Logging |
653 | */ |
654 | |
655 | #ifdef CONFIG_DEBUG_FS |
656 | |
657 | static bool tcpm_log_full(struct tcpm_port *port) |
658 | { |
659 | return port->logbuffer_tail == |
660 | (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES; |
661 | } |
662 | |
663 | __printf(2, 0) |
664 | static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args) |
665 | { |
666 | char tmpbuffer[LOG_BUFFER_ENTRY_SIZE]; |
667 | u64 ts_nsec = local_clock(); |
668 | unsigned long rem_nsec; |
669 | |
670 | mutex_lock(&port->logbuffer_lock); |
671 | if (!port->logbuffer[port->logbuffer_head]) { |
672 | port->logbuffer[port->logbuffer_head] = |
673 | kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL); |
674 | if (!port->logbuffer[port->logbuffer_head]) { |
675 | mutex_unlock(lock: &port->logbuffer_lock); |
676 | return; |
677 | } |
678 | } |
679 | |
680 | vsnprintf(buf: tmpbuffer, size: sizeof(tmpbuffer), fmt, args); |
681 | |
682 | if (tcpm_log_full(port)) { |
683 | port->logbuffer_head = max(port->logbuffer_head - 1, 0); |
684 | strcpy(p: tmpbuffer, q: "overflow" ); |
685 | } |
686 | |
687 | if (port->logbuffer_head < 0 || |
688 | port->logbuffer_head >= LOG_BUFFER_ENTRIES) { |
689 | dev_warn(port->dev, |
690 | "Bad log buffer index %d\n" , port->logbuffer_head); |
691 | goto abort; |
692 | } |
693 | |
694 | if (!port->logbuffer[port->logbuffer_head]) { |
695 | dev_warn(port->dev, |
696 | "Log buffer index %d is NULL\n" , port->logbuffer_head); |
697 | goto abort; |
698 | } |
699 | |
700 | rem_nsec = do_div(ts_nsec, 1000000000); |
701 | scnprintf(buf: port->logbuffer[port->logbuffer_head], |
702 | LOG_BUFFER_ENTRY_SIZE, fmt: "[%5lu.%06lu] %s" , |
703 | (unsigned long)ts_nsec, rem_nsec / 1000, |
704 | tmpbuffer); |
705 | port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES; |
706 | |
707 | abort: |
708 | mutex_unlock(lock: &port->logbuffer_lock); |
709 | } |
710 | |
711 | __printf(2, 3) |
712 | static void tcpm_log(struct tcpm_port *port, const char *fmt, ...) |
713 | { |
714 | va_list args; |
715 | |
716 | /* Do not log while disconnected and unattached */ |
717 | if (tcpm_port_is_disconnected(port) && |
718 | (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED || |
719 | port->state == TOGGLING || port->state == CHECK_CONTAMINANT)) |
720 | return; |
721 | |
722 | va_start(args, fmt); |
723 | _tcpm_log(port, fmt, args); |
724 | va_end(args); |
725 | } |
726 | |
727 | __printf(2, 3) |
728 | static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) |
729 | { |
730 | va_list args; |
731 | |
732 | va_start(args, fmt); |
733 | _tcpm_log(port, fmt, args); |
734 | va_end(args); |
735 | } |
736 | |
737 | static void tcpm_log_source_caps(struct tcpm_port *port) |
738 | { |
739 | int i; |
740 | |
741 | for (i = 0; i < port->nr_source_caps; i++) { |
742 | u32 pdo = port->source_caps[i]; |
743 | enum pd_pdo_type type = pdo_type(pdo); |
744 | char msg[64]; |
745 | |
746 | switch (type) { |
747 | case PDO_TYPE_FIXED: |
748 | scnprintf(buf: msg, size: sizeof(msg), |
749 | fmt: "%u mV, %u mA [%s%s%s%s%s%s]" , |
750 | pdo_fixed_voltage(pdo), |
751 | pdo_max_current(pdo), |
752 | (pdo & PDO_FIXED_DUAL_ROLE) ? |
753 | "R" : "" , |
754 | (pdo & PDO_FIXED_SUSPEND) ? |
755 | "S" : "" , |
756 | (pdo & PDO_FIXED_HIGHER_CAP) ? |
757 | "H" : "" , |
758 | (pdo & PDO_FIXED_USB_COMM) ? |
759 | "U" : "" , |
760 | (pdo & PDO_FIXED_DATA_SWAP) ? |
761 | "D" : "" , |
762 | (pdo & PDO_FIXED_EXTPOWER) ? |
763 | "E" : "" ); |
764 | break; |
765 | case PDO_TYPE_VAR: |
766 | scnprintf(buf: msg, size: sizeof(msg), |
767 | fmt: "%u-%u mV, %u mA" , |
768 | pdo_min_voltage(pdo), |
769 | pdo_max_voltage(pdo), |
770 | pdo_max_current(pdo)); |
771 | break; |
772 | case PDO_TYPE_BATT: |
773 | scnprintf(buf: msg, size: sizeof(msg), |
774 | fmt: "%u-%u mV, %u mW" , |
775 | pdo_min_voltage(pdo), |
776 | pdo_max_voltage(pdo), |
777 | pdo_max_power(pdo)); |
778 | break; |
779 | case PDO_TYPE_APDO: |
780 | if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) |
781 | scnprintf(buf: msg, size: sizeof(msg), |
782 | fmt: "%u-%u mV, %u mA" , |
783 | pdo_pps_apdo_min_voltage(pdo), |
784 | pdo_pps_apdo_max_voltage(pdo), |
785 | pdo_pps_apdo_max_current(pdo)); |
786 | else |
787 | strcpy(p: msg, q: "undefined APDO" ); |
788 | break; |
789 | default: |
790 | strcpy(p: msg, q: "undefined" ); |
791 | break; |
792 | } |
793 | tcpm_log(port, fmt: " PDO %d: type %d, %s" , |
794 | i, type, msg); |
795 | } |
796 | } |
797 | |
798 | static int tcpm_debug_show(struct seq_file *s, void *v) |
799 | { |
800 | struct tcpm_port *port = s->private; |
801 | int tail; |
802 | |
803 | mutex_lock(&port->logbuffer_lock); |
804 | tail = port->logbuffer_tail; |
805 | while (tail != port->logbuffer_head) { |
806 | seq_printf(m: s, fmt: "%s\n" , port->logbuffer[tail]); |
807 | tail = (tail + 1) % LOG_BUFFER_ENTRIES; |
808 | } |
809 | if (!seq_has_overflowed(m: s)) |
810 | port->logbuffer_tail = tail; |
811 | mutex_unlock(lock: &port->logbuffer_lock); |
812 | |
813 | return 0; |
814 | } |
815 | DEFINE_SHOW_ATTRIBUTE(tcpm_debug); |
816 | |
817 | static void tcpm_debugfs_init(struct tcpm_port *port) |
818 | { |
819 | char name[NAME_MAX]; |
820 | |
821 | mutex_init(&port->logbuffer_lock); |
822 | snprintf(buf: name, NAME_MAX, fmt: "tcpm-%s" , dev_name(dev: port->dev)); |
823 | port->dentry = debugfs_create_dir(name, parent: usb_debug_root); |
824 | debugfs_create_file(name: "log" , S_IFREG | 0444, parent: port->dentry, data: port, |
825 | fops: &tcpm_debug_fops); |
826 | } |
827 | |
828 | static void tcpm_debugfs_exit(struct tcpm_port *port) |
829 | { |
830 | int i; |
831 | |
832 | mutex_lock(&port->logbuffer_lock); |
833 | for (i = 0; i < LOG_BUFFER_ENTRIES; i++) { |
834 | kfree(objp: port->logbuffer[i]); |
835 | port->logbuffer[i] = NULL; |
836 | } |
837 | mutex_unlock(lock: &port->logbuffer_lock); |
838 | |
839 | debugfs_remove(dentry: port->dentry); |
840 | } |
841 | |
#else

/* No-op logging/debugfs stubs used when CONFIG_DEBUG_FS is disabled. */
__printf(2, 3)
static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
__printf(2, 3)
static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_source_caps(struct tcpm_port *port) { }
static void tcpm_debugfs_init(const struct tcpm_port *port) { }
static void tcpm_debugfs_exit(const struct tcpm_port *port) { }

#endif
853 | |
854 | static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc) |
855 | { |
856 | tcpm_log(port, fmt: "cc:=%d" , cc); |
857 | port->cc_req = cc; |
858 | port->tcpc->set_cc(port->tcpc, cc); |
859 | } |
860 | |
861 | static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable) |
862 | { |
863 | int ret = 0; |
864 | |
865 | if (port->tcpc->enable_auto_vbus_discharge) { |
866 | ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable); |
867 | tcpm_log_force(port, fmt: "%s vbus discharge ret:%d" , enable ? "enable" : "disable" , |
868 | ret); |
869 | if (!ret) |
870 | port->auto_vbus_discharge_enabled = enable; |
871 | } |
872 | |
873 | return ret; |
874 | } |
875 | |
876 | static void tcpm_apply_rc(struct tcpm_port *port) |
877 | { |
878 | /* |
879 | * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP |
880 | * when Vbus auto discharge on disconnect is enabled. |
881 | */ |
882 | if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) { |
883 | tcpm_log(port, fmt: "Apply_RC" ); |
884 | port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity); |
885 | tcpm_enable_auto_vbus_discharge(port, enable: false); |
886 | } |
887 | } |
888 | |
889 | /* |
890 | * Determine RP value to set based on maximum current supported |
891 | * by a port if configured as source. |
892 | * Returns CC value to report to link partner. |
893 | */ |
894 | static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port) |
895 | { |
896 | const u32 *src_pdo = port->src_pdo; |
897 | int nr_pdo = port->nr_src_pdo; |
898 | int i; |
899 | |
900 | if (!port->pd_supported) |
901 | return port->src_rp; |
902 | |
903 | /* |
904 | * Search for first entry with matching voltage. |
905 | * It should report the maximum supported current. |
906 | */ |
907 | for (i = 0; i < nr_pdo; i++) { |
908 | const u32 pdo = src_pdo[i]; |
909 | |
910 | if (pdo_type(pdo) == PDO_TYPE_FIXED && |
911 | pdo_fixed_voltage(pdo) == 5000) { |
912 | unsigned int curr = pdo_max_current(pdo); |
913 | |
914 | if (curr >= 3000) |
915 | return TYPEC_CC_RP_3_0; |
916 | else if (curr >= 1500) |
917 | return TYPEC_CC_RP_1_5; |
918 | return TYPEC_CC_RP_DEF; |
919 | } |
920 | } |
921 | |
922 | return TYPEC_CC_RP_DEF; |
923 | } |
924 | |
925 | static void tcpm_ams_finish(struct tcpm_port *port) |
926 | { |
927 | tcpm_log(port, fmt: "AMS %s finished" , tcpm_ams_str[port->ams]); |
928 | |
929 | if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) { |
930 | if (port->negotiated_rev >= PD_REV30) |
931 | tcpm_set_cc(port, SINK_TX_OK); |
932 | else |
933 | tcpm_set_cc(port, SINK_TX_NG); |
934 | } else if (port->pwr_role == TYPEC_SOURCE) { |
935 | tcpm_set_cc(port, cc: tcpm_rp_cc(port)); |
936 | } |
937 | |
938 | port->in_ams = false; |
939 | port->ams = NONE_AMS; |
940 | } |
941 | |
/*
 * Transmit a PD message (or a wire signal such as hard reset when @msg is
 * NULL) through the TCPC and wait for the result reported asynchronously
 * via tcpm_pd_transmit_complete().
 *
 * Must be called with port->lock held; the lock is dropped while waiting
 * on port->tx_complete so the TCPC interrupt path can deliver the result.
 *
 * Returns 0 on success, -ETIMEDOUT if the TCPC never reported completion
 * within PD_T_TCPC_TX_TIMEOUT, -EAGAIN if the message was discarded, or
 * -EIO / a negative TCPC driver error on failure.
 */
static int tcpm_pd_transmit(struct tcpm_port *port,
			    enum tcpm_transmit_type tx_sop_type,
			    const struct pd_message *msg)
{
	unsigned long timeout;
	int ret;
	unsigned int negotiated_rev;

	/* SOP and SOP' traffic negotiate their spec revisions independently. */
	switch (tx_sop_type) {
	case TCPC_TX_SOP_PRIME:
		negotiated_rev = port->negotiated_rev_prime;
		break;
	case TCPC_TX_SOP:
	default:
		negotiated_rev = port->negotiated_rev;
		break;
	}

	if (msg)
		tcpm_log(port, fmt: "PD TX, header: %#x" , le16_to_cpu(msg->header));
	else
		tcpm_log(port, fmt: "PD TX, type: %#x" , tx_sop_type);

	reinit_completion(x: &port->tx_complete);
	ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
	if (ret < 0)
		return ret;

	/* Drop the lock while waiting: completion arrives from the TCPC IRQ path. */
	mutex_unlock(lock: &port->lock);
	timeout = wait_for_completion_timeout(x: &port->tx_complete,
					      timeout: msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
	mutex_lock(&port->lock);
	if (!timeout)
		return -ETIMEDOUT;

	switch (port->tx_status) {
	case TCPC_TX_SUCCESS:
		/* MessageID increments modulo 8 per successfully sent message. */
		switch (tx_sop_type) {
		case TCPC_TX_SOP_PRIME:
			port->message_id_prime = (port->message_id_prime + 1) &
						 PD_HEADER_ID_MASK;
			break;
		case TCPC_TX_SOP:
		default:
			port->message_id = (port->message_id + 1) &
					   PD_HEADER_ID_MASK;
			break;
		}
		/*
		 * USB PD rev 2.0, 8.3.2.2.1:
		 * USB PD rev 3.0, 8.3.2.1.3:
		 * "... Note that every AMS is Interruptible until the first
		 * Message in the sequence has been successfully sent (GoodCRC
		 * Message received)."
		 */
		if (port->ams != NONE_AMS)
			port->in_ams = true;
		break;
	case TCPC_TX_DISCARDED:
		ret = -EAGAIN;
		break;
	case TCPC_TX_FAILED:
	default:
		ret = -EIO;
		break;
	}

	/* Some AMS don't expect responses. Finish them here. */
	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
		tcpm_ams_finish(port);

	return ret;
}
1015 | |
/*
 * Called by the TCPC driver (typically from its interrupt handler) to report
 * the outcome of a transmission started by tcpm_pd_transmit().  Records the
 * status and wakes the waiter blocked on port->tx_complete.
 */
void tcpm_pd_transmit_complete(struct tcpm_port *port,
			       enum tcpm_transmit_status status)
{
	tcpm_log(port, fmt: "PD TX complete, status: %u" , status);
	port->tx_status = status;
	complete(&port->tx_complete);
}
EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
1024 | |
1025 | static int tcpm_mux_set(struct tcpm_port *port, int state, |
1026 | enum usb_role usb_role, |
1027 | enum typec_orientation orientation) |
1028 | { |
1029 | int ret; |
1030 | |
1031 | tcpm_log(port, fmt: "Requesting mux state %d, usb-role %d, orientation %d" , |
1032 | state, usb_role, orientation); |
1033 | |
1034 | ret = typec_set_orientation(port: port->typec_port, orientation); |
1035 | if (ret) |
1036 | return ret; |
1037 | |
1038 | if (port->role_sw) { |
1039 | ret = usb_role_switch_set_role(sw: port->role_sw, role: usb_role); |
1040 | if (ret) |
1041 | return ret; |
1042 | } |
1043 | |
1044 | return typec_set_mode(port: port->typec_port, mode: state); |
1045 | } |
1046 | |
1047 | static int tcpm_set_polarity(struct tcpm_port *port, |
1048 | enum typec_cc_polarity polarity) |
1049 | { |
1050 | int ret; |
1051 | |
1052 | tcpm_log(port, fmt: "polarity %d" , polarity); |
1053 | |
1054 | ret = port->tcpc->set_polarity(port->tcpc, polarity); |
1055 | if (ret < 0) |
1056 | return ret; |
1057 | |
1058 | port->polarity = polarity; |
1059 | |
1060 | return 0; |
1061 | } |
1062 | |
1063 | static int tcpm_set_vconn(struct tcpm_port *port, bool enable) |
1064 | { |
1065 | int ret; |
1066 | |
1067 | tcpm_log(port, fmt: "vconn:=%d" , enable); |
1068 | |
1069 | ret = port->tcpc->set_vconn(port->tcpc, enable); |
1070 | if (!ret) { |
1071 | port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK; |
1072 | typec_set_vconn_role(port: port->typec_port, role: port->vconn_role); |
1073 | } |
1074 | |
1075 | return ret; |
1076 | } |
1077 | |
1078 | static u32 tcpm_get_current_limit(struct tcpm_port *port) |
1079 | { |
1080 | enum typec_cc_status cc; |
1081 | u32 limit; |
1082 | |
1083 | cc = port->polarity ? port->cc2 : port->cc1; |
1084 | switch (cc) { |
1085 | case TYPEC_CC_RP_1_5: |
1086 | limit = 1500; |
1087 | break; |
1088 | case TYPEC_CC_RP_3_0: |
1089 | limit = 3000; |
1090 | break; |
1091 | case TYPEC_CC_RP_DEF: |
1092 | default: |
1093 | if (port->tcpc->get_current_limit) |
1094 | limit = port->tcpc->get_current_limit(port->tcpc); |
1095 | else |
1096 | limit = 0; |
1097 | break; |
1098 | } |
1099 | |
1100 | return limit; |
1101 | } |
1102 | |
1103 | static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) |
1104 | { |
1105 | int ret = -EOPNOTSUPP; |
1106 | |
1107 | tcpm_log(port, fmt: "Setting voltage/current limit %u mV %u mA" , mv, max_ma); |
1108 | |
1109 | port->supply_voltage = mv; |
1110 | port->current_limit = max_ma; |
1111 | power_supply_changed(psy: port->psy); |
1112 | |
1113 | if (port->tcpc->set_current_limit) |
1114 | ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); |
1115 | |
1116 | return ret; |
1117 | } |
1118 | |
/*
 * Push the attach state down to the TCPC, re-asserting the port's current
 * power and data roles unchanged.
 */
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
				     port->data_role);
}
1124 | |
/*
 * Apply a power role / data role combination: derive the USB role for the
 * mux from the port's capability type (DRD / DFP-only / UFP-only), program
 * mux, TCPC and (optionally) orientation, then publish the new roles to
 * the Type-C class.
 *
 * Returns 0 on success, -ENOTSUPP when the requested data role is not
 * possible for this port type, or a negative error from the mux/TCPC.
 */
static int tcpm_set_roles(struct tcpm_port *port, bool attached,
			  enum typec_role role, enum typec_data_role data)
{
	enum typec_orientation orientation;
	enum usb_role usb_role;
	int ret;

	if (port->polarity == TYPEC_POLARITY_CC1)
		orientation = TYPEC_ORIENTATION_NORMAL;
	else
		orientation = TYPEC_ORIENTATION_REVERSE;

	if (port->typec_caps.data == TYPEC_PORT_DRD) {
		/* Dual-role data: host/device follows the data role directly. */
		if (data == TYPEC_HOST)
			usb_role = USB_ROLE_HOST;
		else
			usb_role = USB_ROLE_DEVICE;
	} else if (port->typec_caps.data == TYPEC_PORT_DFP) {
		/* Host-only port: can only be host, and only while sourcing. */
		if (data == TYPEC_HOST) {
			if (role == TYPEC_SOURCE)
				usb_role = USB_ROLE_HOST;
			else
				usb_role = USB_ROLE_NONE;
		} else {
			return -ENOTSUPP;
		}
	} else {
		/* Device-only port: can only be device, and only while sinking. */
		if (data == TYPEC_DEVICE) {
			if (role == TYPEC_SINK)
				usb_role = USB_ROLE_DEVICE;
			else
				usb_role = USB_ROLE_NONE;
		} else {
			return -ENOTSUPP;
		}
	}

	ret = tcpm_mux_set(port, state: TYPEC_STATE_USB, usb_role, orientation);
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
	if (ret < 0)
		return ret;

	/* Optional TCPC hook; only some controllers track orientation. */
	if (port->tcpc->set_orientation) {
		ret = port->tcpc->set_orientation(port->tcpc, orientation);
		if (ret < 0)
			return ret;
	}

	port->pwr_role = role;
	port->data_role = data;
	typec_set_data_role(port: port->typec_port, role: data);
	typec_set_pwr_role(port: port->typec_port, role);

	return 0;
}
1183 | |
1184 | static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role) |
1185 | { |
1186 | int ret; |
1187 | |
1188 | ret = port->tcpc->set_roles(port->tcpc, true, role, |
1189 | port->data_role); |
1190 | if (ret < 0) |
1191 | return ret; |
1192 | |
1193 | port->pwr_role = role; |
1194 | typec_set_pwr_role(port: port->typec_port, role); |
1195 | |
1196 | return 0; |
1197 | } |
1198 | |
1199 | /* |
1200 | * Transform the PDO to be compliant to PD rev2.0. |
1201 | * Return 0 if the PDO type is not defined in PD rev2.0. |
1202 | * Otherwise, return the converted PDO. |
1203 | */ |
1204 | static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role) |
1205 | { |
1206 | switch (pdo_type(pdo)) { |
1207 | case PDO_TYPE_FIXED: |
1208 | if (role == TYPEC_SINK) |
1209 | return pdo & ~PDO_FIXED_FRS_CURR_MASK; |
1210 | else |
1211 | return pdo & ~PDO_FIXED_UNCHUNK_EXT; |
1212 | case PDO_TYPE_VAR: |
1213 | case PDO_TYPE_BATT: |
1214 | return pdo; |
1215 | case PDO_TYPE_APDO: |
1216 | default: |
1217 | return 0; |
1218 | } |
1219 | } |
1220 | |
/*
 * Send our Source_Capabilities message.  Under PD 2.0 each PDO is first
 * stripped of rev3.0-only bits (APDOs are dropped entirely); if nothing
 * remains we are sink-only and answer with Reject instead.
 */
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	u32 pdo;
	unsigned int i, nr_pdo = 0;

	memset(&msg, 0, sizeof(msg));

	for (i = 0; i < port->nr_src_pdo; i++) {
		if (port->negotiated_rev >= PD_REV30) {
			msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
		} else {
			/* Legacy partner: forge PD 2.0-compliant PDOs. */
			pdo = tcpm_forge_legacy_pdo(port, pdo: port->src_pdo[i], role: TYPEC_SOURCE);
			if (pdo)
				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
		}
	}

	if (!nr_pdo) {
		/* No source capabilities defined, sink only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id,
					  nr_pdo);
	}

	return tcpm_pd_transmit(port, tx_sop_type: TCPC_TX_SOP, msg: &msg);
}
1257 | |
/*
 * Send our Sink_Capabilities message.  Mirrors tcpm_pd_send_source_caps():
 * under PD 2.0 rev3.0-only PDO bits are stripped and APDOs dropped; with
 * no remaining PDOs we are source-only and answer with Reject.
 */
static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	u32 pdo;
	unsigned int i, nr_pdo = 0;

	memset(&msg, 0, sizeof(msg));

	for (i = 0; i < port->nr_snk_pdo; i++) {
		if (port->negotiated_rev >= PD_REV30) {
			msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
		} else {
			/* Legacy partner: forge PD 2.0-compliant PDOs. */
			pdo = tcpm_forge_legacy_pdo(port, pdo: port->snk_pdo[i], role: TYPEC_SINK);
			if (pdo)
				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
		}
	}

	if (!nr_pdo) {
		/* No sink capabilities defined, source only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id,
					  nr_pdo);
	}

	return tcpm_pd_transmit(port, tx_sop_type: TCPC_TX_SOP, msg: &msg);
}
1294 | |
1295 | static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) |
1296 | { |
1297 | if (delay_ms) { |
1298 | hrtimer_start(timer: &port->state_machine_timer, tim: ms_to_ktime(ms: delay_ms), mode: HRTIMER_MODE_REL); |
1299 | } else { |
1300 | hrtimer_cancel(timer: &port->state_machine_timer); |
1301 | kthread_queue_work(worker: port->wq, work: &port->state_machine); |
1302 | } |
1303 | } |
1304 | |
1305 | static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) |
1306 | { |
1307 | if (delay_ms) { |
1308 | hrtimer_start(timer: &port->vdm_state_machine_timer, tim: ms_to_ktime(ms: delay_ms), |
1309 | mode: HRTIMER_MODE_REL); |
1310 | } else { |
1311 | hrtimer_cancel(timer: &port->vdm_state_machine_timer); |
1312 | kthread_queue_work(worker: port->wq, work: &port->vdm_state_machine); |
1313 | } |
1314 | } |
1315 | |
1316 | static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms) |
1317 | { |
1318 | if (delay_ms) { |
1319 | hrtimer_start(timer: &port->enable_frs_timer, tim: ms_to_ktime(ms: delay_ms), mode: HRTIMER_MODE_REL); |
1320 | } else { |
1321 | hrtimer_cancel(timer: &port->enable_frs_timer); |
1322 | kthread_queue_work(worker: port->wq, work: &port->enable_frs); |
1323 | } |
1324 | } |
1325 | |
1326 | static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms) |
1327 | { |
1328 | if (delay_ms) { |
1329 | hrtimer_start(timer: &port->send_discover_timer, tim: ms_to_ktime(ms: delay_ms), mode: HRTIMER_MODE_REL); |
1330 | } else { |
1331 | hrtimer_cancel(timer: &port->send_discover_timer); |
1332 | kthread_queue_work(worker: port->wq, work: &port->send_discover_work); |
1333 | } |
1334 | } |
1335 | |
/*
 * Request a state-machine transition, either immediately (delay_ms == 0)
 * or after delay_ms.  Delayed transitions are parked in delayed_state and
 * armed via the state machine timer; immediate ones update port->state
 * right away and kick the work item unless the state machine itself is
 * the caller.
 */
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
			   unsigned int delay_ms)
{
	if (delay_ms) {
		tcpm_log(port, fmt: "pending state change %s -> %s @ %u ms [%s %s]" ,
			 tcpm_states[port->state], tcpm_states[state], delay_ms,
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
		port->delayed_state = state;
		mod_tcpm_delayed_work(port, delay_ms);
		/* Remember the deadline so a suspended timer can be re-derived. */
		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
		port->delay_ms = delay_ms;
	} else {
		tcpm_log(port, fmt: "state change %s -> %s [%s %s]" ,
			 tcpm_states[port->state], tcpm_states[state],
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
		port->delayed_state = INVALID_STATE;
		port->prev_state = port->state;
		port->state = state;
		/*
		 * Don't re-queue the state machine work item if we're currently
		 * in the state machine and we're immediately changing states.
		 * tcpm_state_machine_work() will continue running the state
		 * machine.
		 */
		if (!port->state_machine_running)
			mod_tcpm_delayed_work(port, delay_ms: 0);
	}
}
1364 | |
/*
 * Conditional variant of tcpm_set_state(): only transition if the state
 * machine is still in the state it was in when the current handler was
 * entered (enter_state); otherwise an interleaved event already moved us
 * on, and the request is logged and dropped.
 */
static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
				unsigned int delay_ms)
{
	if (port->enter_state == port->state)
		tcpm_set_state(port, state, delay_ms);
	else
		tcpm_log(port,
			 fmt: "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]" ,
			 delay_ms ? "delayed " : "" ,
			 tcpm_states[port->state], tcpm_states[state],
			 delay_ms, tcpm_states[port->enter_state],
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
}
1378 | |
1379 | static void tcpm_queue_message(struct tcpm_port *port, |
1380 | enum pd_msg_request message) |
1381 | { |
1382 | port->queued_message = message; |
1383 | mod_tcpm_delayed_work(port, delay_ms: 0); |
1384 | } |
1385 | |
1386 | static bool tcpm_vdm_ams(struct tcpm_port *port) |
1387 | { |
1388 | switch (port->ams) { |
1389 | case DISCOVER_IDENTITY: |
1390 | case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY: |
1391 | case DISCOVER_SVIDS: |
1392 | case DISCOVER_MODES: |
1393 | case DFP_TO_UFP_ENTER_MODE: |
1394 | case DFP_TO_UFP_EXIT_MODE: |
1395 | case DFP_TO_CABLE_PLUG_ENTER_MODE: |
1396 | case DFP_TO_CABLE_PLUG_EXIT_MODE: |
1397 | case ATTENTION: |
1398 | case UNSTRUCTURED_VDMS: |
1399 | case STRUCTURED_VDMS: |
1400 | break; |
1401 | default: |
1402 | return false; |
1403 | } |
1404 | |
1405 | return true; |
1406 | } |
1407 | |
1408 | static bool tcpm_ams_interruptible(struct tcpm_port *port) |
1409 | { |
1410 | switch (port->ams) { |
1411 | /* Interruptible AMS */ |
1412 | case NONE_AMS: |
1413 | case SECURITY: |
1414 | case FIRMWARE_UPDATE: |
1415 | case DISCOVER_IDENTITY: |
1416 | case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY: |
1417 | case DISCOVER_SVIDS: |
1418 | case DISCOVER_MODES: |
1419 | case DFP_TO_UFP_ENTER_MODE: |
1420 | case DFP_TO_UFP_EXIT_MODE: |
1421 | case DFP_TO_CABLE_PLUG_ENTER_MODE: |
1422 | case DFP_TO_CABLE_PLUG_EXIT_MODE: |
1423 | case UNSTRUCTURED_VDMS: |
1424 | case STRUCTURED_VDMS: |
1425 | case COUNTRY_INFO: |
1426 | case COUNTRY_CODES: |
1427 | break; |
1428 | /* Non-Interruptible AMS */ |
1429 | default: |
1430 | if (port->in_ams) |
1431 | return false; |
1432 | break; |
1433 | } |
1434 | |
1435 | return true; |
1436 | } |
1437 | |
/*
 * Begin an Atomic Message Sequence of the given type.
 *
 * Enforces PD 3.0 collision avoidance: a non-interruptible AMS already in
 * flight aborts the request with -EAGAIN (unless we are starting a hard or
 * soft reset), and a PD 3.0 sink may only initiate while the source
 * advertises SinkTxOk.  On success the port is steered to AMS_START, or
 * back through SRC_READY/SNK_READY first when not already in a ready-ish
 * state, and 0 is returned.
 */
static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
{
	int ret = 0;

	tcpm_log(port, fmt: "AMS %s start" , tcpm_ams_str[ams]);

	if (!tcpm_ams_interruptible(port) &&
	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
		port->upcoming_state = INVALID_STATE;
		tcpm_log(port, fmt: "AMS %s not interruptible, aborting" ,
			 tcpm_ams_str[port->ams]);
		return -EAGAIN;
	}

	if (port->pwr_role == TYPEC_SOURCE) {
		/* Capture Rp before we change it: used for tSinkTx decisions below. */
		enum typec_cc_status cc_req = port->cc_req;

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_set_cc(port, cc: tcpm_rp_cc(port));
			tcpm_pd_transmit(port, tx_sop_type: TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, state: HARD_RESET_START, delay_ms: 0);
			return ret;
		} else if (ams == SOFT_RESET_AMS) {
			if (!port->explicit_contract)
				tcpm_set_cc(port, cc: tcpm_rp_cc(port));
			tcpm_set_state(port, state: SOFT_RESET_SEND, delay_ms: 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			/* tSinkTx is enforced in vdm_run_state_machine */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_cc(port, SINK_TX_NG);
			return ret;
		}

		/* PD 3.0 collision avoidance: block sink transmissions during our AMS. */
		if (port->negotiated_rev >= PD_REV30)
			tcpm_set_cc(port, SINK_TX_NG);

		switch (port->state) {
		case SRC_READY:
		case SRC_STARTUP:
		case SRC_SOFT_RESET_WAIT_SNK_TX:
		case SOFT_RESET:
		case SOFT_RESET_SEND:
			/* Honour tSinkTx if the sink was previously allowed to transmit. */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, state: AMS_START,
					       delay_ms: cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, state: AMS_START, delay_ms: 0);
			break;
		default:
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, state: SRC_READY,
					       delay_ms: cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, state: SRC_READY, delay_ms: 0);
			break;
		}
	} else {
		/* Sink: PD 3.0 sinks may only start an AMS when SinkTxOk is seen. */
		if (port->negotiated_rev >= PD_REV30 &&
		    !tcpm_sink_tx_ok(port) &&
		    ams != SOFT_RESET_AMS &&
		    ams != HARD_RESET) {
			port->upcoming_state = INVALID_STATE;
			tcpm_log(port, fmt: "Sink TX No Go" );
			return -EAGAIN;
		}

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_pd_transmit(port, tx_sop_type: TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, state: HARD_RESET_START, delay_ms: 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			return ret;
		}

		if (port->state == SNK_READY ||
		    port->state == SNK_SOFT_RESET)
			tcpm_set_state(port, state: AMS_START, delay_ms: 0);
		else
			tcpm_set_state(port, state: SNK_READY, delay_ms: 0);
	}

	return ret;
}
1528 | |
1529 | /* |
1530 | * VDM/VDO handling functions |
1531 | */ |
1532 | static void tcpm_queue_vdm(struct tcpm_port *port, const u32 , |
1533 | const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type) |
1534 | { |
1535 | u32 vdo_hdr = port->vdo_data[0]; |
1536 | |
1537 | WARN_ON(!mutex_is_locked(&port->lock)); |
1538 | |
1539 | /* If is sending discover_identity, handle received message first */ |
1540 | if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) { |
1541 | if (tx_sop_type == TCPC_TX_SOP_PRIME) |
1542 | port->send_discover_prime = true; |
1543 | else |
1544 | port->send_discover = true; |
1545 | mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS); |
1546 | } else { |
1547 | /* Make sure we are not still processing a previous VDM packet */ |
1548 | WARN_ON(port->vdm_state > VDM_STATE_DONE); |
1549 | } |
1550 | |
1551 | port->vdo_count = cnt + 1; |
1552 | port->vdo_data[0] = header; |
1553 | memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt); |
1554 | /* Set ready, vdm state machine will actually send */ |
1555 | port->vdm_retries = 0; |
1556 | port->vdm_state = VDM_STATE_READY; |
1557 | port->vdm_sm_running = true; |
1558 | |
1559 | port->tx_sop_type = tx_sop_type; |
1560 | |
1561 | mod_vdm_delayed_work(port, delay_ms: 0); |
1562 | } |
1563 | |
1564 | static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 , |
1565 | const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type) |
1566 | { |
1567 | mutex_lock(&port->lock); |
1568 | tcpm_queue_vdm(port, header, data, cnt, tx_sop_type: TCPC_TX_SOP); |
1569 | mutex_unlock(lock: &port->lock); |
1570 | } |
1571 | |
1572 | static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt) |
1573 | { |
1574 | u32 vdo = p[VDO_INDEX_IDH]; |
1575 | u32 product = p[VDO_INDEX_PRODUCT]; |
1576 | |
1577 | memset(&port->mode_data, 0, sizeof(port->mode_data)); |
1578 | |
1579 | port->partner_ident.id_header = vdo; |
1580 | port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT]; |
1581 | port->partner_ident.product = product; |
1582 | |
1583 | typec_partner_set_identity(partner: port->partner); |
1584 | |
1585 | tcpm_log(port, fmt: "Identity: %04x:%04x.%04x" , |
1586 | PD_IDH_VID(vdo), |
1587 | PD_PRODUCT_PID(product), product & 0xffff); |
1588 | } |
1589 | |
/*
 * Parse a Discover Identity response received over SOP' (from the cable
 * plug).  If no cable device is registered yet, fill in the cable identity
 * and descriptor and register the cable with the Type-C class; in all
 * cases make sure a SOP' plug device exists.
 */
static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
{
	u32 idh = p[VDO_INDEX_IDH];
	u32 product = p[VDO_INDEX_PRODUCT];
	int svdm_version;

	/*
	 * Attempt to consume identity only if cable currently is not set
	 */
	if (!IS_ERR_OR_NULL(ptr: port->cable))
		goto register_plug;

	/* Reset cable identity */
	memset(&port->cable_ident, 0, sizeof(port->cable_ident));

	/* Fill out id header, cert, product, cable VDO 1 */
	port->cable_ident.id_header = idh;
	port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
	port->cable_ident.product = product;
	port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];

	/* Fill out cable desc, infer svdm_version from pd revision */
	port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
							USB_PLUG_TYPE_A);
	port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
	/* Log PD Revision and additional cable VDO from negotiated revision */
	switch (port->negotiated_rev_prime) {
	case PD_REV30:
		port->cable_desc.pd_revision = 0x0300;
		/* Active cables carry a second cable VDO under PD 3.0. */
		if (port->cable_desc.active)
			port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
		break;
	case PD_REV20:
		port->cable_desc.pd_revision = 0x0200;
		break;
	default:
		port->cable_desc.pd_revision = 0x0200;
		break;
	}
	port->cable_desc.identity = &port->cable_ident;
	/* Register Cable, set identity and svdm_version */
	port->cable = typec_register_cable(port: port->typec_port, desc: &port->cable_desc);
	if (IS_ERR_OR_NULL(ptr: port->cable))
		return;
	typec_cable_set_identity(cable: port->cable);
	/* Get SVDM version */
	svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
	typec_cable_set_svdm_version(cable: port->cable, svdm_version);

register_plug:
	/* Register the SOP' plug device if it does not exist yet. */
	if (IS_ERR_OR_NULL(ptr: port->plug_prime)) {
		port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
		port->plug_prime = typec_register_plug(cable: port->cable,
						       desc: &port->plug_prime_desc);
	}
}
1646 | |
/*
 * Consume a Discover SVIDs response (two 16-bit SVIDs per VDO) into the
 * mode data for the given SOP* type.  Returns true when another Discover
 * SVIDs request should be issued (response was completely full), false
 * when the list terminated (SVID 0x0000 seen) or storage ran out.
 */
static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
			       enum tcpm_transmit_type rx_sop_type)
{
	/* SOP and SOP' discovery results are tracked separately. */
	struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
				      &port->mode_data_prime : &port->mode_data;
	int i;

	for (i = 1; i < cnt; i++) {
		u16 svid;

		/* High half of the VDO carries the first SVID; 0 terminates. */
		svid = (p[i] >> 16) & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, fmt: "SVID %d: 0x%x" , pmdata->nsvids, svid);

		/* Low half carries the second SVID; 0 terminates. */
		svid = p[i] & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, fmt: "SVID %d: 0x%x" , pmdata->nsvids, svid);
	}

	/*
	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
	 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
	 * 6-19). If the Respondersupports 12 or more SVID then the Discover
	 * SVIDs Command Shall be executed multiple times until a Discover
	 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
	 * the last part of the last VDO or with a VDO containing two SVIDs
	 * with values of 0x0000.
	 *
	 * However, some odd dockers support SVIDs less than 12 but without
	 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
	 * request and return false here.
	 */
	return cnt == 7;
abort:
	tcpm_log(port, fmt: "SVID_DISCOVERY_MAX(%d) too low!" , SVID_DISCOVERY_MAX);
	return false;
}
1696 | |
/*
 * Consume a Discover Modes response: record one altmode descriptor per
 * mode VDO (p[1..cnt-1]) for the SVID currently being queried, for either
 * the partner (SOP) or the cable plug (SOP').  Silently stops when the
 * respective altmode storage is already full.
 */
static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
			       enum tcpm_transmit_type rx_sop_type)
{
	struct pd_mode_data *pmdata = &port->mode_data;
	struct typec_altmode_desc *paltmode;
	int i;

	switch (rx_sop_type) {
	case TCPC_TX_SOP_PRIME:
		pmdata = &port->mode_data_prime;
		if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
			/* Already logged in svdm_consume_svids() */
			return;
		}
		break;
	case TCPC_TX_SOP:
		pmdata = &port->mode_data;
		if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
			/* Already logged in svdm_consume_svids() */
			return;
		}
		break;
	default:
		return;
	}

	for (i = 1; i < cnt; i++) {
		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
		memset(paltmode, 0, sizeof(*paltmode));

		paltmode->svid = pmdata->svids[pmdata->svid_index];
		/* Mode index is 1-based per the SVDM spec. */
		paltmode->mode = i;
		paltmode->vdo = p[i];

		tcpm_log(port, fmt: " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x" ,
			 pmdata->altmodes, paltmode->svid,
			 paltmode->mode, paltmode->vdo);

		pmdata->altmodes++;
	}
}
1738 | |
1739 | static void tcpm_register_partner_altmodes(struct tcpm_port *port) |
1740 | { |
1741 | struct pd_mode_data *modep = &port->mode_data; |
1742 | struct typec_altmode *altmode; |
1743 | int i; |
1744 | |
1745 | for (i = 0; i < modep->altmodes; i++) { |
1746 | altmode = typec_partner_register_altmode(partner: port->partner, |
1747 | desc: &modep->altmode_desc[i]); |
1748 | if (IS_ERR(ptr: altmode)) { |
1749 | tcpm_log(port, fmt: "Failed to register partner SVID 0x%04x" , |
1750 | modep->altmode_desc[i].svid); |
1751 | altmode = NULL; |
1752 | } |
1753 | port->partner_altmode[i] = altmode; |
1754 | } |
1755 | } |
1756 | |
1757 | static void tcpm_register_plug_altmodes(struct tcpm_port *port) |
1758 | { |
1759 | struct pd_mode_data *modep = &port->mode_data_prime; |
1760 | struct typec_altmode *altmode; |
1761 | int i; |
1762 | |
1763 | typec_plug_set_num_altmodes(plug: port->plug_prime, num_altmodes: modep->altmodes); |
1764 | |
1765 | for (i = 0; i < modep->altmodes; i++) { |
1766 | altmode = typec_plug_register_altmode(plug: port->plug_prime, |
1767 | desc: &modep->altmode_desc[i]); |
1768 | if (IS_ERR(ptr: altmode)) { |
1769 | tcpm_log(port, fmt: "Failed to register plug SVID 0x%04x" , |
1770 | modep->altmode_desc[i].svid); |
1771 | altmode = NULL; |
1772 | } |
1773 | port->plug_prime_altmode[i] = altmode; |
1774 | } |
1775 | } |
1776 | |
/* Convenience checks on the ID Header VDO of the partner/cable identity. */
#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
#define supports_modal_cable(port) PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
#define supports_host(port) PD_IDH_HOST_SUPP((port->partner_ident.id_header))
1780 | |
1781 | /* |
1782 | * Helper to determine whether the port is capable of SOP' communication at the |
1783 | * current point in time. |
1784 | */ |
1785 | static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port) |
1786 | { |
1787 | /* Check to see if tcpc supports SOP' communication */ |
1788 | if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc)) |
1789 | return false; |
1790 | /* |
1791 | * Power Delivery 2.0 Section 6.3.11 |
1792 | * Before communicating with a Cable Plug a Port Should ensure that it |
1793 | * is the Vconn Source and that the Cable Plugs are powered by |
1794 | * performing a Vconn swap if necessary. Since it cannot be guaranteed |
1795 | * that the present Vconn Source is supplying Vconn, the only means to |
1796 | * ensure that the Cable Plugs are powered is for a Port wishing to |
1797 | * communicate with a Cable Plug is to become the Vconn Source. |
1798 | * |
1799 | * Power Delivery 3.0 Section 6.3.11 |
1800 | * Before communicating with a Cable Plug a Port Shall ensure that it |
1801 | * is the Vconn source. |
1802 | */ |
1803 | if (port->vconn_role != TYPEC_SOURCE) |
1804 | return false; |
1805 | /* |
1806 | * Power Delivery 2.0 Section 2.4.4 |
1807 | * When no Contract or an Implicit Contract is in place the Source can |
1808 | * communicate with a Cable Plug using SOP' packets in order to discover |
1809 | * its characteristics. |
1810 | * |
1811 | * Power Delivery 3.0 Section 2.4.4 |
1812 | * When no Contract or an Implicit Contract is in place only the Source |
1813 | * port that is supplying Vconn is allowed to send packets to a Cable |
1814 | * Plug and is allowed to respond to packets from the Cable Plug. |
1815 | */ |
1816 | if (!port->explicit_contract) |
1817 | return port->pwr_role == TYPEC_SOURCE; |
1818 | if (port->negotiated_rev == PD_REV30) |
1819 | return true; |
1820 | /* |
1821 | * Power Delivery 2.0 Section 2.4.4 |
1822 | * |
1823 | * When an Explicit Contract is in place the DFP (either the Source or |
1824 | * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP” |
1825 | * Packets (see Figure 2-3). |
1826 | */ |
1827 | if (port->negotiated_rev == PD_REV20) |
1828 | return port->data_role == TYPEC_HOST; |
1829 | return false; |
1830 | } |
1831 | |
1832 | static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port) |
1833 | { |
1834 | if (!port->tcpc->attempt_vconn_swap_discovery) |
1835 | return false; |
1836 | |
1837 | /* Port is already source, no need to perform swap */ |
1838 | if (port->vconn_role == TYPEC_SOURCE) |
1839 | return false; |
1840 | |
1841 | /* |
1842 | * Partner needs to support Alternate Modes with modal support. If |
1843 | * partner is also capable of being a USB Host, it could be a device |
1844 | * that supports Alternate Modes as the DFP. |
1845 | */ |
1846 | if (!supports_modal(port) || supports_host(port)) |
1847 | return false; |
1848 | |
1849 | if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) || |
1850 | port->negotiated_rev == PD_REV30) |
1851 | return port->tcpc->attempt_vconn_swap_discovery(port->tcpc); |
1852 | |
1853 | return false; |
1854 | } |
1855 | |
1856 | |
1857 | static bool tcpm_cable_vdm_supported(struct tcpm_port *port) |
1858 | { |
1859 | return !IS_ERR_OR_NULL(ptr: port->cable) && |
1860 | typec_cable_is_active(cable: port->cable) && |
1861 | supports_modal_cable(port) && |
1862 | tcpm_can_communicate_sop_prime(port); |
1863 | } |
1864 | |
1865 | static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, |
1866 | const u32 *p, int cnt, u32 *response, |
1867 | enum adev_actions *adev_action, |
1868 | enum tcpm_transmit_type rx_sop_type, |
1869 | enum tcpm_transmit_type *response_tx_sop_type) |
1870 | { |
1871 | struct typec_port *typec = port->typec_port; |
1872 | struct typec_altmode *pdev, *pdev_prime; |
1873 | struct pd_mode_data *modep, *modep_prime; |
1874 | int svdm_version; |
1875 | int rlen = 0; |
1876 | int cmd_type; |
1877 | int cmd; |
1878 | int i; |
1879 | int ret; |
1880 | |
1881 | cmd_type = PD_VDO_CMDT(p[0]); |
1882 | cmd = PD_VDO_CMD(p[0]); |
1883 | |
1884 | tcpm_log(port, fmt: "Rx VDM cmd 0x%x type %d cmd %d len %d" , |
1885 | p[0], cmd_type, cmd, cnt); |
1886 | |
1887 | switch (rx_sop_type) { |
1888 | case TCPC_TX_SOP_PRIME: |
1889 | modep_prime = &port->mode_data_prime; |
1890 | pdev_prime = typec_match_altmode(altmodes: port->plug_prime_altmode, |
1891 | ALTMODE_DISCOVERY_MAX, |
1892 | PD_VDO_VID(p[0]), |
1893 | PD_VDO_OPOS(p[0])); |
1894 | svdm_version = typec_get_cable_svdm_version(port: typec); |
1895 | /* |
1896 | * Update SVDM version if cable was discovered before port partner. |
1897 | */ |
1898 | if (!IS_ERR_OR_NULL(ptr: port->cable) && |
1899 | PD_VDO_SVDM_VER(p[0]) < svdm_version) |
1900 | typec_cable_set_svdm_version(cable: port->cable, svdm_version); |
1901 | break; |
1902 | case TCPC_TX_SOP: |
1903 | modep = &port->mode_data; |
1904 | pdev = typec_match_altmode(altmodes: port->partner_altmode, |
1905 | ALTMODE_DISCOVERY_MAX, |
1906 | PD_VDO_VID(p[0]), |
1907 | PD_VDO_OPOS(p[0])); |
1908 | svdm_version = typec_get_negotiated_svdm_version(port: typec); |
1909 | if (svdm_version < 0) |
1910 | return 0; |
1911 | break; |
1912 | default: |
1913 | modep = &port->mode_data; |
1914 | pdev = typec_match_altmode(altmodes: port->partner_altmode, |
1915 | ALTMODE_DISCOVERY_MAX, |
1916 | PD_VDO_VID(p[0]), |
1917 | PD_VDO_OPOS(p[0])); |
1918 | svdm_version = typec_get_negotiated_svdm_version(port: typec); |
1919 | if (svdm_version < 0) |
1920 | return 0; |
1921 | break; |
1922 | } |
1923 | |
1924 | switch (cmd_type) { |
1925 | case CMDT_INIT: |
1926 | /* |
1927 | * Only the port or port partner is allowed to initialize SVDM |
1928 | * commands over SOP'. In case the port partner initializes a |
1929 | * sequence when it is not allowed to send SOP' messages, drop |
1930 | * the message should the TCPM port try to process it. |
1931 | */ |
1932 | if (rx_sop_type == TCPC_TX_SOP_PRIME) |
1933 | return 0; |
1934 | |
1935 | switch (cmd) { |
1936 | case CMD_DISCOVER_IDENT: |
1937 | if (PD_VDO_VID(p[0]) != USB_SID_PD) |
1938 | break; |
1939 | |
1940 | if (IS_ERR_OR_NULL(ptr: port->partner)) |
1941 | break; |
1942 | |
1943 | if (PD_VDO_SVDM_VER(p[0]) < svdm_version) { |
1944 | typec_partner_set_svdm_version(partner: port->partner, |
1945 | PD_VDO_SVDM_VER(p[0])); |
1946 | svdm_version = PD_VDO_SVDM_VER(p[0]); |
1947 | } |
1948 | |
1949 | port->ams = DISCOVER_IDENTITY; |
1950 | /* |
1951 | * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host) |
1952 | * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or |
1953 | * "wrong configuation" or "Unrecognized" |
1954 | */ |
1955 | if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) && |
1956 | port->nr_snk_vdo) { |
1957 | if (svdm_version < SVDM_VER_2_0) { |
1958 | for (i = 0; i < port->nr_snk_vdo_v1; i++) |
1959 | response[i + 1] = port->snk_vdo_v1[i]; |
1960 | rlen = port->nr_snk_vdo_v1 + 1; |
1961 | |
1962 | } else { |
1963 | for (i = 0; i < port->nr_snk_vdo; i++) |
1964 | response[i + 1] = port->snk_vdo[i]; |
1965 | rlen = port->nr_snk_vdo + 1; |
1966 | } |
1967 | } |
1968 | break; |
1969 | case CMD_DISCOVER_SVID: |
1970 | port->ams = DISCOVER_SVIDS; |
1971 | break; |
1972 | case CMD_DISCOVER_MODES: |
1973 | port->ams = DISCOVER_MODES; |
1974 | break; |
1975 | case CMD_ENTER_MODE: |
1976 | port->ams = DFP_TO_UFP_ENTER_MODE; |
1977 | break; |
1978 | case CMD_EXIT_MODE: |
1979 | port->ams = DFP_TO_UFP_EXIT_MODE; |
1980 | break; |
1981 | case CMD_ATTENTION: |
1982 | /* Attention command does not have response */ |
1983 | *adev_action = ADEV_ATTENTION; |
1984 | return 0; |
1985 | default: |
1986 | break; |
1987 | } |
1988 | if (rlen >= 1) { |
1989 | response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK); |
1990 | } else if (rlen == 0) { |
1991 | response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); |
1992 | rlen = 1; |
1993 | } else { |
1994 | response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY); |
1995 | rlen = 1; |
1996 | } |
1997 | response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | |
1998 | (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec))); |
1999 | break; |
2000 | case CMDT_RSP_ACK: |
2001 | /* |
2002 | * Silently drop message if we are not connected, but can process |
2003 | * if SOP' Discover Identity prior to explicit contract. |
2004 | */ |
2005 | if (IS_ERR_OR_NULL(ptr: port->partner) && |
2006 | !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT)) |
2007 | break; |
2008 | |
2009 | tcpm_ams_finish(port); |
2010 | |
2011 | switch (cmd) { |
2012 | /* |
2013 | * SVDM Command Flow for SOP and SOP': |
2014 | * SOP Discover Identity |
2015 | * SOP' Discover Identity |
2016 | * SOP Discover SVIDs |
2017 | * Discover Modes |
2018 | * (Active Cables) |
2019 | * SOP' Discover SVIDs |
2020 | * Discover Modes |
2021 | * |
2022 | * Perform Discover SOP' if the port can communicate with cable |
2023 | * plug. |
2024 | */ |
2025 | case CMD_DISCOVER_IDENT: |
2026 | switch (rx_sop_type) { |
2027 | case TCPC_TX_SOP: |
2028 | if (PD_VDO_SVDM_VER(p[0]) < svdm_version) { |
2029 | typec_partner_set_svdm_version(partner: port->partner, |
2030 | PD_VDO_SVDM_VER(p[0])); |
2031 | /* If cable is discovered before partner, downgrade svdm */ |
2032 | if (!IS_ERR_OR_NULL(ptr: port->cable) && |
2033 | (typec_get_cable_svdm_version(port: port->typec_port) > |
2034 | svdm_version)) |
2035 | typec_cable_set_svdm_version(cable: port->cable, |
2036 | svdm_version); |
2037 | } |
2038 | /* 6.4.4.3.1 */ |
2039 | svdm_consume_identity(port, p, cnt); |
2040 | /* Attempt Vconn swap, delay SOP' discovery if necessary */ |
2041 | if (tcpm_attempt_vconn_swap_discovery(port)) { |
2042 | port->send_discover_prime = true; |
2043 | port->upcoming_state = VCONN_SWAP_SEND; |
2044 | ret = tcpm_ams_start(port, ams: VCONN_SWAP); |
2045 | if (!ret) |
2046 | return 0; |
2047 | /* Cannot perform Vconn swap */ |
2048 | port->upcoming_state = INVALID_STATE; |
2049 | port->send_discover_prime = false; |
2050 | } |
2051 | |
2052 | /* |
2053 | * Attempt Discover Identity on SOP' if the |
2054 | * cable was not discovered previously, and use |
2055 | * the SVDM version of the partner to probe. |
2056 | */ |
2057 | if (IS_ERR_OR_NULL(ptr: port->cable) && |
2058 | tcpm_can_communicate_sop_prime(port)) { |
2059 | *response_tx_sop_type = TCPC_TX_SOP_PRIME; |
2060 | port->send_discover_prime = true; |
2061 | response[0] = VDO(USB_SID_PD, 1, |
2062 | typec_get_negotiated_svdm_version(typec), |
2063 | CMD_DISCOVER_IDENT); |
2064 | rlen = 1; |
2065 | } else { |
2066 | *response_tx_sop_type = TCPC_TX_SOP; |
2067 | response[0] = VDO(USB_SID_PD, 1, |
2068 | typec_get_negotiated_svdm_version(typec), |
2069 | CMD_DISCOVER_SVID); |
2070 | rlen = 1; |
2071 | } |
2072 | break; |
2073 | case TCPC_TX_SOP_PRIME: |
2074 | /* |
2075 | * svdm_consume_identity_sop_prime will determine |
2076 | * the svdm_version for the cable moving forward. |
2077 | */ |
2078 | svdm_consume_identity_sop_prime(port, p, cnt); |
2079 | |
2080 | /* |
2081 | * If received in SRC_VDM_IDENTITY_REQUEST, continue |
2082 | * to SRC_SEND_CAPABILITIES |
2083 | */ |
2084 | if (port->state == SRC_VDM_IDENTITY_REQUEST) { |
2085 | tcpm_set_state(port, state: SRC_SEND_CAPABILITIES, delay_ms: 0); |
2086 | return 0; |
2087 | } |
2088 | |
2089 | *response_tx_sop_type = TCPC_TX_SOP; |
2090 | response[0] = VDO(USB_SID_PD, 1, |
2091 | typec_get_negotiated_svdm_version(typec), |
2092 | CMD_DISCOVER_SVID); |
2093 | rlen = 1; |
2094 | break; |
2095 | default: |
2096 | return 0; |
2097 | } |
2098 | break; |
2099 | case CMD_DISCOVER_SVID: |
2100 | *response_tx_sop_type = rx_sop_type; |
2101 | /* 6.4.4.3.2 */ |
2102 | if (svdm_consume_svids(port, p, cnt, rx_sop_type)) { |
2103 | response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID); |
2104 | rlen = 1; |
2105 | } else { |
2106 | if (rx_sop_type == TCPC_TX_SOP) { |
2107 | if (modep->nsvids && supports_modal(port)) { |
2108 | response[0] = VDO(modep->svids[0], 1, svdm_version, |
2109 | CMD_DISCOVER_MODES); |
2110 | rlen = 1; |
2111 | } |
2112 | } else if (rx_sop_type == TCPC_TX_SOP_PRIME) { |
2113 | if (modep_prime->nsvids) { |
2114 | response[0] = VDO(modep_prime->svids[0], 1, |
2115 | svdm_version, CMD_DISCOVER_MODES); |
2116 | rlen = 1; |
2117 | } |
2118 | } |
2119 | } |
2120 | break; |
2121 | case CMD_DISCOVER_MODES: |
2122 | if (rx_sop_type == TCPC_TX_SOP) { |
2123 | /* 6.4.4.3.3 */ |
2124 | svdm_consume_modes(port, p, cnt, rx_sop_type); |
2125 | modep->svid_index++; |
2126 | if (modep->svid_index < modep->nsvids) { |
2127 | u16 svid = modep->svids[modep->svid_index]; |
2128 | *response_tx_sop_type = TCPC_TX_SOP; |
2129 | response[0] = VDO(svid, 1, svdm_version, |
2130 | CMD_DISCOVER_MODES); |
2131 | rlen = 1; |
2132 | } else if (tcpm_cable_vdm_supported(port)) { |
2133 | *response_tx_sop_type = TCPC_TX_SOP_PRIME; |
2134 | response[0] = VDO(USB_SID_PD, 1, |
2135 | typec_get_cable_svdm_version(typec), |
2136 | CMD_DISCOVER_SVID); |
2137 | rlen = 1; |
2138 | } else { |
2139 | tcpm_register_partner_altmodes(port); |
2140 | } |
2141 | } else if (rx_sop_type == TCPC_TX_SOP_PRIME) { |
2142 | /* 6.4.4.3.3 */ |
2143 | svdm_consume_modes(port, p, cnt, rx_sop_type); |
2144 | modep_prime->svid_index++; |
2145 | if (modep_prime->svid_index < modep_prime->nsvids) { |
2146 | u16 svid = modep_prime->svids[modep_prime->svid_index]; |
2147 | *response_tx_sop_type = TCPC_TX_SOP_PRIME; |
2148 | response[0] = VDO(svid, 1, |
2149 | typec_get_cable_svdm_version(typec), |
2150 | CMD_DISCOVER_MODES); |
2151 | rlen = 1; |
2152 | } else { |
2153 | tcpm_register_plug_altmodes(port); |
2154 | tcpm_register_partner_altmodes(port); |
2155 | } |
2156 | } |
2157 | break; |
2158 | case CMD_ENTER_MODE: |
2159 | *response_tx_sop_type = rx_sop_type; |
2160 | if (rx_sop_type == TCPC_TX_SOP) { |
2161 | if (adev && pdev) { |
2162 | typec_altmode_update_active(alt: pdev, active: true); |
2163 | *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL; |
2164 | } |
2165 | } else if (rx_sop_type == TCPC_TX_SOP_PRIME) { |
2166 | if (adev && pdev_prime) { |
2167 | typec_altmode_update_active(alt: pdev_prime, active: true); |
2168 | *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL; |
2169 | } |
2170 | } |
2171 | return 0; |
2172 | case CMD_EXIT_MODE: |
2173 | *response_tx_sop_type = rx_sop_type; |
2174 | if (rx_sop_type == TCPC_TX_SOP) { |
2175 | if (adev && pdev) { |
2176 | typec_altmode_update_active(alt: pdev, active: false); |
2177 | /* Back to USB Operation */ |
2178 | *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM; |
2179 | return 0; |
2180 | } |
2181 | } |
2182 | break; |
2183 | case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15): |
2184 | break; |
2185 | default: |
2186 | /* Unrecognized SVDM */ |
2187 | response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); |
2188 | rlen = 1; |
2189 | response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | |
2190 | (VDO_SVDM_VERS(svdm_version)); |
2191 | break; |
2192 | } |
2193 | break; |
2194 | case CMDT_RSP_NAK: |
2195 | tcpm_ams_finish(port); |
2196 | switch (cmd) { |
2197 | case CMD_DISCOVER_IDENT: |
2198 | case CMD_DISCOVER_SVID: |
2199 | case CMD_DISCOVER_MODES: |
2200 | case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15): |
2201 | break; |
2202 | case CMD_ENTER_MODE: |
2203 | /* Back to USB Operation */ |
2204 | *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM; |
2205 | return 0; |
2206 | default: |
2207 | /* Unrecognized SVDM */ |
2208 | response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); |
2209 | rlen = 1; |
2210 | response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | |
2211 | (VDO_SVDM_VERS(svdm_version)); |
2212 | break; |
2213 | } |
2214 | break; |
2215 | default: |
2216 | response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); |
2217 | rlen = 1; |
2218 | response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | |
2219 | (VDO_SVDM_VERS(svdm_version)); |
2220 | break; |
2221 | } |
2222 | |
2223 | /* Informing the alternate mode drivers about everything */ |
2224 | *adev_action = ADEV_QUEUE_VDM; |
2225 | return rlen; |
2226 | } |
2227 | |
2228 | static void tcpm_pd_handle_msg(struct tcpm_port *port, |
2229 | enum pd_msg_request message, |
2230 | enum tcpm_ams ams); |
2231 | |
2232 | static void tcpm_handle_vdm_request(struct tcpm_port *port, |
2233 | const __le32 *payload, int cnt, |
2234 | enum tcpm_transmit_type rx_sop_type) |
2235 | { |
2236 | enum adev_actions adev_action = ADEV_NONE; |
2237 | struct typec_altmode *adev; |
2238 | u32 p[PD_MAX_PAYLOAD]; |
2239 | u32 response[8] = { }; |
2240 | int i, rlen = 0; |
2241 | enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP; |
2242 | |
2243 | for (i = 0; i < cnt; i++) |
2244 | p[i] = le32_to_cpu(payload[i]); |
2245 | |
2246 | adev = typec_match_altmode(altmodes: port->port_altmode, ALTMODE_DISCOVERY_MAX, |
2247 | PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0])); |
2248 | |
2249 | if (port->vdm_state == VDM_STATE_BUSY) { |
2250 | /* If UFP responded busy retry after timeout */ |
2251 | if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) { |
2252 | port->vdm_state = VDM_STATE_WAIT_RSP_BUSY; |
2253 | port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) | |
2254 | CMDT_INIT; |
2255 | mod_vdm_delayed_work(port, PD_T_VDM_BUSY); |
2256 | return; |
2257 | } |
2258 | port->vdm_state = VDM_STATE_DONE; |
2259 | } |
2260 | |
2261 | if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) { |
2262 | /* |
2263 | * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in |
2264 | * advance because we are dropping the lock but may send VDMs soon. |
2265 | * For the cases of INIT received: |
2266 | * - If no response to send, it will be cleared later in this function. |
2267 | * - If there are responses to send, it will be cleared in the state machine. |
2268 | * For the cases of RSP received: |
2269 | * - If no further INIT to send, it will be cleared later in this function. |
2270 | * - Otherwise, it will be cleared in the state machine if timeout or it will go |
2271 | * back here until no further INIT to send. |
2272 | * For the cases of unknown type received: |
2273 | * - We will send NAK and the flag will be cleared in the state machine. |
2274 | */ |
2275 | port->vdm_sm_running = true; |
2276 | rlen = tcpm_pd_svdm(port, adev, p, cnt, response, adev_action: &adev_action, |
2277 | rx_sop_type, response_tx_sop_type: &response_tx_sop_type); |
2278 | } else { |
2279 | if (port->negotiated_rev >= PD_REV30) |
2280 | tcpm_pd_handle_msg(port, message: PD_MSG_CTRL_NOT_SUPP, ams: NONE_AMS); |
2281 | } |
2282 | |
2283 | /* |
2284 | * We are done with any state stored in the port struct now, except |
2285 | * for any port struct changes done by the tcpm_queue_vdm() call |
2286 | * below, which is a separate operation. |
2287 | * |
2288 | * So we can safely release the lock here; and we MUST release the |
2289 | * lock here to avoid an AB BA lock inversion: |
2290 | * |
2291 | * If we keep the lock here then the lock ordering in this path is: |
2292 | * 1. tcpm_pd_rx_handler take the tcpm port lock |
2293 | * 2. One of the typec_altmode_* calls below takes the alt-mode's lock |
2294 | * |
2295 | * And we also have this ordering: |
2296 | * 1. alt-mode driver takes the alt-mode's lock |
2297 | * 2. alt-mode driver calls tcpm_altmode_enter which takes the |
2298 | * tcpm port lock |
2299 | * |
2300 | * Dropping our lock here avoids this. |
2301 | */ |
2302 | mutex_unlock(lock: &port->lock); |
2303 | |
2304 | if (adev) { |
2305 | switch (adev_action) { |
2306 | case ADEV_NONE: |
2307 | break; |
2308 | case ADEV_NOTIFY_USB_AND_QUEUE_VDM: |
2309 | WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL)); |
2310 | typec_altmode_vdm(altmode: adev, header: p[0], vdo: &p[1], count: cnt); |
2311 | break; |
2312 | case ADEV_QUEUE_VDM: |
2313 | if (response_tx_sop_type == TCPC_TX_SOP_PRIME) |
2314 | typec_cable_altmode_vdm(altmode: adev, sop: TYPEC_PLUG_SOP_P, header: p[0], vdo: &p[1], count: cnt); |
2315 | else |
2316 | typec_altmode_vdm(altmode: adev, header: p[0], vdo: &p[1], count: cnt); |
2317 | break; |
2318 | case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL: |
2319 | if (response_tx_sop_type == TCPC_TX_SOP_PRIME) { |
2320 | if (typec_cable_altmode_vdm(altmode: adev, sop: TYPEC_PLUG_SOP_P, |
2321 | header: p[0], vdo: &p[1], count: cnt)) { |
2322 | int svdm_version = typec_get_cable_svdm_version( |
2323 | port: port->typec_port); |
2324 | if (svdm_version < 0) |
2325 | break; |
2326 | |
2327 | response[0] = VDO(adev->svid, 1, svdm_version, |
2328 | CMD_EXIT_MODE); |
2329 | response[0] |= VDO_OPOS(adev->mode); |
2330 | rlen = 1; |
2331 | } |
2332 | } else { |
2333 | if (typec_altmode_vdm(altmode: adev, header: p[0], vdo: &p[1], count: cnt)) { |
2334 | int svdm_version = typec_get_negotiated_svdm_version( |
2335 | port: port->typec_port); |
2336 | if (svdm_version < 0) |
2337 | break; |
2338 | |
2339 | response[0] = VDO(adev->svid, 1, svdm_version, |
2340 | CMD_EXIT_MODE); |
2341 | response[0] |= VDO_OPOS(adev->mode); |
2342 | rlen = 1; |
2343 | } |
2344 | } |
2345 | break; |
2346 | case ADEV_ATTENTION: |
2347 | if (typec_altmode_attention(altmode: adev, vdo: p[1])) |
2348 | tcpm_log(port, fmt: "typec_altmode_attention no port partner altmode" ); |
2349 | break; |
2350 | } |
2351 | } |
2352 | |
2353 | /* |
2354 | * We must re-take the lock here to balance the unlock in |
2355 | * tcpm_pd_rx_handler, note that no changes, other then the |
2356 | * tcpm_queue_vdm call, are made while the lock is held again. |
2357 | * All that is done after the call is unwinding the call stack until |
2358 | * we return to tcpm_pd_rx_handler and do the unlock there. |
2359 | */ |
2360 | mutex_lock(&port->lock); |
2361 | |
2362 | if (rlen > 0) |
2363 | tcpm_queue_vdm(port, header: response[0], data: &response[1], cnt: rlen - 1, tx_sop_type: response_tx_sop_type); |
2364 | else |
2365 | port->vdm_sm_running = false; |
2366 | } |
2367 | |
2368 | static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd, |
2369 | const u32 *data, int count, enum tcpm_transmit_type tx_sop_type) |
2370 | { |
2371 | int svdm_version; |
2372 | u32 ; |
2373 | |
2374 | switch (tx_sop_type) { |
2375 | case TCPC_TX_SOP_PRIME: |
2376 | /* |
2377 | * If the port partner is discovered, then the port partner's |
2378 | * SVDM Version will be returned |
2379 | */ |
2380 | svdm_version = typec_get_cable_svdm_version(port: port->typec_port); |
2381 | if (svdm_version < 0) |
2382 | svdm_version = SVDM_VER_MAX; |
2383 | break; |
2384 | case TCPC_TX_SOP: |
2385 | svdm_version = typec_get_negotiated_svdm_version(port: port->typec_port); |
2386 | if (svdm_version < 0) |
2387 | return; |
2388 | break; |
2389 | default: |
2390 | svdm_version = typec_get_negotiated_svdm_version(port: port->typec_port); |
2391 | if (svdm_version < 0) |
2392 | return; |
2393 | break; |
2394 | } |
2395 | |
2396 | if (WARN_ON(count > VDO_MAX_SIZE - 1)) |
2397 | count = VDO_MAX_SIZE - 1; |
2398 | |
2399 | /* set VDM header with VID & CMD */ |
2400 | header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ? |
2401 | 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), |
2402 | svdm_version, cmd); |
2403 | tcpm_queue_vdm(port, header, data, cnt: count, tx_sop_type); |
2404 | } |
2405 | |
2406 | static unsigned int vdm_ready_timeout(u32 vdm_hdr) |
2407 | { |
2408 | unsigned int timeout; |
2409 | int cmd = PD_VDO_CMD(vdm_hdr); |
2410 | |
2411 | /* its not a structured VDM command */ |
2412 | if (!PD_VDO_SVDM(vdm_hdr)) |
2413 | return PD_T_VDM_UNSTRUCTURED; |
2414 | |
2415 | switch (PD_VDO_CMDT(vdm_hdr)) { |
2416 | case CMDT_INIT: |
2417 | if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE) |
2418 | timeout = PD_T_VDM_WAIT_MODE_E; |
2419 | else |
2420 | timeout = PD_T_VDM_SNDR_RSP; |
2421 | break; |
2422 | default: |
2423 | if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE) |
2424 | timeout = PD_T_VDM_E_MODE; |
2425 | else |
2426 | timeout = PD_T_VDM_RCVR_RSP; |
2427 | break; |
2428 | } |
2429 | return timeout; |
2430 | } |
2431 | |
2432 | static void vdm_run_state_machine(struct tcpm_port *port) |
2433 | { |
2434 | struct pd_message msg; |
2435 | int i, res = 0; |
2436 | u32 vdo_hdr = port->vdo_data[0]; |
2437 | u32 response[8] = { }; |
2438 | |
2439 | switch (port->vdm_state) { |
2440 | case VDM_STATE_READY: |
2441 | /* Only transmit VDM if attached */ |
2442 | if (!port->attached) { |
2443 | port->vdm_state = VDM_STATE_ERR_BUSY; |
2444 | break; |
2445 | } |
2446 | |
2447 | /* |
2448 | * if there's traffic or we're not in PDO ready state don't send |
2449 | * a VDM. |
2450 | */ |
2451 | if (port->state != SRC_READY && port->state != SNK_READY && |
2452 | port->state != SRC_VDM_IDENTITY_REQUEST) { |
2453 | port->vdm_sm_running = false; |
2454 | break; |
2455 | } |
2456 | |
2457 | /* TODO: AMS operation for Unstructured VDM */ |
2458 | if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) { |
2459 | switch (PD_VDO_CMD(vdo_hdr)) { |
2460 | case CMD_DISCOVER_IDENT: |
2461 | res = tcpm_ams_start(port, ams: DISCOVER_IDENTITY); |
2462 | if (res == 0) { |
2463 | switch (port->tx_sop_type) { |
2464 | case TCPC_TX_SOP_PRIME: |
2465 | port->send_discover_prime = false; |
2466 | break; |
2467 | case TCPC_TX_SOP: |
2468 | port->send_discover = false; |
2469 | break; |
2470 | default: |
2471 | port->send_discover = false; |
2472 | break; |
2473 | } |
2474 | } else if (res == -EAGAIN) { |
2475 | port->vdo_data[0] = 0; |
2476 | mod_send_discover_delayed_work(port, |
2477 | SEND_DISCOVER_RETRY_MS); |
2478 | } |
2479 | break; |
2480 | case CMD_DISCOVER_SVID: |
2481 | res = tcpm_ams_start(port, ams: DISCOVER_SVIDS); |
2482 | break; |
2483 | case CMD_DISCOVER_MODES: |
2484 | res = tcpm_ams_start(port, ams: DISCOVER_MODES); |
2485 | break; |
2486 | case CMD_ENTER_MODE: |
2487 | res = tcpm_ams_start(port, ams: DFP_TO_UFP_ENTER_MODE); |
2488 | break; |
2489 | case CMD_EXIT_MODE: |
2490 | res = tcpm_ams_start(port, ams: DFP_TO_UFP_EXIT_MODE); |
2491 | break; |
2492 | case CMD_ATTENTION: |
2493 | res = tcpm_ams_start(port, ams: ATTENTION); |
2494 | break; |
2495 | case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15): |
2496 | res = tcpm_ams_start(port, ams: STRUCTURED_VDMS); |
2497 | break; |
2498 | default: |
2499 | res = -EOPNOTSUPP; |
2500 | break; |
2501 | } |
2502 | |
2503 | if (res < 0) { |
2504 | port->vdm_state = VDM_STATE_ERR_BUSY; |
2505 | return; |
2506 | } |
2507 | } |
2508 | |
2509 | port->vdm_state = VDM_STATE_SEND_MESSAGE; |
2510 | mod_vdm_delayed_work(port, delay_ms: (port->negotiated_rev >= PD_REV30 && |
2511 | port->pwr_role == TYPEC_SOURCE && |
2512 | PD_VDO_SVDM(vdo_hdr) && |
2513 | PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ? |
2514 | PD_T_SINK_TX : 0); |
2515 | break; |
2516 | case VDM_STATE_WAIT_RSP_BUSY: |
2517 | port->vdo_data[0] = port->vdo_retry; |
2518 | port->vdo_count = 1; |
2519 | port->vdm_state = VDM_STATE_READY; |
2520 | tcpm_ams_finish(port); |
2521 | break; |
2522 | case VDM_STATE_BUSY: |
2523 | port->vdm_state = VDM_STATE_ERR_TMOUT; |
2524 | if (port->ams != NONE_AMS) |
2525 | tcpm_ams_finish(port); |
2526 | break; |
2527 | case VDM_STATE_ERR_SEND: |
2528 | /* |
2529 | * When sending Discover Identity to SOP' before establishing an |
2530 | * explicit contract, do not retry. Instead, weave sending |
2531 | * Source_Capabilities over SOP and Discover Identity over SOP'. |
2532 | */ |
2533 | if (port->state == SRC_VDM_IDENTITY_REQUEST) { |
2534 | tcpm_ams_finish(port); |
2535 | port->vdm_state = VDM_STATE_DONE; |
2536 | tcpm_set_state(port, state: SRC_SEND_CAPABILITIES, delay_ms: 0); |
2537 | /* |
2538 | * A partner which does not support USB PD will not reply, |
2539 | * so this is not a fatal error. At the same time, some |
2540 | * devices may not return GoodCRC under some circumstances, |
2541 | * so we need to retry. |
2542 | */ |
2543 | } else if (port->vdm_retries < 3) { |
2544 | tcpm_log(port, fmt: "VDM Tx error, retry" ); |
2545 | port->vdm_retries++; |
2546 | port->vdm_state = VDM_STATE_READY; |
2547 | if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) |
2548 | tcpm_ams_finish(port); |
2549 | } else { |
2550 | tcpm_ams_finish(port); |
2551 | if (port->tx_sop_type == TCPC_TX_SOP) |
2552 | break; |
2553 | /* Handle SOP' Transmission Errors */ |
2554 | switch (PD_VDO_CMD(vdo_hdr)) { |
2555 | /* |
2556 | * If Discover Identity fails on SOP', then resume |
2557 | * discovery process on SOP only. |
2558 | */ |
2559 | case CMD_DISCOVER_IDENT: |
2560 | port->vdo_data[0] = 0; |
2561 | response[0] = VDO(USB_SID_PD, 1, |
2562 | typec_get_negotiated_svdm_version( |
2563 | port->typec_port), |
2564 | CMD_DISCOVER_SVID); |
2565 | tcpm_queue_vdm(port, header: response[0], data: &response[1], |
2566 | cnt: 0, tx_sop_type: TCPC_TX_SOP); |
2567 | break; |
2568 | /* |
2569 | * If Discover SVIDs or Discover Modes fail, then |
2570 | * proceed with Alt Mode discovery process on SOP. |
2571 | */ |
2572 | case CMD_DISCOVER_SVID: |
2573 | tcpm_register_partner_altmodes(port); |
2574 | break; |
2575 | case CMD_DISCOVER_MODES: |
2576 | tcpm_register_partner_altmodes(port); |
2577 | break; |
2578 | default: |
2579 | break; |
2580 | } |
2581 | } |
2582 | break; |
2583 | case VDM_STATE_SEND_MESSAGE: |
2584 | /* Prepare and send VDM */ |
2585 | memset(&msg, 0, sizeof(msg)); |
2586 | if (port->tx_sop_type == TCPC_TX_SOP_PRIME) { |
2587 | msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF, |
2588 | 0, /* Cable Plug Indicator for DFP/UFP */ |
2589 | 0, /* Reserved */ |
2590 | port->negotiated_rev_prime, |
2591 | port->message_id_prime, |
2592 | port->vdo_count); |
2593 | } else { |
2594 | msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF, |
2595 | port->pwr_role, |
2596 | port->data_role, |
2597 | port->negotiated_rev, |
2598 | port->message_id, |
2599 | port->vdo_count); |
2600 | } |
2601 | for (i = 0; i < port->vdo_count; i++) |
2602 | msg.payload[i] = cpu_to_le32(port->vdo_data[i]); |
2603 | res = tcpm_pd_transmit(port, tx_sop_type: port->tx_sop_type, msg: &msg); |
2604 | if (res < 0) { |
2605 | port->vdm_state = VDM_STATE_ERR_SEND; |
2606 | } else { |
2607 | unsigned long timeout; |
2608 | |
2609 | port->vdm_retries = 0; |
2610 | port->vdo_data[0] = 0; |
2611 | port->vdm_state = VDM_STATE_BUSY; |
2612 | timeout = vdm_ready_timeout(vdm_hdr: vdo_hdr); |
2613 | mod_vdm_delayed_work(port, delay_ms: timeout); |
2614 | } |
2615 | break; |
2616 | default: |
2617 | break; |
2618 | } |
2619 | } |
2620 | |
2621 | static void vdm_state_machine_work(struct kthread_work *work) |
2622 | { |
2623 | struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine); |
2624 | enum vdm_states prev_state; |
2625 | |
2626 | mutex_lock(&port->lock); |
2627 | |
2628 | /* |
2629 | * Continue running as long as the port is not busy and there was |
2630 | * a state change. |
2631 | */ |
2632 | do { |
2633 | prev_state = port->vdm_state; |
2634 | vdm_run_state_machine(port); |
2635 | } while (port->vdm_state != prev_state && |
2636 | port->vdm_state != VDM_STATE_BUSY && |
2637 | port->vdm_state != VDM_STATE_SEND_MESSAGE); |
2638 | |
2639 | if (port->vdm_state < VDM_STATE_READY) |
2640 | port->vdm_sm_running = false; |
2641 | |
2642 | mutex_unlock(lock: &port->lock); |
2643 | } |
2644 | |
/*
 * Validation results for a source/sink capability (PDO) list; see
 * tcpm_caps_err(). Each non-zero value indexes a message in pdo_err_msg[].
 */
enum pdo_err {
	PDO_NO_ERR,
	PDO_ERR_NO_VSAFE5V,
	PDO_ERR_VSAFE5V_NOT_FIRST,
	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
	PDO_ERR_FIXED_NOT_SORTED,
	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
	PDO_ERR_DUPE_PDO,
	PDO_ERR_PPS_APDO_NOT_SORTED,
	PDO_ERR_DUPE_PPS_APDO,
};
2656 | |
/* Log strings for tcpm_validate_caps(), indexed by enum pdo_err */
static const char * const pdo_err_msg[] = {
	[PDO_ERR_NO_VSAFE5V] =
	" err: source/sink caps should at least have vSafe5V" ,
	[PDO_ERR_VSAFE5V_NOT_FIRST] =
	" err: vSafe5V Fixed Supply Object Shall always be the first object" ,
	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
	" err: PDOs should be in the following order: Fixed; Battery; Variable" ,
	[PDO_ERR_FIXED_NOT_SORTED] =
	" err: Fixed supply pdos should be in increasing order of their fixed voltage" ,
	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage" ,
	[PDO_ERR_DUPE_PDO] =
	" err: Variable/Batt supply pdos cannot have same min/max voltage" ,
	[PDO_ERR_PPS_APDO_NOT_SORTED] =
	" err: Programmable power supply apdos should be in increasing order of their maximum voltage" ,
	[PDO_ERR_DUPE_PPS_APDO] =
	" err: Programmable power supply apdos cannot have same min/max voltage and max current" ,
};
2675 | |
2676 | static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo, |
2677 | unsigned int nr_pdo) |
2678 | { |
2679 | unsigned int i; |
2680 | |
2681 | /* Should at least contain vSafe5v */ |
2682 | if (nr_pdo < 1) |
2683 | return PDO_ERR_NO_VSAFE5V; |
2684 | |
2685 | /* The vSafe5V Fixed Supply Object Shall always be the first object */ |
2686 | if (pdo_type(pdo: pdo[0]) != PDO_TYPE_FIXED || |
2687 | pdo_fixed_voltage(pdo: pdo[0]) != VSAFE5V) |
2688 | return PDO_ERR_VSAFE5V_NOT_FIRST; |
2689 | |
2690 | for (i = 1; i < nr_pdo; i++) { |
2691 | if (pdo_type(pdo: pdo[i]) < pdo_type(pdo: pdo[i - 1])) { |
2692 | return PDO_ERR_PDO_TYPE_NOT_IN_ORDER; |
2693 | } else if (pdo_type(pdo: pdo[i]) == pdo_type(pdo: pdo[i - 1])) { |
2694 | enum pd_pdo_type type = pdo_type(pdo: pdo[i]); |
2695 | |
2696 | switch (type) { |
2697 | /* |
2698 | * The remaining Fixed Supply Objects, if |
2699 | * present, shall be sent in voltage order; |
2700 | * lowest to highest. |
2701 | */ |
2702 | case PDO_TYPE_FIXED: |
2703 | if (pdo_fixed_voltage(pdo: pdo[i]) <= |
2704 | pdo_fixed_voltage(pdo: pdo[i - 1])) |
2705 | return PDO_ERR_FIXED_NOT_SORTED; |
2706 | break; |
2707 | /* |
2708 | * The Battery Supply Objects and Variable |
2709 | * supply, if present shall be sent in Minimum |
2710 | * Voltage order; lowest to highest. |
2711 | */ |
2712 | case PDO_TYPE_VAR: |
2713 | case PDO_TYPE_BATT: |
2714 | if (pdo_min_voltage(pdo: pdo[i]) < |
2715 | pdo_min_voltage(pdo: pdo[i - 1])) |
2716 | return PDO_ERR_VARIABLE_BATT_NOT_SORTED; |
2717 | else if ((pdo_min_voltage(pdo: pdo[i]) == |
2718 | pdo_min_voltage(pdo: pdo[i - 1])) && |
2719 | (pdo_max_voltage(pdo: pdo[i]) == |
2720 | pdo_max_voltage(pdo: pdo[i - 1]))) |
2721 | return PDO_ERR_DUPE_PDO; |
2722 | break; |
2723 | /* |
2724 | * The Programmable Power Supply APDOs, if present, |
2725 | * shall be sent in Maximum Voltage order; |
2726 | * lowest to highest. |
2727 | */ |
2728 | case PDO_TYPE_APDO: |
2729 | if (pdo_apdo_type(pdo: pdo[i]) != APDO_TYPE_PPS) |
2730 | break; |
2731 | |
2732 | if (pdo_pps_apdo_max_voltage(pdo: pdo[i]) < |
2733 | pdo_pps_apdo_max_voltage(pdo: pdo[i - 1])) |
2734 | return PDO_ERR_PPS_APDO_NOT_SORTED; |
2735 | else if (pdo_pps_apdo_min_voltage(pdo: pdo[i]) == |
2736 | pdo_pps_apdo_min_voltage(pdo: pdo[i - 1]) && |
2737 | pdo_pps_apdo_max_voltage(pdo: pdo[i]) == |
2738 | pdo_pps_apdo_max_voltage(pdo: pdo[i - 1]) && |
2739 | pdo_pps_apdo_max_current(pdo: pdo[i]) == |
2740 | pdo_pps_apdo_max_current(pdo: pdo[i - 1])) |
2741 | return PDO_ERR_DUPE_PPS_APDO; |
2742 | break; |
2743 | default: |
2744 | tcpm_log_force(port, fmt: " Unknown pdo type" ); |
2745 | } |
2746 | } |
2747 | } |
2748 | |
2749 | return PDO_NO_ERR; |
2750 | } |
2751 | |
2752 | static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo, |
2753 | unsigned int nr_pdo) |
2754 | { |
2755 | enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo); |
2756 | |
2757 | if (err_index != PDO_NO_ERR) { |
2758 | tcpm_log_force(port, fmt: " %s" , pdo_err_msg[err_index]); |
2759 | return -EINVAL; |
2760 | } |
2761 | |
2762 | return 0; |
2763 | } |
2764 | |
2765 | static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo) |
2766 | { |
2767 | struct tcpm_port *port = typec_altmode_get_drvdata(altmode); |
2768 | int svdm_version; |
2769 | u32 ; |
2770 | |
2771 | svdm_version = typec_get_negotiated_svdm_version(port: port->typec_port); |
2772 | if (svdm_version < 0) |
2773 | return svdm_version; |
2774 | |
2775 | header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE); |
2776 | header |= VDO_OPOS(altmode->mode); |
2777 | |
2778 | tcpm_queue_vdm_unlocked(port, header, data: vdo, cnt: vdo ? 1 : 0, tx_sop_type: TCPC_TX_SOP); |
2779 | return 0; |
2780 | } |
2781 | |
2782 | static int tcpm_altmode_exit(struct typec_altmode *altmode) |
2783 | { |
2784 | struct tcpm_port *port = typec_altmode_get_drvdata(altmode); |
2785 | int svdm_version; |
2786 | u32 ; |
2787 | |
2788 | svdm_version = typec_get_negotiated_svdm_version(port: port->typec_port); |
2789 | if (svdm_version < 0) |
2790 | return svdm_version; |
2791 | |
2792 | header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); |
2793 | header |= VDO_OPOS(altmode->mode); |
2794 | |
2795 | tcpm_queue_vdm_unlocked(port, header, NULL, cnt: 0, tx_sop_type: TCPC_TX_SOP); |
2796 | return 0; |
2797 | } |
2798 | |
2799 | static int tcpm_altmode_vdm(struct typec_altmode *altmode, |
2800 | u32 , const u32 *data, int count) |
2801 | { |
2802 | struct tcpm_port *port = typec_altmode_get_drvdata(altmode); |
2803 | |
2804 | tcpm_queue_vdm_unlocked(port, header, data, cnt: count - 1, tx_sop_type: TCPC_TX_SOP); |
2805 | |
2806 | return 0; |
2807 | } |
2808 | |
/* Alternate-mode operations for the port partner (SOP) altmodes. */
static const struct typec_altmode_ops tcpm_altmode_ops = {
	.enter = tcpm_altmode_enter,
	.exit = tcpm_altmode_exit,
	.vdm = tcpm_altmode_vdm,
};
2814 | |
2815 | |
2816 | static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop, |
2817 | u32 *vdo) |
2818 | { |
2819 | struct tcpm_port *port = typec_altmode_get_drvdata(altmode); |
2820 | int svdm_version; |
2821 | u32 ; |
2822 | |
2823 | svdm_version = typec_get_cable_svdm_version(port: port->typec_port); |
2824 | if (svdm_version < 0) |
2825 | return svdm_version; |
2826 | |
2827 | header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE); |
2828 | header |= VDO_OPOS(altmode->mode); |
2829 | |
2830 | tcpm_queue_vdm_unlocked(port, header, data: vdo, cnt: vdo ? 1 : 0, tx_sop_type: TCPC_TX_SOP_PRIME); |
2831 | return 0; |
2832 | } |
2833 | |
2834 | static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop) |
2835 | { |
2836 | struct tcpm_port *port = typec_altmode_get_drvdata(altmode); |
2837 | int svdm_version; |
2838 | u32 ; |
2839 | |
2840 | svdm_version = typec_get_cable_svdm_version(port: port->typec_port); |
2841 | if (svdm_version < 0) |
2842 | return svdm_version; |
2843 | |
2844 | header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); |
2845 | header |= VDO_OPOS(altmode->mode); |
2846 | |
2847 | tcpm_queue_vdm_unlocked(port, header, NULL, cnt: 0, tx_sop_type: TCPC_TX_SOP_PRIME); |
2848 | return 0; |
2849 | } |
2850 | |
2851 | static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop, |
2852 | u32 , const u32 *data, int count) |
2853 | { |
2854 | struct tcpm_port *port = typec_altmode_get_drvdata(altmode); |
2855 | |
2856 | tcpm_queue_vdm_unlocked(port, header, data, cnt: count - 1, tx_sop_type: TCPC_TX_SOP_PRIME); |
2857 | |
2858 | return 0; |
2859 | } |
2860 | |
/* Alternate-mode operations for cable plug (SOP') altmodes. */
static const struct typec_cable_ops tcpm_cable_ops = {
	.enter = tcpm_cable_altmode_enter,
	.exit = tcpm_cable_altmode_exit,
	.vdm = tcpm_cable_altmode_vdm,
};
2866 | |
2867 | /* |
2868 | * PD (data, control) command handling functions |
2869 | */ |
2870 | static inline enum tcpm_state ready_state(struct tcpm_port *port) |
2871 | { |
2872 | if (port->pwr_role == TYPEC_SOURCE) |
2873 | return SRC_READY; |
2874 | else |
2875 | return SNK_READY; |
2876 | } |
2877 | |
/* Forward declaration; defined below, used by the message handlers. */
static int tcpm_pd_send_control(struct tcpm_port *port,
				enum pd_ctrl_msg_type type,
				enum tcpm_transmit_type tx_sop_type);
2881 | |
2882 | static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload, |
2883 | int cnt) |
2884 | { |
2885 | u32 p0 = le32_to_cpu(payload[0]); |
2886 | unsigned int type = usb_pd_ado_type(ado: p0); |
2887 | |
2888 | if (!type) { |
2889 | tcpm_log(port, fmt: "Alert message received with no type" ); |
2890 | tcpm_queue_message(port, message: PD_MSG_CTRL_NOT_SUPP); |
2891 | return; |
2892 | } |
2893 | |
2894 | /* Just handling non-battery alerts for now */ |
2895 | if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) { |
2896 | if (port->pwr_role == TYPEC_SOURCE) { |
2897 | port->upcoming_state = GET_STATUS_SEND; |
2898 | tcpm_ams_start(port, ams: GETTING_SOURCE_SINK_STATUS); |
2899 | } else { |
2900 | /* |
2901 | * Do not check SinkTxOk here in case the Source doesn't set its Rp to |
2902 | * SinkTxOk in time. |
2903 | */ |
2904 | port->ams = GETTING_SOURCE_SINK_STATUS; |
2905 | tcpm_set_state(port, state: GET_STATUS_SEND, delay_ms: 0); |
2906 | } |
2907 | } else { |
2908 | tcpm_queue_message(port, message: PD_MSG_CTRL_NOT_SUPP); |
2909 | } |
2910 | } |
2911 | |
2912 | static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port, |
2913 | enum typec_pwr_opmode mode, bool pps_active, |
2914 | u32 requested_vbus_voltage) |
2915 | { |
2916 | int ret; |
2917 | |
2918 | if (!port->tcpc->set_auto_vbus_discharge_threshold) |
2919 | return 0; |
2920 | |
2921 | ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active, |
2922 | requested_vbus_voltage); |
2923 | tcpm_log_force(port, |
2924 | fmt: "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d" , |
2925 | mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret); |
2926 | |
2927 | return ret; |
2928 | } |
2929 | |
2930 | static void tcpm_pd_handle_state(struct tcpm_port *port, |
2931 | enum tcpm_state state, |
2932 | enum tcpm_ams ams, |
2933 | unsigned int delay_ms) |
2934 | { |
2935 | switch (port->state) { |
2936 | case SRC_READY: |
2937 | case SNK_READY: |
2938 | port->ams = ams; |
2939 | tcpm_set_state(port, state, delay_ms); |
2940 | break; |
2941 | /* 8.3.3.4.1.1 and 6.8.1 power transitioning */ |
2942 | case SNK_TRANSITION_SINK: |
2943 | case SNK_TRANSITION_SINK_VBUS: |
2944 | case SRC_TRANSITION_SUPPLY: |
2945 | tcpm_set_state(port, state: HARD_RESET_SEND, delay_ms: 0); |
2946 | break; |
2947 | default: |
2948 | if (!tcpm_ams_interruptible(port)) { |
2949 | tcpm_set_state(port, state: port->pwr_role == TYPEC_SOURCE ? |
2950 | SRC_SOFT_RESET_WAIT_SNK_TX : |
2951 | SNK_SOFT_RESET, |
2952 | delay_ms: 0); |
2953 | } else { |
2954 | /* process the Message 6.8.1 */ |
2955 | port->upcoming_state = state; |
2956 | port->next_ams = ams; |
2957 | tcpm_set_state(port, state: ready_state(port), delay_ms); |
2958 | } |
2959 | break; |
2960 | } |
2961 | } |
2962 | |
2963 | static void tcpm_pd_handle_msg(struct tcpm_port *port, |
2964 | enum pd_msg_request message, |
2965 | enum tcpm_ams ams) |
2966 | { |
2967 | switch (port->state) { |
2968 | case SRC_READY: |
2969 | case SNK_READY: |
2970 | port->ams = ams; |
2971 | tcpm_queue_message(port, message); |
2972 | break; |
2973 | /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */ |
2974 | case SNK_TRANSITION_SINK: |
2975 | case SNK_TRANSITION_SINK_VBUS: |
2976 | case SRC_TRANSITION_SUPPLY: |
2977 | tcpm_set_state(port, state: HARD_RESET_SEND, delay_ms: 0); |
2978 | break; |
2979 | default: |
2980 | if (!tcpm_ams_interruptible(port)) { |
2981 | tcpm_set_state(port, state: port->pwr_role == TYPEC_SOURCE ? |
2982 | SRC_SOFT_RESET_WAIT_SNK_TX : |
2983 | SNK_SOFT_RESET, |
2984 | delay_ms: 0); |
2985 | } else { |
2986 | port->next_ams = ams; |
2987 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
2988 | /* 6.8.1 process the Message */ |
2989 | tcpm_queue_message(port, message); |
2990 | } |
2991 | break; |
2992 | } |
2993 | } |
2994 | |
2995 | static int tcpm_register_source_caps(struct tcpm_port *port) |
2996 | { |
2997 | struct usb_power_delivery_desc desc = { port->negotiated_rev }; |
2998 | struct usb_power_delivery_capabilities_desc caps = { }; |
2999 | struct usb_power_delivery_capabilities *cap; |
3000 | |
3001 | if (!port->partner_pd) |
3002 | port->partner_pd = usb_power_delivery_register(NULL, desc: &desc); |
3003 | if (IS_ERR(ptr: port->partner_pd)) |
3004 | return PTR_ERR(ptr: port->partner_pd); |
3005 | |
3006 | memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps); |
3007 | caps.role = TYPEC_SOURCE; |
3008 | |
3009 | cap = usb_power_delivery_register_capabilities(pd: port->partner_pd, desc: &caps); |
3010 | if (IS_ERR(ptr: cap)) |
3011 | return PTR_ERR(ptr: cap); |
3012 | |
3013 | port->partner_source_caps = cap; |
3014 | |
3015 | return 0; |
3016 | } |
3017 | |
3018 | static int tcpm_register_sink_caps(struct tcpm_port *port) |
3019 | { |
3020 | struct usb_power_delivery_desc desc = { port->negotiated_rev }; |
3021 | struct usb_power_delivery_capabilities_desc caps = { }; |
3022 | struct usb_power_delivery_capabilities *cap; |
3023 | |
3024 | if (!port->partner_pd) |
3025 | port->partner_pd = usb_power_delivery_register(NULL, desc: &desc); |
3026 | if (IS_ERR(ptr: port->partner_pd)) |
3027 | return PTR_ERR(ptr: port->partner_pd); |
3028 | |
3029 | memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps); |
3030 | caps.role = TYPEC_SINK; |
3031 | |
3032 | cap = usb_power_delivery_register_capabilities(pd: port->partner_pd, desc: &caps); |
3033 | if (IS_ERR(ptr: cap)) |
3034 | return PTR_ERR(ptr: cap); |
3035 | |
3036 | port->partner_sink_caps = cap; |
3037 | |
3038 | return 0; |
3039 | } |
3040 | |
3041 | static void tcpm_pd_data_request(struct tcpm_port *port, |
3042 | const struct pd_message *msg, |
3043 | enum tcpm_transmit_type rx_sop_type) |
3044 | { |
3045 | enum pd_data_msg_type type = pd_header_type_le(header: msg->header); |
3046 | unsigned int cnt = pd_header_cnt_le(header: msg->header); |
3047 | unsigned int rev = pd_header_rev_le(header: msg->header); |
3048 | unsigned int i; |
3049 | enum frs_typec_current partner_frs_current; |
3050 | bool frs_enable; |
3051 | int ret; |
3052 | |
3053 | if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) { |
3054 | port->vdm_state = VDM_STATE_ERR_BUSY; |
3055 | tcpm_ams_finish(port); |
3056 | mod_vdm_delayed_work(port, delay_ms: 0); |
3057 | } |
3058 | |
3059 | switch (type) { |
3060 | case PD_DATA_SOURCE_CAP: |
3061 | for (i = 0; i < cnt; i++) |
3062 | port->source_caps[i] = le32_to_cpu(msg->payload[i]); |
3063 | |
3064 | port->nr_source_caps = cnt; |
3065 | |
3066 | tcpm_log_source_caps(port); |
3067 | |
3068 | tcpm_validate_caps(port, pdo: port->source_caps, |
3069 | nr_pdo: port->nr_source_caps); |
3070 | |
3071 | tcpm_register_source_caps(port); |
3072 | |
3073 | /* |
3074 | * Adjust revision in subsequent message headers, as required, |
3075 | * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't |
3076 | * support Rev 1.0 so just do nothing in that scenario. |
3077 | */ |
3078 | if (rev == PD_REV10) { |
3079 | if (port->ams == GET_SOURCE_CAPABILITIES) |
3080 | tcpm_ams_finish(port); |
3081 | break; |
3082 | } |
3083 | |
3084 | if (rev < PD_MAX_REV) { |
3085 | port->negotiated_rev = rev; |
3086 | if (port->negotiated_rev_prime > port->negotiated_rev) |
3087 | port->negotiated_rev_prime = port->negotiated_rev; |
3088 | } |
3089 | |
3090 | if (port->pwr_role == TYPEC_SOURCE) { |
3091 | if (port->ams == GET_SOURCE_CAPABILITIES) |
3092 | tcpm_pd_handle_state(port, state: SRC_READY, ams: NONE_AMS, delay_ms: 0); |
3093 | /* Unexpected Source Capabilities */ |
3094 | else |
3095 | tcpm_pd_handle_msg(port, |
3096 | message: port->negotiated_rev < PD_REV30 ? |
3097 | PD_MSG_CTRL_REJECT : |
3098 | PD_MSG_CTRL_NOT_SUPP, |
3099 | ams: NONE_AMS); |
3100 | } else if (port->state == SNK_WAIT_CAPABILITIES) { |
3101 | /* |
3102 | * This message may be received even if VBUS is not |
3103 | * present. This is quite unexpected; see USB PD |
3104 | * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2. |
3105 | * However, at the same time, we must be ready to |
3106 | * receive this message and respond to it 15ms after |
3107 | * receiving PS_RDY during power swap operations, no matter |
3108 | * if VBUS is available or not (USB PD specification, |
3109 | * section 6.5.9.2). |
3110 | * So we need to accept the message either way, |
3111 | * but be prepared to keep waiting for VBUS after it was |
3112 | * handled. |
3113 | */ |
3114 | port->ams = POWER_NEGOTIATION; |
3115 | port->in_ams = true; |
3116 | tcpm_set_state(port, state: SNK_NEGOTIATE_CAPABILITIES, delay_ms: 0); |
3117 | } else { |
3118 | if (port->ams == GET_SOURCE_CAPABILITIES) |
3119 | tcpm_ams_finish(port); |
3120 | tcpm_pd_handle_state(port, state: SNK_NEGOTIATE_CAPABILITIES, |
3121 | ams: POWER_NEGOTIATION, delay_ms: 0); |
3122 | } |
3123 | break; |
3124 | case PD_DATA_REQUEST: |
3125 | /* |
3126 | * Adjust revision in subsequent message headers, as required, |
3127 | * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't |
3128 | * support Rev 1.0 so just reject in that scenario. |
3129 | */ |
3130 | if (rev == PD_REV10) { |
3131 | tcpm_pd_handle_msg(port, |
3132 | message: port->negotiated_rev < PD_REV30 ? |
3133 | PD_MSG_CTRL_REJECT : |
3134 | PD_MSG_CTRL_NOT_SUPP, |
3135 | ams: NONE_AMS); |
3136 | break; |
3137 | } |
3138 | |
3139 | if (rev < PD_MAX_REV) { |
3140 | port->negotiated_rev = rev; |
3141 | if (port->negotiated_rev_prime > port->negotiated_rev) |
3142 | port->negotiated_rev_prime = port->negotiated_rev; |
3143 | } |
3144 | |
3145 | if (port->pwr_role != TYPEC_SOURCE || cnt != 1) { |
3146 | tcpm_pd_handle_msg(port, |
3147 | message: port->negotiated_rev < PD_REV30 ? |
3148 | PD_MSG_CTRL_REJECT : |
3149 | PD_MSG_CTRL_NOT_SUPP, |
3150 | ams: NONE_AMS); |
3151 | break; |
3152 | } |
3153 | |
3154 | port->sink_request = le32_to_cpu(msg->payload[0]); |
3155 | |
3156 | if (port->vdm_sm_running && port->explicit_contract) { |
3157 | tcpm_pd_handle_msg(port, message: PD_MSG_CTRL_WAIT, ams: port->ams); |
3158 | break; |
3159 | } |
3160 | |
3161 | if (port->state == SRC_SEND_CAPABILITIES) |
3162 | tcpm_set_state(port, state: SRC_NEGOTIATE_CAPABILITIES, delay_ms: 0); |
3163 | else |
3164 | tcpm_pd_handle_state(port, state: SRC_NEGOTIATE_CAPABILITIES, |
3165 | ams: POWER_NEGOTIATION, delay_ms: 0); |
3166 | break; |
3167 | case PD_DATA_SINK_CAP: |
3168 | /* We don't do anything with this at the moment... */ |
3169 | for (i = 0; i < cnt; i++) |
3170 | port->sink_caps[i] = le32_to_cpu(msg->payload[i]); |
3171 | |
3172 | partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >> |
3173 | PDO_FIXED_FRS_CURR_SHIFT; |
3174 | frs_enable = partner_frs_current && (partner_frs_current <= |
3175 | port->new_source_frs_current); |
3176 | tcpm_log(port, |
3177 | fmt: "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c" , |
3178 | partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n'); |
3179 | if (frs_enable) { |
3180 | ret = port->tcpc->enable_frs(port->tcpc, true); |
3181 | tcpm_log(port, fmt: "Enable FRS %s, ret:%d\n" , ret ? "fail" : "success" , ret); |
3182 | } |
3183 | |
3184 | port->nr_sink_caps = cnt; |
3185 | port->sink_cap_done = true; |
3186 | tcpm_register_sink_caps(port); |
3187 | |
3188 | if (port->ams == GET_SINK_CAPABILITIES) |
3189 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
3190 | /* Unexpected Sink Capabilities */ |
3191 | else |
3192 | tcpm_pd_handle_msg(port, |
3193 | message: port->negotiated_rev < PD_REV30 ? |
3194 | PD_MSG_CTRL_REJECT : |
3195 | PD_MSG_CTRL_NOT_SUPP, |
3196 | ams: NONE_AMS); |
3197 | break; |
3198 | case PD_DATA_VENDOR_DEF: |
3199 | tcpm_handle_vdm_request(port, payload: msg->payload, cnt, rx_sop_type); |
3200 | break; |
3201 | case PD_DATA_BIST: |
3202 | port->bist_request = le32_to_cpu(msg->payload[0]); |
3203 | tcpm_pd_handle_state(port, state: BIST_RX, ams: BIST, delay_ms: 0); |
3204 | break; |
3205 | case PD_DATA_ALERT: |
3206 | if (port->state != SRC_READY && port->state != SNK_READY) |
3207 | tcpm_pd_handle_state(port, state: port->pwr_role == TYPEC_SOURCE ? |
3208 | SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET, |
3209 | ams: NONE_AMS, delay_ms: 0); |
3210 | else |
3211 | tcpm_handle_alert(port, payload: msg->payload, cnt); |
3212 | break; |
3213 | case PD_DATA_BATT_STATUS: |
3214 | case PD_DATA_GET_COUNTRY_INFO: |
3215 | /* Currently unsupported */ |
3216 | tcpm_pd_handle_msg(port, message: port->negotiated_rev < PD_REV30 ? |
3217 | PD_MSG_CTRL_REJECT : |
3218 | PD_MSG_CTRL_NOT_SUPP, |
3219 | ams: NONE_AMS); |
3220 | break; |
3221 | default: |
3222 | tcpm_pd_handle_msg(port, message: port->negotiated_rev < PD_REV30 ? |
3223 | PD_MSG_CTRL_REJECT : |
3224 | PD_MSG_CTRL_NOT_SUPP, |
3225 | ams: NONE_AMS); |
3226 | tcpm_log(port, fmt: "Unrecognized data message type %#x" , type); |
3227 | break; |
3228 | } |
3229 | } |
3230 | |
3231 | static void tcpm_pps_complete(struct tcpm_port *port, int result) |
3232 | { |
3233 | if (port->pps_pending) { |
3234 | port->pps_status = result; |
3235 | port->pps_pending = false; |
3236 | complete(&port->pps_complete); |
3237 | } |
3238 | } |
3239 | |
3240 | static void tcpm_pd_ctrl_request(struct tcpm_port *port, |
3241 | const struct pd_message *msg, |
3242 | enum tcpm_transmit_type rx_sop_type) |
3243 | { |
3244 | enum pd_ctrl_msg_type type = pd_header_type_le(header: msg->header); |
3245 | enum tcpm_state next_state; |
3246 | unsigned int rev = pd_header_rev_le(header: msg->header); |
3247 | |
3248 | /* |
3249 | * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in |
3250 | * VDM AMS if waiting for VDM responses and will be handled later. |
3251 | */ |
3252 | if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) { |
3253 | port->vdm_state = VDM_STATE_ERR_BUSY; |
3254 | tcpm_ams_finish(port); |
3255 | mod_vdm_delayed_work(port, delay_ms: 0); |
3256 | } |
3257 | |
3258 | switch (type) { |
3259 | case PD_CTRL_GOOD_CRC: |
3260 | case PD_CTRL_PING: |
3261 | break; |
3262 | case PD_CTRL_GET_SOURCE_CAP: |
3263 | tcpm_pd_handle_msg(port, message: PD_MSG_DATA_SOURCE_CAP, ams: GET_SOURCE_CAPABILITIES); |
3264 | break; |
3265 | case PD_CTRL_GET_SINK_CAP: |
3266 | tcpm_pd_handle_msg(port, message: PD_MSG_DATA_SINK_CAP, ams: GET_SINK_CAPABILITIES); |
3267 | break; |
3268 | case PD_CTRL_GOTO_MIN: |
3269 | break; |
3270 | case PD_CTRL_PS_RDY: |
3271 | switch (port->state) { |
3272 | case SNK_TRANSITION_SINK: |
3273 | if (port->vbus_present) { |
3274 | tcpm_set_current_limit(port, |
3275 | max_ma: port->req_current_limit, |
3276 | mv: port->req_supply_voltage); |
3277 | port->explicit_contract = true; |
3278 | tcpm_set_auto_vbus_discharge_threshold(port, |
3279 | mode: TYPEC_PWR_MODE_PD, |
3280 | pps_active: port->pps_data.active, |
3281 | requested_vbus_voltage: port->supply_voltage); |
3282 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
3283 | } else { |
3284 | /* |
3285 | * Seen after power swap. Keep waiting for VBUS |
3286 | * in a transitional state. |
3287 | */ |
3288 | tcpm_set_state(port, |
3289 | state: SNK_TRANSITION_SINK_VBUS, delay_ms: 0); |
3290 | } |
3291 | break; |
3292 | case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED: |
3293 | tcpm_set_state(port, state: PR_SWAP_SRC_SNK_SINK_ON, delay_ms: 0); |
3294 | break; |
3295 | case PR_SWAP_SNK_SRC_SINK_OFF: |
3296 | tcpm_set_state(port, state: PR_SWAP_SNK_SRC_SOURCE_ON, delay_ms: 0); |
3297 | break; |
3298 | case VCONN_SWAP_WAIT_FOR_VCONN: |
3299 | tcpm_set_state(port, state: VCONN_SWAP_TURN_OFF_VCONN, delay_ms: 0); |
3300 | break; |
3301 | case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: |
3302 | tcpm_set_state(port, state: FR_SWAP_SNK_SRC_NEW_SINK_READY, delay_ms: 0); |
3303 | break; |
3304 | default: |
3305 | tcpm_pd_handle_state(port, |
3306 | state: port->pwr_role == TYPEC_SOURCE ? |
3307 | SRC_SOFT_RESET_WAIT_SNK_TX : |
3308 | SNK_SOFT_RESET, |
3309 | ams: NONE_AMS, delay_ms: 0); |
3310 | break; |
3311 | } |
3312 | break; |
3313 | case PD_CTRL_REJECT: |
3314 | case PD_CTRL_WAIT: |
3315 | case PD_CTRL_NOT_SUPP: |
3316 | switch (port->state) { |
3317 | case SNK_NEGOTIATE_CAPABILITIES: |
3318 | /* USB PD specification, Figure 8-43 */ |
3319 | if (port->explicit_contract) |
3320 | next_state = SNK_READY; |
3321 | else |
3322 | next_state = SNK_WAIT_CAPABILITIES; |
3323 | |
3324 | /* Threshold was relaxed before sending Request. Restore it back. */ |
3325 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_PD, |
3326 | pps_active: port->pps_data.active, |
3327 | requested_vbus_voltage: port->supply_voltage); |
3328 | tcpm_set_state(port, state: next_state, delay_ms: 0); |
3329 | break; |
3330 | case SNK_NEGOTIATE_PPS_CAPABILITIES: |
3331 | /* Revert data back from any requested PPS updates */ |
3332 | port->pps_data.req_out_volt = port->supply_voltage; |
3333 | port->pps_data.req_op_curr = port->current_limit; |
3334 | port->pps_status = (type == PD_CTRL_WAIT ? |
3335 | -EAGAIN : -EOPNOTSUPP); |
3336 | |
3337 | /* Threshold was relaxed before sending Request. Restore it back. */ |
3338 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_PD, |
3339 | pps_active: port->pps_data.active, |
3340 | requested_vbus_voltage: port->supply_voltage); |
3341 | |
3342 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
3343 | break; |
3344 | case DR_SWAP_SEND: |
3345 | port->swap_status = (type == PD_CTRL_WAIT ? |
3346 | -EAGAIN : -EOPNOTSUPP); |
3347 | tcpm_set_state(port, state: DR_SWAP_CANCEL, delay_ms: 0); |
3348 | break; |
3349 | case PR_SWAP_SEND: |
3350 | port->swap_status = (type == PD_CTRL_WAIT ? |
3351 | -EAGAIN : -EOPNOTSUPP); |
3352 | tcpm_set_state(port, state: PR_SWAP_CANCEL, delay_ms: 0); |
3353 | break; |
3354 | case VCONN_SWAP_SEND: |
3355 | port->swap_status = (type == PD_CTRL_WAIT ? |
3356 | -EAGAIN : -EOPNOTSUPP); |
3357 | tcpm_set_state(port, state: VCONN_SWAP_CANCEL, delay_ms: 0); |
3358 | break; |
3359 | case FR_SWAP_SEND: |
3360 | tcpm_set_state(port, state: FR_SWAP_CANCEL, delay_ms: 0); |
3361 | break; |
3362 | case GET_SINK_CAP: |
3363 | port->sink_cap_done = true; |
3364 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
3365 | break; |
3366 | /* |
3367 | * Some port partners do not support GET_STATUS, avoid soft reset the link to |
3368 | * prevent redundant power re-negotiation |
3369 | */ |
3370 | case GET_STATUS_SEND: |
3371 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
3372 | break; |
3373 | case SRC_READY: |
3374 | case SNK_READY: |
3375 | if (port->vdm_state > VDM_STATE_READY) { |
3376 | port->vdm_state = VDM_STATE_DONE; |
3377 | if (tcpm_vdm_ams(port)) |
3378 | tcpm_ams_finish(port); |
3379 | mod_vdm_delayed_work(port, delay_ms: 0); |
3380 | break; |
3381 | } |
3382 | fallthrough; |
3383 | default: |
3384 | tcpm_pd_handle_state(port, |
3385 | state: port->pwr_role == TYPEC_SOURCE ? |
3386 | SRC_SOFT_RESET_WAIT_SNK_TX : |
3387 | SNK_SOFT_RESET, |
3388 | ams: NONE_AMS, delay_ms: 0); |
3389 | break; |
3390 | } |
3391 | break; |
3392 | case PD_CTRL_ACCEPT: |
3393 | switch (port->state) { |
3394 | case SNK_NEGOTIATE_CAPABILITIES: |
3395 | port->pps_data.active = false; |
3396 | tcpm_set_state(port, state: SNK_TRANSITION_SINK, delay_ms: 0); |
3397 | break; |
3398 | case SNK_NEGOTIATE_PPS_CAPABILITIES: |
3399 | port->pps_data.active = true; |
3400 | port->pps_data.min_volt = port->pps_data.req_min_volt; |
3401 | port->pps_data.max_volt = port->pps_data.req_max_volt; |
3402 | port->pps_data.max_curr = port->pps_data.req_max_curr; |
3403 | port->req_supply_voltage = port->pps_data.req_out_volt; |
3404 | port->req_current_limit = port->pps_data.req_op_curr; |
3405 | power_supply_changed(psy: port->psy); |
3406 | tcpm_set_state(port, state: SNK_TRANSITION_SINK, delay_ms: 0); |
3407 | break; |
3408 | case SOFT_RESET_SEND: |
3409 | if (port->ams == SOFT_RESET_AMS) |
3410 | tcpm_ams_finish(port); |
3411 | /* |
3412 | * SOP' Soft Reset is done after Vconn Swap, |
3413 | * which returns to ready state |
3414 | */ |
3415 | if (rx_sop_type == TCPC_TX_SOP_PRIME) { |
3416 | if (rev < port->negotiated_rev_prime) |
3417 | port->negotiated_rev_prime = rev; |
3418 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
3419 | break; |
3420 | } |
3421 | if (port->pwr_role == TYPEC_SOURCE) { |
3422 | port->upcoming_state = SRC_SEND_CAPABILITIES; |
3423 | tcpm_ams_start(port, ams: POWER_NEGOTIATION); |
3424 | } else { |
3425 | tcpm_set_state(port, state: SNK_WAIT_CAPABILITIES, delay_ms: 0); |
3426 | } |
3427 | break; |
3428 | case DR_SWAP_SEND: |
3429 | tcpm_set_state(port, state: DR_SWAP_CHANGE_DR, delay_ms: 0); |
3430 | break; |
3431 | case PR_SWAP_SEND: |
3432 | tcpm_set_state(port, state: PR_SWAP_START, delay_ms: 0); |
3433 | break; |
3434 | case VCONN_SWAP_SEND: |
3435 | tcpm_set_state(port, state: VCONN_SWAP_START, delay_ms: 0); |
3436 | break; |
3437 | case FR_SWAP_SEND: |
3438 | tcpm_set_state(port, state: FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, delay_ms: 0); |
3439 | break; |
3440 | default: |
3441 | tcpm_pd_handle_state(port, |
3442 | state: port->pwr_role == TYPEC_SOURCE ? |
3443 | SRC_SOFT_RESET_WAIT_SNK_TX : |
3444 | SNK_SOFT_RESET, |
3445 | ams: NONE_AMS, delay_ms: 0); |
3446 | break; |
3447 | } |
3448 | break; |
3449 | case PD_CTRL_SOFT_RESET: |
3450 | port->ams = SOFT_RESET_AMS; |
3451 | tcpm_set_state(port, state: SOFT_RESET, delay_ms: 0); |
3452 | break; |
3453 | case PD_CTRL_DR_SWAP: |
3454 | /* |
3455 | * XXX |
3456 | * 6.3.9: If an alternate mode is active, a request to swap |
3457 | * alternate modes shall trigger a port reset. |
3458 | */ |
3459 | if (port->typec_caps.data != TYPEC_PORT_DRD) { |
3460 | tcpm_pd_handle_msg(port, |
3461 | message: port->negotiated_rev < PD_REV30 ? |
3462 | PD_MSG_CTRL_REJECT : |
3463 | PD_MSG_CTRL_NOT_SUPP, |
3464 | ams: NONE_AMS); |
3465 | } else { |
3466 | if (port->send_discover && port->negotiated_rev < PD_REV30) { |
3467 | tcpm_queue_message(port, message: PD_MSG_CTRL_WAIT); |
3468 | break; |
3469 | } |
3470 | |
3471 | tcpm_pd_handle_state(port, state: DR_SWAP_ACCEPT, ams: DATA_ROLE_SWAP, delay_ms: 0); |
3472 | } |
3473 | break; |
3474 | case PD_CTRL_PR_SWAP: |
3475 | if (port->port_type != TYPEC_PORT_DRP) { |
3476 | tcpm_pd_handle_msg(port, |
3477 | message: port->negotiated_rev < PD_REV30 ? |
3478 | PD_MSG_CTRL_REJECT : |
3479 | PD_MSG_CTRL_NOT_SUPP, |
3480 | ams: NONE_AMS); |
3481 | } else { |
3482 | if (port->send_discover && port->negotiated_rev < PD_REV30) { |
3483 | tcpm_queue_message(port, message: PD_MSG_CTRL_WAIT); |
3484 | break; |
3485 | } |
3486 | |
3487 | tcpm_pd_handle_state(port, state: PR_SWAP_ACCEPT, ams: POWER_ROLE_SWAP, delay_ms: 0); |
3488 | } |
3489 | break; |
3490 | case PD_CTRL_VCONN_SWAP: |
3491 | if (port->send_discover && port->negotiated_rev < PD_REV30) { |
3492 | tcpm_queue_message(port, message: PD_MSG_CTRL_WAIT); |
3493 | break; |
3494 | } |
3495 | |
3496 | tcpm_pd_handle_state(port, state: VCONN_SWAP_ACCEPT, ams: VCONN_SWAP, delay_ms: 0); |
3497 | break; |
3498 | case PD_CTRL_GET_SOURCE_CAP_EXT: |
3499 | case PD_CTRL_GET_STATUS: |
3500 | case PD_CTRL_FR_SWAP: |
3501 | case PD_CTRL_GET_PPS_STATUS: |
3502 | case PD_CTRL_GET_COUNTRY_CODES: |
3503 | /* Currently not supported */ |
3504 | tcpm_pd_handle_msg(port, |
3505 | message: port->negotiated_rev < PD_REV30 ? |
3506 | PD_MSG_CTRL_REJECT : |
3507 | PD_MSG_CTRL_NOT_SUPP, |
3508 | ams: NONE_AMS); |
3509 | break; |
3510 | default: |
3511 | tcpm_pd_handle_msg(port, |
3512 | message: port->negotiated_rev < PD_REV30 ? |
3513 | PD_MSG_CTRL_REJECT : |
3514 | PD_MSG_CTRL_NOT_SUPP, |
3515 | ams: NONE_AMS); |
3516 | tcpm_log(port, fmt: "Unrecognized ctrl message type %#x" , type); |
3517 | break; |
3518 | } |
3519 | } |
3520 | |
3521 | static void tcpm_pd_ext_msg_request(struct tcpm_port *port, |
3522 | const struct pd_message *msg) |
3523 | { |
3524 | enum pd_ext_msg_type type = pd_header_type_le(header: msg->header); |
3525 | unsigned int data_size = pd_ext_header_data_size_le(ext_header: msg->ext_msg.header); |
3526 | |
3527 | /* stopping VDM state machine if interrupted by other Messages */ |
3528 | if (tcpm_vdm_ams(port)) { |
3529 | port->vdm_state = VDM_STATE_ERR_BUSY; |
3530 | tcpm_ams_finish(port); |
3531 | mod_vdm_delayed_work(port, delay_ms: 0); |
3532 | } |
3533 | |
3534 | if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) { |
3535 | tcpm_pd_handle_msg(port, message: PD_MSG_CTRL_NOT_SUPP, ams: NONE_AMS); |
3536 | tcpm_log(port, fmt: "Unchunked extended messages unsupported" ); |
3537 | return; |
3538 | } |
3539 | |
3540 | if (data_size > PD_EXT_MAX_CHUNK_DATA) { |
3541 | tcpm_pd_handle_state(port, state: CHUNK_NOT_SUPP, ams: NONE_AMS, PD_T_CHUNK_NOT_SUPP); |
3542 | tcpm_log(port, fmt: "Chunk handling not yet supported" ); |
3543 | return; |
3544 | } |
3545 | |
3546 | switch (type) { |
3547 | case PD_EXT_STATUS: |
3548 | case PD_EXT_PPS_STATUS: |
3549 | if (port->ams == GETTING_SOURCE_SINK_STATUS) { |
3550 | tcpm_ams_finish(port); |
3551 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
3552 | } else { |
3553 | /* unexpected Status or PPS_Status Message */ |
3554 | tcpm_pd_handle_state(port, state: port->pwr_role == TYPEC_SOURCE ? |
3555 | SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET, |
3556 | ams: NONE_AMS, delay_ms: 0); |
3557 | } |
3558 | break; |
3559 | case PD_EXT_SOURCE_CAP_EXT: |
3560 | case PD_EXT_GET_BATT_CAP: |
3561 | case PD_EXT_GET_BATT_STATUS: |
3562 | case PD_EXT_BATT_CAP: |
3563 | case PD_EXT_GET_MANUFACTURER_INFO: |
3564 | case PD_EXT_MANUFACTURER_INFO: |
3565 | case PD_EXT_SECURITY_REQUEST: |
3566 | case PD_EXT_SECURITY_RESPONSE: |
3567 | case PD_EXT_FW_UPDATE_REQUEST: |
3568 | case PD_EXT_FW_UPDATE_RESPONSE: |
3569 | case PD_EXT_COUNTRY_INFO: |
3570 | case PD_EXT_COUNTRY_CODES: |
3571 | tcpm_pd_handle_msg(port, message: PD_MSG_CTRL_NOT_SUPP, ams: NONE_AMS); |
3572 | break; |
3573 | default: |
3574 | tcpm_pd_handle_msg(port, message: PD_MSG_CTRL_NOT_SUPP, ams: NONE_AMS); |
3575 | tcpm_log(port, fmt: "Unrecognized extended message type %#x" , type); |
3576 | break; |
3577 | } |
3578 | } |
3579 | |
3580 | static void tcpm_pd_rx_handler(struct kthread_work *work) |
3581 | { |
3582 | struct pd_rx_event *event = container_of(work, |
3583 | struct pd_rx_event, work); |
3584 | const struct pd_message *msg = &event->msg; |
3585 | unsigned int cnt = pd_header_cnt_le(header: msg->header); |
3586 | struct tcpm_port *port = event->port; |
3587 | enum tcpm_transmit_type rx_sop_type = event->rx_sop_type; |
3588 | |
3589 | mutex_lock(&port->lock); |
3590 | |
3591 | tcpm_log(port, fmt: "PD RX, header: %#x [%d]" , le16_to_cpu(msg->header), |
3592 | port->attached); |
3593 | |
3594 | if (port->attached) { |
3595 | enum pd_ctrl_msg_type type = pd_header_type_le(header: msg->header); |
3596 | unsigned int msgid = pd_header_msgid_le(header: msg->header); |
3597 | |
3598 | /* |
3599 | * Drop SOP' messages if cannot receive via |
3600 | * tcpm_can_communicate_sop_prime |
3601 | */ |
3602 | if (rx_sop_type == TCPC_TX_SOP_PRIME && |
3603 | !tcpm_can_communicate_sop_prime(port)) |
3604 | goto done; |
3605 | |
3606 | /* |
3607 | * USB PD standard, 6.6.1.2: |
3608 | * "... if MessageID value in a received Message is the |
3609 | * same as the stored value, the receiver shall return a |
3610 | * GoodCRC Message with that MessageID value and drop |
3611 | * the Message (this is a retry of an already received |
3612 | * Message). Note: this shall not apply to the Soft_Reset |
3613 | * Message which always has a MessageID value of zero." |
3614 | */ |
3615 | switch (rx_sop_type) { |
3616 | case TCPC_TX_SOP_PRIME: |
3617 | if (msgid == port->rx_msgid_prime) |
3618 | goto done; |
3619 | port->rx_msgid_prime = msgid; |
3620 | break; |
3621 | case TCPC_TX_SOP: |
3622 | default: |
3623 | if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET) |
3624 | goto done; |
3625 | port->rx_msgid = msgid; |
3626 | break; |
3627 | } |
3628 | |
3629 | /* |
3630 | * If both ends believe to be DFP/host, we have a data role |
3631 | * mismatch. |
3632 | */ |
3633 | if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) == |
3634 | (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) { |
3635 | tcpm_log(port, |
3636 | fmt: "Data role mismatch, initiating error recovery" ); |
3637 | tcpm_set_state(port, state: ERROR_RECOVERY, delay_ms: 0); |
3638 | } else { |
3639 | if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR) |
3640 | tcpm_pd_ext_msg_request(port, msg); |
3641 | else if (cnt) |
3642 | tcpm_pd_data_request(port, msg, rx_sop_type); |
3643 | else |
3644 | tcpm_pd_ctrl_request(port, msg, rx_sop_type); |
3645 | } |
3646 | } |
3647 | |
3648 | done: |
3649 | mutex_unlock(lock: &port->lock); |
3650 | kfree(objp: event); |
3651 | } |
3652 | |
3653 | void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg, |
3654 | enum tcpm_transmit_type rx_sop_type) |
3655 | { |
3656 | struct pd_rx_event *event; |
3657 | |
3658 | event = kzalloc(size: sizeof(*event), GFP_ATOMIC); |
3659 | if (!event) |
3660 | return; |
3661 | |
3662 | kthread_init_work(&event->work, tcpm_pd_rx_handler); |
3663 | event->port = port; |
3664 | event->rx_sop_type = rx_sop_type; |
3665 | memcpy(&event->msg, msg, sizeof(*msg)); |
3666 | kthread_queue_work(worker: port->wq, work: &event->work); |
3667 | } |
3668 | EXPORT_SYMBOL_GPL(tcpm_pd_receive); |
3669 | |
3670 | static int tcpm_pd_send_control(struct tcpm_port *port, |
3671 | enum pd_ctrl_msg_type type, |
3672 | enum tcpm_transmit_type tx_sop_type) |
3673 | { |
3674 | struct pd_message msg; |
3675 | |
3676 | memset(&msg, 0, sizeof(msg)); |
3677 | switch (tx_sop_type) { |
3678 | case TCPC_TX_SOP_PRIME: |
3679 | msg.header = PD_HEADER_LE(type, |
3680 | 0, /* Cable Plug Indicator for DFP/UFP */ |
3681 | 0, /* Reserved */ |
3682 | port->negotiated_rev, |
3683 | port->message_id_prime, |
3684 | 0); |
3685 | break; |
3686 | case TCPC_TX_SOP: |
3687 | msg.header = PD_HEADER_LE(type, |
3688 | port->pwr_role, |
3689 | port->data_role, |
3690 | port->negotiated_rev, |
3691 | port->message_id, |
3692 | 0); |
3693 | break; |
3694 | default: |
3695 | msg.header = PD_HEADER_LE(type, |
3696 | port->pwr_role, |
3697 | port->data_role, |
3698 | port->negotiated_rev, |
3699 | port->message_id, |
3700 | 0); |
3701 | break; |
3702 | } |
3703 | |
3704 | return tcpm_pd_transmit(port, tx_sop_type, msg: &msg); |
3705 | } |
3706 | |
3707 | /* |
3708 | * Send queued message without affecting state. |
3709 | * Return true if state machine should go back to sleep, |
3710 | * false otherwise. |
3711 | */ |
3712 | static bool tcpm_send_queued_message(struct tcpm_port *port) |
3713 | { |
3714 | enum pd_msg_request queued_message; |
3715 | int ret; |
3716 | |
3717 | do { |
3718 | queued_message = port->queued_message; |
3719 | port->queued_message = PD_MSG_NONE; |
3720 | |
3721 | switch (queued_message) { |
3722 | case PD_MSG_CTRL_WAIT: |
3723 | tcpm_pd_send_control(port, type: PD_CTRL_WAIT, tx_sop_type: TCPC_TX_SOP); |
3724 | break; |
3725 | case PD_MSG_CTRL_REJECT: |
3726 | tcpm_pd_send_control(port, type: PD_CTRL_REJECT, tx_sop_type: TCPC_TX_SOP); |
3727 | break; |
3728 | case PD_MSG_CTRL_NOT_SUPP: |
3729 | tcpm_pd_send_control(port, type: PD_CTRL_NOT_SUPP, tx_sop_type: TCPC_TX_SOP); |
3730 | break; |
3731 | case PD_MSG_DATA_SINK_CAP: |
3732 | ret = tcpm_pd_send_sink_caps(port); |
3733 | if (ret < 0) { |
3734 | tcpm_log(port, fmt: "Unable to send snk caps, ret=%d" , ret); |
3735 | tcpm_set_state(port, state: SNK_SOFT_RESET, delay_ms: 0); |
3736 | } |
3737 | tcpm_ams_finish(port); |
3738 | break; |
3739 | case PD_MSG_DATA_SOURCE_CAP: |
3740 | ret = tcpm_pd_send_source_caps(port); |
3741 | if (ret < 0) { |
3742 | tcpm_log(port, |
3743 | fmt: "Unable to send src caps, ret=%d" , |
3744 | ret); |
3745 | tcpm_set_state(port, state: SOFT_RESET_SEND, delay_ms: 0); |
3746 | } else if (port->pwr_role == TYPEC_SOURCE) { |
3747 | tcpm_ams_finish(port); |
3748 | tcpm_set_state(port, state: HARD_RESET_SEND, |
3749 | PD_T_SENDER_RESPONSE); |
3750 | } else { |
3751 | tcpm_ams_finish(port); |
3752 | } |
3753 | break; |
3754 | default: |
3755 | break; |
3756 | } |
3757 | } while (port->queued_message != PD_MSG_NONE); |
3758 | |
3759 | if (port->delayed_state != INVALID_STATE) { |
3760 | if (ktime_after(cmp1: port->delayed_runtime, cmp2: ktime_get())) { |
3761 | mod_tcpm_delayed_work(port, delay_ms: ktime_to_ms(ktime_sub(port->delayed_runtime, |
3762 | ktime_get()))); |
3763 | return true; |
3764 | } |
3765 | port->delayed_state = INVALID_STATE; |
3766 | } |
3767 | return false; |
3768 | } |
3769 | |
3770 | static int tcpm_pd_check_request(struct tcpm_port *port) |
3771 | { |
3772 | u32 pdo, rdo = port->sink_request; |
3773 | unsigned int max, op, pdo_max, index; |
3774 | enum pd_pdo_type type; |
3775 | |
3776 | index = rdo_index(rdo); |
3777 | if (!index || index > port->nr_src_pdo) |
3778 | return -EINVAL; |
3779 | |
3780 | pdo = port->src_pdo[index - 1]; |
3781 | type = pdo_type(pdo); |
3782 | switch (type) { |
3783 | case PDO_TYPE_FIXED: |
3784 | case PDO_TYPE_VAR: |
3785 | max = rdo_max_current(rdo); |
3786 | op = rdo_op_current(rdo); |
3787 | pdo_max = pdo_max_current(pdo); |
3788 | |
3789 | if (op > pdo_max) |
3790 | return -EINVAL; |
3791 | if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH)) |
3792 | return -EINVAL; |
3793 | |
3794 | if (type == PDO_TYPE_FIXED) |
3795 | tcpm_log(port, |
3796 | fmt: "Requested %u mV, %u mA for %u / %u mA" , |
3797 | pdo_fixed_voltage(pdo), pdo_max, op, max); |
3798 | else |
3799 | tcpm_log(port, |
3800 | fmt: "Requested %u -> %u mV, %u mA for %u / %u mA" , |
3801 | pdo_min_voltage(pdo), pdo_max_voltage(pdo), |
3802 | pdo_max, op, max); |
3803 | break; |
3804 | case PDO_TYPE_BATT: |
3805 | max = rdo_max_power(rdo); |
3806 | op = rdo_op_power(rdo); |
3807 | pdo_max = pdo_max_power(pdo); |
3808 | |
3809 | if (op > pdo_max) |
3810 | return -EINVAL; |
3811 | if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH)) |
3812 | return -EINVAL; |
3813 | tcpm_log(port, |
3814 | fmt: "Requested %u -> %u mV, %u mW for %u / %u mW" , |
3815 | pdo_min_voltage(pdo), pdo_max_voltage(pdo), |
3816 | pdo_max, op, max); |
3817 | break; |
3818 | default: |
3819 | return -EINVAL; |
3820 | } |
3821 | |
3822 | port->op_vsafe5v = index == 1; |
3823 | |
3824 | return 0; |
3825 | } |
3826 | |
/* Lesser of two PDOs' maximum power (mW) / maximum current (mA). */
#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3829 | |
/*
 * Pick the best (source PDO, sink PDO) pair from the partner's advertised
 * source capabilities and our sink capabilities.
 *
 * A source PDO matches a sink PDO when its full voltage range lies inside
 * the sink PDO's range. Among matches, the source PDO delivering the most
 * power wins; at equal power, the higher minimum voltage wins. PPS APDOs
 * are never selected here — their presence is only recorded in
 * port->pps_data.supported (APDO selection is done separately).
 *
 * On success, stores the winning indices in *src_pdo / *sink_pdo and
 * returns 0; returns -EINVAL if no combination matches.
 */
static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
			      int *src_pdo)
{
	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
		     min_snk_mv = 0;
	int ret = -EINVAL;

	port->pps_data.supported = false;
	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
	power_supply_changed(psy: port->psy);

	/*
	 * Select the source PDO providing the most power which has a
	 * matching sink cap.
	 */
	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);

		/* Determine the source PDO's voltage range */
		switch (type) {
		case PDO_TYPE_FIXED:
			max_src_mv = pdo_fixed_voltage(pdo);
			min_src_mv = max_src_mv;
			break;
		case PDO_TYPE_BATT:
		case PDO_TYPE_VAR:
			max_src_mv = pdo_max_voltage(pdo);
			min_src_mv = pdo_min_voltage(pdo);
			break;
		case PDO_TYPE_APDO:
			/* Note PPS support only; APDOs are not candidates here */
			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
				port->pps_data.supported = true;
				port->usb_type =
					POWER_SUPPLY_USB_TYPE_PD_PPS;
				power_supply_changed(psy: port->psy);
			}
			continue;
		default:
			tcpm_log(port, fmt: "Invalid source PDO type, ignoring" );
			continue;
		}

		/* Determine the source PDO's deliverable power in mW */
		switch (type) {
		case PDO_TYPE_FIXED:
		case PDO_TYPE_VAR:
			src_ma = pdo_max_current(pdo);
			src_mw = src_ma * min_src_mv / 1000;
			break;
		case PDO_TYPE_BATT:
			src_mw = pdo_max_power(pdo);
			break;
		case PDO_TYPE_APDO:
			continue;
		default:
			tcpm_log(port, fmt: "Invalid source PDO type, ignoring" );
			continue;
		}

		/* Look for a sink PDO whose range contains the source's */
		for (j = 0; j < port->nr_snk_pdo; j++) {
			pdo = port->snk_pdo[j];

			switch (pdo_type(pdo)) {
			case PDO_TYPE_FIXED:
				max_snk_mv = pdo_fixed_voltage(pdo);
				min_snk_mv = max_snk_mv;
				break;
			case PDO_TYPE_BATT:
			case PDO_TYPE_VAR:
				max_snk_mv = pdo_max_voltage(pdo);
				min_snk_mv = pdo_min_voltage(pdo);
				break;
			case PDO_TYPE_APDO:
				continue;
			default:
				tcpm_log(port, fmt: "Invalid sink PDO type, ignoring" );
				continue;
			}

			if (max_src_mv <= max_snk_mv &&
			    min_src_mv >= min_snk_mv) {
				/* Prefer higher voltages if available */
				if ((src_mw == max_mw && min_src_mv > max_mv) ||
				    src_mw > max_mw) {
					*src_pdo = i;
					*sink_pdo = j;
					max_mw = src_mw;
					max_mv = min_src_mv;
					ret = 0;
				}
			}
		}
	}

	return ret;
}
3926 | |
/*
 * Select the PPS APDO best satisfying the requested output voltage and
 * operating current (port->pps_data.req_out_volt / req_op_curr).
 *
 * Source PDO 0 is skipped (the scan starts at index 1); only PPS APDOs
 * whose programmable voltage range contains the requested voltage are
 * considered, and the one yielding the highest operating power wins.
 * On success, the request limits in port->pps_data are updated from the
 * chosen APDO and the operating current is clamped to its maximum.
 *
 * Returns the selected source-cap index, or 0 if none is suitable.
 */
static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
	unsigned int src_pdo = 0;
	u32 pdo, src;

	for (i = 1; i < port->nr_source_caps; ++i) {
		pdo = port->source_caps[i];

		switch (pdo_type(pdo)) {
		case PDO_TYPE_APDO:
			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
				tcpm_log(port, fmt: "Not PPS APDO (source), ignoring" );
				continue;
			}

			/* Requested voltage must be within the APDO's range */
			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
				continue;

			/* Keep the APDO giving the most operating power */
			src_ma = pdo_pps_apdo_max_current(pdo);
			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
			if (op_mw > max_temp_mw) {
				src_pdo = i;
				max_temp_mw = op_mw;
			}
			break;
		default:
			tcpm_log(port, fmt: "Not APDO type (source), ignoring" );
			continue;
		}
	}

	if (src_pdo) {
		src = port->source_caps[src_pdo];

		/* Adopt the winner's limits and clamp the operating current */
		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(pdo: src);
		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(pdo: src);
		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(pdo: src);
		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
						 port->pps_data.req_op_curr);
	}

	return src_pdo;
}
3973 | |
/*
 * Build a Request Data Object (RDO) for the PDO pair chosen by
 * tcpm_pd_select_pdo().
 *
 * The operating current/power is the minimum of the source and sink PDO
 * limits. If the offered power is below port->operating_snk_mw, the
 * Capability Mismatch flag is set and the "maximum" field advertises the
 * sink's true requirement. Also records the resulting voltage/current in
 * port->req_supply_voltage / req_current_limit.
 *
 * Returns 0 on success, a negative errno if no PDO matched or the
 * selected PDO type is invalid.
 */
static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int mv, ma, mw, flags;
	unsigned int max_ma, max_mw;
	enum pd_pdo_type type;
	u32 pdo, matching_snk_pdo;
	int src_pdo_index = 0;
	int snk_pdo_index = 0;
	int ret;

	ret = tcpm_pd_select_pdo(port, sink_pdo: &snk_pdo_index, src_pdo: &src_pdo_index);
	if (ret < 0)
		return ret;

	pdo = port->source_caps[src_pdo_index];
	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
	type = pdo_type(pdo);

	/* Request at the PDO's lowest voltage */
	switch (type) {
	case PDO_TYPE_FIXED:
		mv = pdo_fixed_voltage(pdo);
		break;
	case PDO_TYPE_BATT:
	case PDO_TYPE_VAR:
		mv = pdo_min_voltage(pdo);
		break;
	default:
		tcpm_log(port, fmt: "Invalid PDO selected!" );
		return -EINVAL;
	}

	/* Select maximum available current within the sink pdo's limit */
	if (type == PDO_TYPE_BATT) {
		mw = min_power(pdo, matching_snk_pdo);
		ma = 1000 * mw / mv;
	} else {
		ma = min_current(pdo, matching_snk_pdo);
		mw = ma * mv / 1000;
	}

	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	/* Set mismatch bit if offered power is less than operating power */
	max_ma = ma;
	max_mw = mw;
	if (mw < port->operating_snk_mw) {
		flags |= RDO_CAP_MISMATCH;
		/* Advertise the sink's real need in the max field */
		if (type == PDO_TYPE_BATT &&
		    (pdo_max_power(pdo: matching_snk_pdo) > pdo_max_power(pdo)))
			max_mw = pdo_max_power(pdo: matching_snk_pdo);
		else if (pdo_max_current(pdo: matching_snk_pdo) >
			 pdo_max_current(pdo))
			max_ma = pdo_max_current(pdo: matching_snk_pdo);
	}

	tcpm_log(port, fmt: "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d" ,
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink" ,
		 port->polarity);

	/* Battery PDOs are requested in mW, fixed/variable in mA */
	if (type == PDO_TYPE_BATT) {
		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);

		tcpm_log(port, fmt: "Requesting PDO %d: %u mV, %u mW%s" ,
			 src_pdo_index, mv, mw,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "" );
	} else {
		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);

		tcpm_log(port, fmt: "Requesting PDO %d: %u mV, %u mA%s" ,
			 src_pdo_index, mv, ma,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "" );
	}

	port->req_current_limit = ma;
	port->req_supply_voltage = mv;

	return 0;
}
4053 | |
4054 | static int tcpm_pd_send_request(struct tcpm_port *port) |
4055 | { |
4056 | struct pd_message msg; |
4057 | int ret; |
4058 | u32 rdo; |
4059 | |
4060 | ret = tcpm_pd_build_request(port, rdo: &rdo); |
4061 | if (ret < 0) |
4062 | return ret; |
4063 | |
4064 | /* |
4065 | * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition. |
4066 | * It is safer to modify the threshold here. |
4067 | */ |
4068 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_USB, pps_active: false, requested_vbus_voltage: 0); |
4069 | |
4070 | memset(&msg, 0, sizeof(msg)); |
4071 | msg.header = PD_HEADER_LE(PD_DATA_REQUEST, |
4072 | port->pwr_role, |
4073 | port->data_role, |
4074 | port->negotiated_rev, |
4075 | port->message_id, 1); |
4076 | msg.payload[0] = cpu_to_le32(rdo); |
4077 | |
4078 | return tcpm_pd_transmit(port, tx_sop_type: TCPC_TX_SOP, msg: &msg); |
4079 | } |
4080 | |
/*
 * Build a programmable (PPS) Request Data Object for the APDO selected by
 * tcpm_pd_select_pps_apdo().
 *
 * If the initial (voltage, current) operating point does not cover
 * port->operating_snk_mw, the current is raised first (rounded up to
 * RDO_PROG_CURR_MA_STEP), then the voltage (rounded up to
 * RDO_PROG_VOLT_MV_STEP), each clamped to the APDO's limits. The final
 * operating point is written back to port->pps_data.
 *
 * Returns 0 on success, -EOPNOTSUPP if no APDO matched, -EINVAL if even
 * the APDO maximum cannot meet the power requirement.
 */
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
	unsigned int src_pdo_index;

	src_pdo_index = tcpm_pd_select_pps_apdo(port);
	if (!src_pdo_index)
		return -EOPNOTSUPP;

	max_mv = port->pps_data.req_max_volt;
	max_ma = port->pps_data.req_max_curr;
	out_mv = port->pps_data.req_out_volt;
	op_ma = port->pps_data.req_op_curr;

	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	op_mw = (op_ma * out_mv) / 1000;
	if (op_mw < port->operating_snk_mw) {
		/*
		 * Try raising current to meet power needs. If that's not enough
		 * then try upping the voltage. If that's still not enough
		 * then we've obviously chosen a PPS APDO which really isn't
		 * suitable so abandon ship.
		 */
		op_ma = (port->operating_snk_mw * 1000) / out_mv;
		/* Round up on a remainder, then snap to the PPS current step */
		if ((port->operating_snk_mw * 1000) % out_mv)
			++op_ma;
		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);

		if (op_ma > max_ma) {
			/* Current capped; raise voltage instead (same rounding) */
			op_ma = max_ma;
			out_mv = (port->operating_snk_mw * 1000) / op_ma;
			if ((port->operating_snk_mw * 1000) % op_ma)
				++out_mv;
			out_mv += RDO_PROG_VOLT_MV_STEP -
				  (out_mv % RDO_PROG_VOLT_MV_STEP);

			if (out_mv > max_mv) {
				tcpm_log(port, fmt: "Invalid PPS APDO selected!" );
				return -EINVAL;
			}
		}
	}

	tcpm_log(port, fmt: "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d" ,
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink" ,
		 port->polarity);

	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);

	tcpm_log(port, fmt: "Requesting APDO %d: %u mV, %u mA" ,
		 src_pdo_index, out_mv, op_ma);

	/* Remember the final, possibly adjusted, operating point */
	port->pps_data.req_op_curr = op_ma;
	port->pps_data.req_out_volt = out_mv;

	return 0;
}
4140 | |
4141 | static int tcpm_pd_send_pps_request(struct tcpm_port *port) |
4142 | { |
4143 | struct pd_message msg; |
4144 | int ret; |
4145 | u32 rdo; |
4146 | |
4147 | ret = tcpm_pd_build_pps_request(port, rdo: &rdo); |
4148 | if (ret < 0) |
4149 | return ret; |
4150 | |
4151 | /* Relax the threshold as voltage will be adjusted right after Accept Message. */ |
4152 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_USB, pps_active: false, requested_vbus_voltage: 0); |
4153 | |
4154 | memset(&msg, 0, sizeof(msg)); |
4155 | msg.header = PD_HEADER_LE(PD_DATA_REQUEST, |
4156 | port->pwr_role, |
4157 | port->data_role, |
4158 | port->negotiated_rev, |
4159 | port->message_id, 1); |
4160 | msg.payload[0] = cpu_to_le32(rdo); |
4161 | |
4162 | return tcpm_pd_transmit(port, tx_sop_type: TCPC_TX_SOP, msg: &msg); |
4163 | } |
4164 | |
4165 | static int tcpm_set_vbus(struct tcpm_port *port, bool enable) |
4166 | { |
4167 | int ret; |
4168 | |
4169 | if (enable && port->vbus_charge) |
4170 | return -EINVAL; |
4171 | |
4172 | tcpm_log(port, fmt: "vbus:=%d charge=%d" , enable, port->vbus_charge); |
4173 | |
4174 | ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge); |
4175 | if (ret < 0) |
4176 | return ret; |
4177 | |
4178 | port->vbus_source = enable; |
4179 | return 0; |
4180 | } |
4181 | |
4182 | static int tcpm_set_charge(struct tcpm_port *port, bool charge) |
4183 | { |
4184 | int ret; |
4185 | |
4186 | if (charge && port->vbus_source) |
4187 | return -EINVAL; |
4188 | |
4189 | if (charge != port->vbus_charge) { |
4190 | tcpm_log(port, fmt: "vbus=%d charge:=%d" , port->vbus_source, charge); |
4191 | ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source, |
4192 | charge); |
4193 | if (ret < 0) |
4194 | return ret; |
4195 | } |
4196 | port->vbus_charge = charge; |
4197 | power_supply_changed(psy: port->psy); |
4198 | return 0; |
4199 | } |
4200 | |
4201 | static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc) |
4202 | { |
4203 | int ret; |
4204 | |
4205 | if (!port->tcpc->start_toggling) |
4206 | return false; |
4207 | |
4208 | tcpm_log_force(port, fmt: "Start toggling" ); |
4209 | ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc); |
4210 | return ret == 0; |
4211 | } |
4212 | |
4213 | static int tcpm_init_vbus(struct tcpm_port *port) |
4214 | { |
4215 | int ret; |
4216 | |
4217 | ret = port->tcpc->set_vbus(port->tcpc, false, false); |
4218 | port->vbus_source = false; |
4219 | port->vbus_charge = false; |
4220 | return ret; |
4221 | } |
4222 | |
4223 | static int tcpm_init_vconn(struct tcpm_port *port) |
4224 | { |
4225 | int ret; |
4226 | |
4227 | ret = port->tcpc->set_vconn(port->tcpc, false); |
4228 | port->vconn_role = TYPEC_SINK; |
4229 | return ret; |
4230 | } |
4231 | |
4232 | static void tcpm_typec_connect(struct tcpm_port *port) |
4233 | { |
4234 | if (!port->connected) { |
4235 | /* Make sure we don't report stale identity information */ |
4236 | memset(&port->partner_ident, 0, sizeof(port->partner_ident)); |
4237 | port->partner_desc.usb_pd = port->pd_capable; |
4238 | if (tcpm_port_is_debug(port)) |
4239 | port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG; |
4240 | else if (tcpm_port_is_audio(port)) |
4241 | port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO; |
4242 | else |
4243 | port->partner_desc.accessory = TYPEC_ACCESSORY_NONE; |
4244 | port->partner = typec_register_partner(port: port->typec_port, |
4245 | desc: &port->partner_desc); |
4246 | port->connected = true; |
4247 | typec_partner_set_usb_power_delivery(partner: port->partner, pd: port->partner_pd); |
4248 | } |
4249 | } |
4250 | |
/*
 * Transition the port into the attached-as-source state.
 *
 * Sets polarity from the CC pin where Rd was detected, enables
 * auto-discharge, configures roles, enables PD receive (if supported),
 * sources VCONN when the non-Rd pin presents Ra, and finally drives VBUS.
 * Uses goto-chain cleanup: each failure unwinds exactly the steps already
 * taken. Returns 0 on success or if already attached.
 */
static int tcpm_src_attach(struct tcpm_port *port)
{
	enum typec_cc_polarity polarity =
				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
							 : TYPEC_POLARITY_CC1;
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, polarity);
	if (ret < 0)
		return ret;

	tcpm_enable_auto_vbus_discharge(port, enable: true);

	ret = tcpm_set_roles(port, attached: true, role: TYPEC_SOURCE, tcpm_data_role_for_source(port));
	if (ret < 0)
		return ret;

	if (port->pd_supported) {
		ret = port->tcpc->set_pd_rx(port->tcpc, true);
		if (ret < 0)
			goto out_disable_mux;
	}

	/*
	 * USB Type-C specification, version 1.2,
	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
	 * Enable VCONN only if the non-RD port is set to RA.
	 */
	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
		ret = tcpm_set_vconn(port, enable: true);
		if (ret < 0)
			goto out_disable_pd;
	}

	ret = tcpm_set_vbus(port, enable: true);
	if (ret < 0)
		goto out_disable_vconn;

	/* PD capability/partner are established later during negotiation */
	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	port->send_discover = true;
	port->send_discover_prime = false;

	return 0;

out_disable_vconn:
	tcpm_set_vconn(port, enable: false);
out_disable_pd:
	if (port->pd_supported)
		port->tcpc->set_pd_rx(port->tcpc, false);
out_disable_mux:
	tcpm_mux_set(port, state: TYPEC_STATE_SAFE, usb_role: USB_ROLE_NONE,
		     orientation: TYPEC_ORIENTATION_NONE);
	return ret;
}
4313 | |
4314 | static void tcpm_typec_disconnect(struct tcpm_port *port) |
4315 | { |
4316 | /* |
4317 | * Unregister plug/cable outside of port->connected because cable can |
4318 | * be discovered before SRC_READY/SNK_READY states where port->connected |
4319 | * is set. |
4320 | */ |
4321 | typec_unregister_plug(plug: port->plug_prime); |
4322 | typec_unregister_cable(cable: port->cable); |
4323 | port->plug_prime = NULL; |
4324 | port->cable = NULL; |
4325 | if (port->connected) { |
4326 | typec_partner_set_usb_power_delivery(partner: port->partner, NULL); |
4327 | typec_unregister_partner(partner: port->partner); |
4328 | port->partner = NULL; |
4329 | port->connected = false; |
4330 | } |
4331 | } |
4332 | |
4333 | static void tcpm_unregister_altmodes(struct tcpm_port *port) |
4334 | { |
4335 | struct pd_mode_data *modep = &port->mode_data; |
4336 | struct pd_mode_data *modep_prime = &port->mode_data_prime; |
4337 | int i; |
4338 | |
4339 | for (i = 0; i < modep->altmodes; i++) { |
4340 | typec_unregister_altmode(altmode: port->partner_altmode[i]); |
4341 | port->partner_altmode[i] = NULL; |
4342 | } |
4343 | for (i = 0; i < modep_prime->altmodes; i++) { |
4344 | typec_unregister_altmode(altmode: port->plug_prime_altmode[i]); |
4345 | port->plug_prime_altmode[i] = NULL; |
4346 | } |
4347 | |
4348 | memset(modep, 0, sizeof(*modep)); |
4349 | memset(modep_prime, 0, sizeof(*modep_prime)); |
4350 | } |
4351 | |
4352 | static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable) |
4353 | { |
4354 | tcpm_log(port, fmt: "Setting usb_comm capable %s" , capable ? "true" : "false" ); |
4355 | |
4356 | if (port->tcpc->set_partner_usb_comm_capable) |
4357 | port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable); |
4358 | } |
4359 | |
/*
 * Return the port to its fully disconnected baseline: clear AMS/VDM
 * state, unregister alt modes and Type-C class objects, reset message-ID
 * tracking, disable PD receive, VBUS, VCONN, current limit and mux, and
 * release the partner's power-delivery capability objects. The ordering
 * mirrors the attach path in reverse; keep it when modifying.
 */
static void tcpm_reset_port(struct tcpm_port *port)
{
	tcpm_enable_auto_vbus_discharge(port, enable: false);
	port->in_ams = false;
	port->ams = NONE_AMS;
	port->vdm_sm_running = false;
	tcpm_unregister_altmodes(port);
	tcpm_typec_disconnect(port);
	port->attached = false;
	port->pd_capable = false;
	port->pps_data.supported = false;
	tcpm_set_partner_usb_comm_capable(port, capable: false);

	/*
	 * First Rx ID should be 0; set this to a sentinel of -1 so that
	 * we can check tcpm_pd_rx_handler() if we had seen it before.
	 */
	port->rx_msgid = -1;
	port->rx_msgid_prime = -1;

	port->tcpc->set_pd_rx(port->tcpc, false);
	tcpm_init_vbus(port);	/* also disables charging */
	tcpm_init_vconn(port);
	tcpm_set_current_limit(port, max_ma: 0, mv: 0);
	tcpm_set_polarity(port, polarity: TYPEC_POLARITY_CC1);
	tcpm_mux_set(port, state: TYPEC_STATE_SAFE, usb_role: USB_ROLE_NONE,
		     orientation: TYPEC_ORIENTATION_NONE);
	tcpm_set_attached_state(port, attached: false);
	port->try_src_count = 0;
	port->try_snk_count = 0;
	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
	power_supply_changed(psy: port->psy);
	port->nr_sink_caps = 0;
	port->sink_cap_done = false;
	if (port->tcpc->enable_frs)
		port->tcpc->enable_frs(port->tcpc, false);

	/* Drop the partner's registered PD capability objects */
	usb_power_delivery_unregister_capabilities(cap: port->partner_sink_caps);
	port->partner_sink_caps = NULL;
	usb_power_delivery_unregister_capabilities(cap: port->partner_source_caps);
	port->partner_source_caps = NULL;
	usb_power_delivery_unregister(pd: port->partner_pd);
	port->partner_pd = NULL;
}
4404 | |
/*
 * Common detach path for source/sink/accessory roles: reset the hard
 * reset counter on a clean disconnect, turn off any active BIST test
 * mode, and reset the port. A no-op if not currently attached.
 */
static void tcpm_detach(struct tcpm_port *port)
{
	if (tcpm_port_is_disconnected(port))
		port->hard_reset_count = 0;

	if (!port->attached)
		return;

	if (port->tcpc->set_bist_data) {
		tcpm_log(port, fmt: "disable BIST MODE TESTDATA" );
		port->tcpc->set_bist_data(port->tcpc, false);
	}

	tcpm_reset_port(port);
}
4420 | |
/* Source-role detach; currently identical to the common detach path. */
static void tcpm_src_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
4425 | |
/*
 * Transition the port into the attached-as-sink state: set polarity from
 * the active CC pin, enable auto-discharge, and configure sink roles.
 * PD capability and the partner are established later during negotiation.
 * Returns 0 on success or if already attached.
 */
static int tcpm_snk_attach(struct tcpm_port *port)
{
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, polarity: port->cc2 != TYPEC_CC_OPEN ?
				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
	if (ret < 0)
		return ret;

	tcpm_enable_auto_vbus_discharge(port, enable: true);

	ret = tcpm_set_roles(port, attached: true, role: TYPEC_SINK, tcpm_data_role_for_sink(port));
	if (ret < 0)
		return ret;

	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	port->send_discover = true;
	port->send_discover_prime = false;

	return 0;
}
4454 | |
/* Sink-role detach; currently identical to the common detach path. */
static void tcpm_snk_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
4459 | |
4460 | static int tcpm_acc_attach(struct tcpm_port *port) |
4461 | { |
4462 | int ret; |
4463 | |
4464 | if (port->attached) |
4465 | return 0; |
4466 | |
4467 | ret = tcpm_set_roles(port, attached: true, role: TYPEC_SOURCE, |
4468 | tcpm_data_role_for_source(port)); |
4469 | if (ret < 0) |
4470 | return ret; |
4471 | |
4472 | port->partner = NULL; |
4473 | |
4474 | tcpm_typec_connect(port); |
4475 | |
4476 | port->attached = true; |
4477 | |
4478 | return 0; |
4479 | } |
4480 | |
/* Accessory detach; currently identical to the common detach path. */
static void tcpm_acc_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
4485 | |
4486 | static inline enum tcpm_state hard_reset_state(struct tcpm_port *port) |
4487 | { |
4488 | if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) |
4489 | return HARD_RESET_SEND; |
4490 | if (port->pd_capable) |
4491 | return ERROR_RECOVERY; |
4492 | if (port->pwr_role == TYPEC_SOURCE) |
4493 | return SRC_UNATTACHED; |
4494 | if (port->state == SNK_WAIT_CAPABILITIES) |
4495 | return SNK_READY; |
4496 | return SNK_UNATTACHED; |
4497 | } |
4498 | |
4499 | static inline enum tcpm_state unattached_state(struct tcpm_port *port) |
4500 | { |
4501 | if (port->port_type == TYPEC_PORT_DRP) { |
4502 | if (port->pwr_role == TYPEC_SOURCE) |
4503 | return SRC_UNATTACHED; |
4504 | else |
4505 | return SNK_UNATTACHED; |
4506 | } else if (port->port_type == TYPEC_PORT_SRC) { |
4507 | return SRC_UNATTACHED; |
4508 | } |
4509 | |
4510 | return SNK_UNATTACHED; |
4511 | } |
4512 | |
4513 | static void tcpm_swap_complete(struct tcpm_port *port, int result) |
4514 | { |
4515 | if (port->swap_pending) { |
4516 | port->swap_status = result; |
4517 | port->swap_pending = false; |
4518 | port->non_pd_role_swap = false; |
4519 | complete(&port->swap_complete); |
4520 | } |
4521 | } |
4522 | |
4523 | static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc) |
4524 | { |
4525 | switch (cc) { |
4526 | case TYPEC_CC_RP_1_5: |
4527 | return TYPEC_PWR_MODE_1_5A; |
4528 | case TYPEC_CC_RP_3_0: |
4529 | return TYPEC_PWR_MODE_3_0A; |
4530 | case TYPEC_CC_RP_DEF: |
4531 | default: |
4532 | return TYPEC_PWR_MODE_USB; |
4533 | } |
4534 | } |
4535 | |
4536 | static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode) |
4537 | { |
4538 | switch (opmode) { |
4539 | case TYPEC_PWR_MODE_USB: |
4540 | return TYPEC_CC_RP_DEF; |
4541 | case TYPEC_PWR_MODE_1_5A: |
4542 | return TYPEC_CC_RP_1_5; |
4543 | case TYPEC_PWR_MODE_3_0A: |
4544 | case TYPEC_PWR_MODE_PD: |
4545 | default: |
4546 | return TYPEC_CC_RP_3_0; |
4547 | } |
4548 | } |
4549 | |
4550 | static void tcpm_set_initial_svdm_version(struct tcpm_port *port) |
4551 | { |
4552 | switch (port->negotiated_rev) { |
4553 | case PD_REV30: |
4554 | break; |
4555 | /* |
4556 | * 6.4.4.2.3 Structured VDM Version |
4557 | * 2.0 states "At this time, there is only one version (1.0) defined. |
4558 | * This field Shall be set to zero to indicate Version 1.0." |
4559 | * 3.0 states "This field Shall be set to 01b to indicate Version 2.0." |
4560 | * To ensure that we follow the Power Delivery revision we are currently |
4561 | * operating on, downgrade the SVDM version to the highest one supported |
4562 | * by the Power Delivery revision. |
4563 | */ |
4564 | case PD_REV20: |
4565 | typec_partner_set_svdm_version(partner: port->partner, svdm_version: SVDM_VER_1_0); |
4566 | break; |
4567 | default: |
4568 | typec_partner_set_svdm_version(partner: port->partner, svdm_version: SVDM_VER_1_0); |
4569 | break; |
4570 | } |
4571 | } |
4572 | |
4573 | static void run_state_machine(struct tcpm_port *port) |
4574 | { |
4575 | int ret; |
4576 | enum typec_pwr_opmode opmode; |
4577 | unsigned int msecs; |
4578 | enum tcpm_state upcoming_state; |
4579 | |
4580 | if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT) |
4581 | port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT && |
4582 | port->state == SRC_UNATTACHED) || |
4583 | (port->enter_state == SNK_ATTACH_WAIT && |
4584 | port->state == SNK_UNATTACHED) || |
4585 | (port->enter_state == SNK_DEBOUNCED && |
4586 | port->state == SNK_UNATTACHED)); |
4587 | |
4588 | port->enter_state = port->state; |
4589 | switch (port->state) { |
4590 | case TOGGLING: |
4591 | break; |
4592 | case CHECK_CONTAMINANT: |
4593 | port->tcpc->check_contaminant(port->tcpc); |
4594 | break; |
4595 | /* SRC states */ |
4596 | case SRC_UNATTACHED: |
4597 | if (!port->non_pd_role_swap) |
4598 | tcpm_swap_complete(port, result: -ENOTCONN); |
4599 | tcpm_src_detach(port); |
4600 | if (port->potential_contaminant) { |
4601 | tcpm_set_state(port, state: CHECK_CONTAMINANT, delay_ms: 0); |
4602 | break; |
4603 | } |
4604 | if (tcpm_start_toggling(port, cc: tcpm_rp_cc(port))) { |
4605 | tcpm_set_state(port, state: TOGGLING, delay_ms: 0); |
4606 | break; |
4607 | } |
4608 | tcpm_set_cc(port, cc: tcpm_rp_cc(port)); |
4609 | if (port->port_type == TYPEC_PORT_DRP) |
4610 | tcpm_set_state(port, state: SNK_UNATTACHED, PD_T_DRP_SNK); |
4611 | break; |
4612 | case SRC_ATTACH_WAIT: |
4613 | if (tcpm_port_is_debug(port)) |
4614 | tcpm_set_state(port, state: DEBUG_ACC_ATTACHED, |
4615 | PD_T_CC_DEBOUNCE); |
4616 | else if (tcpm_port_is_audio(port)) |
4617 | tcpm_set_state(port, state: AUDIO_ACC_ATTACHED, |
4618 | PD_T_CC_DEBOUNCE); |
4619 | else if (tcpm_port_is_source(port) && port->vbus_vsafe0v) |
4620 | tcpm_set_state(port, |
4621 | tcpm_try_snk(port) ? SNK_TRY |
4622 | : SRC_ATTACHED, |
4623 | PD_T_CC_DEBOUNCE); |
4624 | break; |
4625 | |
4626 | case SNK_TRY: |
4627 | port->try_snk_count++; |
4628 | /* |
4629 | * Requirements: |
4630 | * - Do not drive vconn or vbus |
4631 | * - Terminate CC pins (both) to Rd |
4632 | * Action: |
4633 | * - Wait for tDRPTry (PD_T_DRP_TRY). |
4634 | * Until then, ignore any state changes. |
4635 | */ |
4636 | tcpm_set_cc(port, cc: TYPEC_CC_RD); |
4637 | tcpm_set_state(port, state: SNK_TRY_WAIT, PD_T_DRP_TRY); |
4638 | break; |
4639 | case SNK_TRY_WAIT: |
4640 | if (tcpm_port_is_sink(port)) { |
4641 | tcpm_set_state(port, state: SNK_TRY_WAIT_DEBOUNCE, delay_ms: 0); |
4642 | } else { |
4643 | tcpm_set_state(port, state: SRC_TRYWAIT, delay_ms: 0); |
4644 | port->max_wait = 0; |
4645 | } |
4646 | break; |
4647 | case SNK_TRY_WAIT_DEBOUNCE: |
4648 | tcpm_set_state(port, state: SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, |
4649 | PD_T_TRY_CC_DEBOUNCE); |
4650 | break; |
4651 | case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS: |
4652 | if (port->vbus_present && tcpm_port_is_sink(port)) |
4653 | tcpm_set_state(port, state: SNK_ATTACHED, delay_ms: 0); |
4654 | else |
4655 | port->max_wait = 0; |
4656 | break; |
4657 | case SRC_TRYWAIT: |
4658 | tcpm_set_cc(port, cc: tcpm_rp_cc(port)); |
4659 | if (port->max_wait == 0) { |
4660 | port->max_wait = jiffies + |
4661 | msecs_to_jiffies(PD_T_DRP_TRY); |
4662 | tcpm_set_state(port, state: SRC_TRYWAIT_UNATTACHED, |
4663 | PD_T_DRP_TRY); |
4664 | } else { |
4665 | if (time_is_after_jiffies(port->max_wait)) |
4666 | tcpm_set_state(port, state: SRC_TRYWAIT_UNATTACHED, |
4667 | delay_ms: jiffies_to_msecs(j: port->max_wait - |
4668 | jiffies)); |
4669 | else |
4670 | tcpm_set_state(port, state: SNK_UNATTACHED, delay_ms: 0); |
4671 | } |
4672 | break; |
4673 | case SRC_TRYWAIT_DEBOUNCE: |
4674 | tcpm_set_state(port, state: SRC_ATTACHED, PD_T_CC_DEBOUNCE); |
4675 | break; |
4676 | case SRC_TRYWAIT_UNATTACHED: |
4677 | tcpm_set_state(port, state: SNK_UNATTACHED, delay_ms: 0); |
4678 | break; |
4679 | |
4680 | case SRC_ATTACHED: |
4681 | ret = tcpm_src_attach(port); |
4682 | tcpm_set_state(port, state: SRC_UNATTACHED, |
4683 | delay_ms: ret < 0 ? 0 : PD_T_PS_SOURCE_ON); |
4684 | break; |
4685 | case SRC_STARTUP: |
4686 | opmode = tcpm_get_pwr_opmode(cc: tcpm_rp_cc(port)); |
4687 | typec_set_pwr_opmode(port: port->typec_port, mode: opmode); |
4688 | port->pwr_opmode = TYPEC_PWR_MODE_USB; |
4689 | port->caps_count = 0; |
4690 | port->negotiated_rev = PD_MAX_REV; |
4691 | port->negotiated_rev_prime = PD_MAX_REV; |
4692 | port->message_id = 0; |
4693 | port->message_id_prime = 0; |
4694 | port->rx_msgid = -1; |
4695 | port->rx_msgid_prime = -1; |
4696 | port->explicit_contract = false; |
4697 | /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */ |
4698 | if (port->ams == POWER_ROLE_SWAP || |
4699 | port->ams == FAST_ROLE_SWAP) |
4700 | tcpm_ams_finish(port); |
4701 | if (!port->pd_supported) { |
4702 | tcpm_set_state(port, state: SRC_READY, delay_ms: 0); |
4703 | break; |
4704 | } |
4705 | port->upcoming_state = SRC_SEND_CAPABILITIES; |
4706 | tcpm_ams_start(port, ams: POWER_NEGOTIATION); |
4707 | break; |
4708 | case SRC_SEND_CAPABILITIES: |
4709 | port->caps_count++; |
4710 | if (port->caps_count > PD_N_CAPS_COUNT) { |
4711 | tcpm_set_state(port, state: SRC_READY, delay_ms: 0); |
4712 | break; |
4713 | } |
4714 | ret = tcpm_pd_send_source_caps(port); |
4715 | if (ret < 0) { |
4716 | if (tcpm_can_communicate_sop_prime(port) && |
4717 | IS_ERR_OR_NULL(ptr: port->cable)) |
4718 | tcpm_set_state(port, state: SRC_VDM_IDENTITY_REQUEST, delay_ms: 0); |
4719 | else |
4720 | tcpm_set_state(port, state: SRC_SEND_CAPABILITIES, |
4721 | PD_T_SEND_SOURCE_CAP); |
4722 | } else { |
4723 | /* |
4724 | * Per standard, we should clear the reset counter here. |
4725 | * However, that can result in state machine hang-ups. |
4726 | * Reset it only in READY state to improve stability. |
4727 | */ |
4728 | /* port->hard_reset_count = 0; */ |
4729 | port->caps_count = 0; |
4730 | port->pd_capable = true; |
4731 | tcpm_set_state_cond(port, state: SRC_SEND_CAPABILITIES_TIMEOUT, |
4732 | PD_T_SEND_SOURCE_CAP); |
4733 | } |
4734 | break; |
4735 | case SRC_SEND_CAPABILITIES_TIMEOUT: |
4736 | /* |
4737 | * Error recovery for a PD_DATA_SOURCE_CAP reply timeout. |
4738 | * |
4739 | * PD 2.0 sinks are supposed to accept src-capabilities with a |
4740 | * 3.0 header and simply ignore any src PDOs which the sink does |
4741 | * not understand such as PPS but some 2.0 sinks instead ignore |
4742 | * the entire PD_DATA_SOURCE_CAP message, causing contract |
4743 | * negotiation to fail. |
4744 | * |
4745 | * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try |
4746 | * sending src-capabilities with a lower PD revision to |
4747 | * make these broken sinks work. |
4748 | */ |
4749 | if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) { |
4750 | tcpm_set_state(port, state: HARD_RESET_SEND, delay_ms: 0); |
4751 | } else if (port->negotiated_rev > PD_REV20) { |
4752 | port->negotiated_rev--; |
4753 | port->hard_reset_count = 0; |
4754 | tcpm_set_state(port, state: SRC_SEND_CAPABILITIES, delay_ms: 0); |
4755 | } else { |
4756 | tcpm_set_state(port, state: hard_reset_state(port), delay_ms: 0); |
4757 | } |
4758 | break; |
4759 | case SRC_NEGOTIATE_CAPABILITIES: |
4760 | ret = tcpm_pd_check_request(port); |
4761 | if (ret < 0) { |
4762 | tcpm_pd_send_control(port, type: PD_CTRL_REJECT, tx_sop_type: TCPC_TX_SOP); |
4763 | if (!port->explicit_contract) { |
4764 | tcpm_set_state(port, |
4765 | state: SRC_WAIT_NEW_CAPABILITIES, delay_ms: 0); |
4766 | } else { |
4767 | tcpm_set_state(port, state: SRC_READY, delay_ms: 0); |
4768 | } |
4769 | } else { |
4770 | tcpm_pd_send_control(port, type: PD_CTRL_ACCEPT, tx_sop_type: TCPC_TX_SOP); |
4771 | tcpm_set_partner_usb_comm_capable(port, |
4772 | capable: !!(port->sink_request & RDO_USB_COMM)); |
4773 | tcpm_set_state(port, state: SRC_TRANSITION_SUPPLY, |
4774 | PD_T_SRC_TRANSITION); |
4775 | } |
4776 | break; |
4777 | case SRC_TRANSITION_SUPPLY: |
4778 | /* XXX: regulator_set_voltage(vbus, ...) */ |
4779 | tcpm_pd_send_control(port, type: PD_CTRL_PS_RDY, tx_sop_type: TCPC_TX_SOP); |
4780 | port->explicit_contract = true; |
4781 | typec_set_pwr_opmode(port: port->typec_port, mode: TYPEC_PWR_MODE_PD); |
4782 | port->pwr_opmode = TYPEC_PWR_MODE_PD; |
4783 | tcpm_set_state_cond(port, state: SRC_READY, delay_ms: 0); |
4784 | break; |
4785 | case SRC_READY: |
4786 | #if 1 |
4787 | port->hard_reset_count = 0; |
4788 | #endif |
4789 | port->try_src_count = 0; |
4790 | |
4791 | tcpm_swap_complete(port, result: 0); |
4792 | tcpm_typec_connect(port); |
4793 | |
4794 | if (port->ams != NONE_AMS) |
4795 | tcpm_ams_finish(port); |
4796 | if (port->next_ams != NONE_AMS) { |
4797 | port->ams = port->next_ams; |
4798 | port->next_ams = NONE_AMS; |
4799 | } |
4800 | |
4801 | /* |
4802 | * If previous AMS is interrupted, switch to the upcoming |
4803 | * state. |
4804 | */ |
4805 | if (port->upcoming_state != INVALID_STATE) { |
4806 | upcoming_state = port->upcoming_state; |
4807 | port->upcoming_state = INVALID_STATE; |
4808 | tcpm_set_state(port, state: upcoming_state, delay_ms: 0); |
4809 | break; |
4810 | } |
4811 | |
4812 | /* |
4813 | * 6.4.4.3.1 Discover Identity |
4814 | * "The Discover Identity Command Shall only be sent to SOP when there is an |
4815 | * Explicit Contract." |
4816 | * |
4817 | * Discover Identity on SOP' should be discovered prior to the |
4818 | * ready state, but if done after a Vconn Swap following Discover |
4819 | * Identity on SOP then the discovery process can be run here |
4820 | * as well. |
4821 | */ |
4822 | if (port->explicit_contract) { |
4823 | if (port->send_discover_prime) { |
4824 | port->tx_sop_type = TCPC_TX_SOP_PRIME; |
4825 | } else { |
4826 | port->tx_sop_type = TCPC_TX_SOP; |
4827 | tcpm_set_initial_svdm_version(port); |
4828 | } |
4829 | mod_send_discover_delayed_work(port, delay_ms: 0); |
4830 | } else { |
4831 | port->send_discover = false; |
4832 | port->send_discover_prime = false; |
4833 | } |
4834 | |
4835 | /* |
4836 | * 6.3.5 |
4837 | * Sending ping messages is not necessary if |
4838 | * - the source operates at vSafe5V |
4839 | * or |
4840 | * - The system is not operating in PD mode |
4841 | * or |
4842 | * - Both partners are connected using a Type-C connector |
4843 | * |
		 * There is no actual need to send PD messages since the local
		 * port is Type-C, and the spec does not clearly say whether PD
		 * is possible when a Type-C port is connected to Type-A/B.
		 */
4848 | break; |
4849 | case SRC_WAIT_NEW_CAPABILITIES: |
4850 | /* Nothing to do... */ |
4851 | break; |
4852 | |
4853 | /* SNK states */ |
4854 | case SNK_UNATTACHED: |
4855 | if (!port->non_pd_role_swap) |
4856 | tcpm_swap_complete(port, result: -ENOTCONN); |
4857 | tcpm_pps_complete(port, result: -ENOTCONN); |
4858 | tcpm_snk_detach(port); |
4859 | if (port->potential_contaminant) { |
4860 | tcpm_set_state(port, state: CHECK_CONTAMINANT, delay_ms: 0); |
4861 | break; |
4862 | } |
4863 | if (tcpm_start_toggling(port, cc: TYPEC_CC_RD)) { |
4864 | tcpm_set_state(port, state: TOGGLING, delay_ms: 0); |
4865 | break; |
4866 | } |
4867 | tcpm_set_cc(port, cc: TYPEC_CC_RD); |
4868 | if (port->port_type == TYPEC_PORT_DRP) |
4869 | tcpm_set_state(port, state: SRC_UNATTACHED, PD_T_DRP_SRC); |
4870 | break; |
4871 | case SNK_ATTACH_WAIT: |
4872 | if ((port->cc1 == TYPEC_CC_OPEN && |
4873 | port->cc2 != TYPEC_CC_OPEN) || |
4874 | (port->cc1 != TYPEC_CC_OPEN && |
4875 | port->cc2 == TYPEC_CC_OPEN)) |
4876 | tcpm_set_state(port, state: SNK_DEBOUNCED, |
4877 | PD_T_CC_DEBOUNCE); |
4878 | else if (tcpm_port_is_disconnected(port)) |
4879 | tcpm_set_state(port, state: SNK_UNATTACHED, |
4880 | PD_T_PD_DEBOUNCE); |
4881 | break; |
4882 | case SNK_DEBOUNCED: |
4883 | if (tcpm_port_is_disconnected(port)) |
4884 | tcpm_set_state(port, state: SNK_UNATTACHED, |
4885 | PD_T_PD_DEBOUNCE); |
4886 | else if (port->vbus_present) |
4887 | tcpm_set_state(port, |
4888 | tcpm_try_src(port) ? SRC_TRY |
4889 | : SNK_ATTACHED, |
4890 | delay_ms: 0); |
4891 | break; |
4892 | case SRC_TRY: |
4893 | port->try_src_count++; |
4894 | tcpm_set_cc(port, cc: tcpm_rp_cc(port)); |
4895 | port->max_wait = 0; |
4896 | tcpm_set_state(port, state: SRC_TRY_WAIT, delay_ms: 0); |
4897 | break; |
4898 | case SRC_TRY_WAIT: |
4899 | if (port->max_wait == 0) { |
4900 | port->max_wait = jiffies + |
4901 | msecs_to_jiffies(PD_T_DRP_TRY); |
4902 | msecs = PD_T_DRP_TRY; |
4903 | } else { |
4904 | if (time_is_after_jiffies(port->max_wait)) |
4905 | msecs = jiffies_to_msecs(j: port->max_wait - |
4906 | jiffies); |
4907 | else |
4908 | msecs = 0; |
4909 | } |
4910 | tcpm_set_state(port, state: SNK_TRYWAIT, delay_ms: msecs); |
4911 | break; |
4912 | case SRC_TRY_DEBOUNCE: |
4913 | tcpm_set_state(port, state: SRC_ATTACHED, PD_T_PD_DEBOUNCE); |
4914 | break; |
4915 | case SNK_TRYWAIT: |
4916 | tcpm_set_cc(port, cc: TYPEC_CC_RD); |
4917 | tcpm_set_state(port, state: SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE); |
4918 | break; |
4919 | case SNK_TRYWAIT_VBUS: |
4920 | /* |
4921 | * TCPM stays in this state indefinitely until VBUS |
4922 | * is detected as long as Rp is not detected for |
4923 | * more than a time period of tPDDebounce. |
4924 | */ |
4925 | if (port->vbus_present && tcpm_port_is_sink(port)) { |
4926 | tcpm_set_state(port, state: SNK_ATTACHED, delay_ms: 0); |
4927 | break; |
4928 | } |
4929 | if (!tcpm_port_is_sink(port)) |
4930 | tcpm_set_state(port, state: SNK_TRYWAIT_DEBOUNCE, delay_ms: 0); |
4931 | break; |
4932 | case SNK_TRYWAIT_DEBOUNCE: |
4933 | tcpm_set_state(port, state: SNK_UNATTACHED, PD_T_PD_DEBOUNCE); |
4934 | break; |
4935 | case SNK_ATTACHED: |
4936 | ret = tcpm_snk_attach(port); |
4937 | if (ret < 0) |
4938 | tcpm_set_state(port, state: SNK_UNATTACHED, delay_ms: 0); |
4939 | else |
4940 | tcpm_set_state(port, state: SNK_STARTUP, delay_ms: 0); |
4941 | break; |
4942 | case SNK_STARTUP: |
4943 | opmode = tcpm_get_pwr_opmode(cc: port->polarity ? |
4944 | port->cc2 : port->cc1); |
4945 | typec_set_pwr_opmode(port: port->typec_port, mode: opmode); |
4946 | port->pwr_opmode = TYPEC_PWR_MODE_USB; |
4947 | port->negotiated_rev = PD_MAX_REV; |
4948 | port->negotiated_rev_prime = PD_MAX_REV; |
4949 | port->message_id = 0; |
4950 | port->message_id_prime = 0; |
4951 | port->rx_msgid = -1; |
4952 | port->rx_msgid_prime = -1; |
4953 | port->explicit_contract = false; |
4954 | |
4955 | if (port->ams == POWER_ROLE_SWAP || |
4956 | port->ams == FAST_ROLE_SWAP) |
4957 | /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */ |
4958 | tcpm_ams_finish(port); |
4959 | |
4960 | tcpm_set_state(port, state: SNK_DISCOVERY, delay_ms: 0); |
4961 | break; |
4962 | case SNK_DISCOVERY: |
4963 | if (port->vbus_present) { |
4964 | u32 current_lim = tcpm_get_current_limit(port); |
4965 | |
4966 | if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5)) |
4967 | current_lim = PD_P_SNK_STDBY_MW / 5; |
4968 | tcpm_set_current_limit(port, max_ma: current_lim, mv: 5000); |
4969 | /* Not sink vbus if operational current is 0mA */ |
4970 | tcpm_set_charge(port, charge: !port->pd_supported || |
4971 | pdo_max_current(pdo: port->snk_pdo[0])); |
4972 | |
4973 | if (!port->pd_supported) |
4974 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
4975 | else |
4976 | tcpm_set_state(port, state: SNK_WAIT_CAPABILITIES, delay_ms: 0); |
4977 | break; |
4978 | } |
4979 | /* |
4980 | * For DRP, timeouts differ. Also, handling is supposed to be |
4981 | * different and much more complex (dead battery detection; |
4982 | * see USB power delivery specification, section 8.3.3.6.1.5.1). |
4983 | */ |
4984 | tcpm_set_state(port, state: hard_reset_state(port), |
4985 | delay_ms: port->port_type == TYPEC_PORT_DRP ? |
4986 | PD_T_DB_DETECT : PD_T_NO_RESPONSE); |
4987 | break; |
4988 | case SNK_DISCOVERY_DEBOUNCE: |
4989 | tcpm_set_state(port, state: SNK_DISCOVERY_DEBOUNCE_DONE, |
4990 | PD_T_CC_DEBOUNCE); |
4991 | break; |
4992 | case SNK_DISCOVERY_DEBOUNCE_DONE: |
4993 | if (!tcpm_port_is_disconnected(port) && |
4994 | tcpm_port_is_sink(port) && |
4995 | ktime_after(cmp1: port->delayed_runtime, cmp2: ktime_get())) { |
4996 | tcpm_set_state(port, state: SNK_DISCOVERY, |
4997 | delay_ms: ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get()))); |
4998 | break; |
4999 | } |
5000 | tcpm_set_state(port, state: unattached_state(port), delay_ms: 0); |
5001 | break; |
5002 | case SNK_WAIT_CAPABILITIES: |
5003 | ret = port->tcpc->set_pd_rx(port->tcpc, true); |
5004 | if (ret < 0) { |
5005 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
5006 | break; |
5007 | } |
5008 | /* |
5009 | * If VBUS has never been low, and we time out waiting |
5010 | * for source cap, try a soft reset first, in case we |
5011 | * were already in a stable contract before this boot. |
5012 | * Do this only once. |
5013 | */ |
5014 | if (port->vbus_never_low) { |
5015 | port->vbus_never_low = false; |
5016 | tcpm_set_state(port, state: SNK_SOFT_RESET, |
5017 | PD_T_SINK_WAIT_CAP); |
5018 | } else { |
5019 | tcpm_set_state(port, state: hard_reset_state(port), |
5020 | PD_T_SINK_WAIT_CAP); |
5021 | } |
5022 | break; |
5023 | case SNK_NEGOTIATE_CAPABILITIES: |
5024 | port->pd_capable = true; |
5025 | tcpm_set_partner_usb_comm_capable(port, |
5026 | capable: !!(port->source_caps[0] & PDO_FIXED_USB_COMM)); |
5027 | port->hard_reset_count = 0; |
5028 | ret = tcpm_pd_send_request(port); |
5029 | if (ret < 0) { |
5030 | /* Restore back to the original state */ |
5031 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_PD, |
5032 | pps_active: port->pps_data.active, |
5033 | requested_vbus_voltage: port->supply_voltage); |
5034 | /* Let the Source send capabilities again. */ |
5035 | tcpm_set_state(port, state: SNK_WAIT_CAPABILITIES, delay_ms: 0); |
5036 | } else { |
5037 | tcpm_set_state_cond(port, state: hard_reset_state(port), |
5038 | PD_T_SENDER_RESPONSE); |
5039 | } |
5040 | break; |
5041 | case SNK_NEGOTIATE_PPS_CAPABILITIES: |
5042 | ret = tcpm_pd_send_pps_request(port); |
5043 | if (ret < 0) { |
5044 | /* Restore back to the original state */ |
5045 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_PD, |
5046 | pps_active: port->pps_data.active, |
5047 | requested_vbus_voltage: port->supply_voltage); |
5048 | port->pps_status = ret; |
5049 | /* |
5050 | * If this was called due to updates to sink |
5051 | * capabilities, and pps is no longer valid, we should |
5052 | * safely fall back to a standard PDO. |
5053 | */ |
5054 | if (port->update_sink_caps) |
5055 | tcpm_set_state(port, state: SNK_NEGOTIATE_CAPABILITIES, delay_ms: 0); |
5056 | else |
5057 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
5058 | } else { |
5059 | tcpm_set_state_cond(port, state: hard_reset_state(port), |
5060 | PD_T_SENDER_RESPONSE); |
5061 | } |
5062 | break; |
5063 | case SNK_TRANSITION_SINK: |
5064 | /* From the USB PD spec: |
5065 | * "The Sink Shall transition to Sink Standby before a positive or |
5066 | * negative voltage transition of VBUS. During Sink Standby |
5067 | * the Sink Shall reduce its power draw to pSnkStdby." |
5068 | * |
5069 | * This is not applicable to PPS though as the port can continue |
5070 | * to draw negotiated power without switching to standby. |
5071 | */ |
5072 | if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active && |
5073 | port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) { |
5074 | u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage; |
5075 | |
5076 | tcpm_log(port, fmt: "Setting standby current %u mV @ %u mA" , |
5077 | port->supply_voltage, stdby_ma); |
5078 | tcpm_set_current_limit(port, max_ma: stdby_ma, mv: port->supply_voltage); |
5079 | } |
5080 | fallthrough; |
5081 | case SNK_TRANSITION_SINK_VBUS: |
5082 | tcpm_set_state(port, state: hard_reset_state(port), |
5083 | PD_T_PS_TRANSITION); |
5084 | break; |
5085 | case SNK_READY: |
5086 | port->try_snk_count = 0; |
5087 | port->update_sink_caps = false; |
5088 | if (port->explicit_contract) { |
5089 | typec_set_pwr_opmode(port: port->typec_port, |
5090 | mode: TYPEC_PWR_MODE_PD); |
5091 | port->pwr_opmode = TYPEC_PWR_MODE_PD; |
5092 | } |
5093 | |
5094 | if (!port->pd_capable && port->slow_charger_loop) |
5095 | tcpm_set_current_limit(port, max_ma: tcpm_get_current_limit(port), mv: 5000); |
5096 | tcpm_swap_complete(port, result: 0); |
5097 | tcpm_typec_connect(port); |
5098 | if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE) |
5099 | mod_enable_frs_delayed_work(port, delay_ms: 0); |
5100 | tcpm_pps_complete(port, result: port->pps_status); |
5101 | |
5102 | if (port->ams != NONE_AMS) |
5103 | tcpm_ams_finish(port); |
5104 | if (port->next_ams != NONE_AMS) { |
5105 | port->ams = port->next_ams; |
5106 | port->next_ams = NONE_AMS; |
5107 | } |
5108 | |
5109 | /* |
5110 | * If previous AMS is interrupted, switch to the upcoming |
5111 | * state. |
5112 | */ |
5113 | if (port->upcoming_state != INVALID_STATE) { |
5114 | upcoming_state = port->upcoming_state; |
5115 | port->upcoming_state = INVALID_STATE; |
5116 | tcpm_set_state(port, state: upcoming_state, delay_ms: 0); |
5117 | break; |
5118 | } |
5119 | |
5120 | /* |
5121 | * 6.4.4.3.1 Discover Identity |
5122 | * "The Discover Identity Command Shall only be sent to SOP when there is an |
5123 | * Explicit Contract." |
5124 | * |
5125 | * Discover Identity on SOP' should be discovered prior to the |
5126 | * ready state, but if done after a Vconn Swap following Discover |
5127 | * Identity on SOP then the discovery process can be run here |
5128 | * as well. |
5129 | */ |
5130 | if (port->explicit_contract) { |
5131 | if (port->send_discover_prime) { |
5132 | port->tx_sop_type = TCPC_TX_SOP_PRIME; |
5133 | } else { |
5134 | port->tx_sop_type = TCPC_TX_SOP; |
5135 | tcpm_set_initial_svdm_version(port); |
5136 | } |
5137 | mod_send_discover_delayed_work(port, delay_ms: 0); |
5138 | } else { |
5139 | port->send_discover = false; |
5140 | port->send_discover_prime = false; |
5141 | } |
5142 | |
5143 | power_supply_changed(psy: port->psy); |
5144 | break; |
5145 | |
5146 | /* Accessory states */ |
5147 | case ACC_UNATTACHED: |
5148 | tcpm_acc_detach(port); |
5149 | tcpm_set_state(port, state: SRC_UNATTACHED, delay_ms: 0); |
5150 | break; |
5151 | case DEBUG_ACC_ATTACHED: |
5152 | case AUDIO_ACC_ATTACHED: |
5153 | ret = tcpm_acc_attach(port); |
5154 | if (ret < 0) |
5155 | tcpm_set_state(port, state: ACC_UNATTACHED, delay_ms: 0); |
5156 | break; |
5157 | case AUDIO_ACC_DEBOUNCE: |
5158 | tcpm_set_state(port, state: ACC_UNATTACHED, PD_T_CC_DEBOUNCE); |
5159 | break; |
5160 | |
5161 | /* Hard_Reset states */ |
5162 | case HARD_RESET_SEND: |
5163 | if (port->ams != NONE_AMS) |
5164 | tcpm_ams_finish(port); |
5165 | /* |
5166 | * State machine will be directed to HARD_RESET_START, |
5167 | * thus set upcoming_state to INVALID_STATE. |
5168 | */ |
5169 | port->upcoming_state = INVALID_STATE; |
5170 | tcpm_ams_start(port, ams: HARD_RESET); |
5171 | break; |
5172 | case HARD_RESET_START: |
5173 | port->sink_cap_done = false; |
5174 | if (port->tcpc->enable_frs) |
5175 | port->tcpc->enable_frs(port->tcpc, false); |
5176 | port->hard_reset_count++; |
5177 | port->tcpc->set_pd_rx(port->tcpc, false); |
5178 | tcpm_unregister_altmodes(port); |
5179 | port->nr_sink_caps = 0; |
5180 | port->send_discover = true; |
5181 | port->send_discover_prime = false; |
5182 | if (port->pwr_role == TYPEC_SOURCE) |
5183 | tcpm_set_state(port, state: SRC_HARD_RESET_VBUS_OFF, |
5184 | PD_T_PS_HARD_RESET); |
5185 | else |
5186 | tcpm_set_state(port, state: SNK_HARD_RESET_SINK_OFF, delay_ms: 0); |
5187 | break; |
5188 | case SRC_HARD_RESET_VBUS_OFF: |
5189 | /* |
5190 | * 7.1.5 Response to Hard Resets |
5191 | * Hard Reset Signaling indicates a communication failure has occurred and the |
5192 | * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall |
5193 | * drive VBUS to vSafe0V as shown in Figure 7-9. |
5194 | */ |
5195 | tcpm_set_vconn(port, enable: false); |
5196 | tcpm_set_vbus(port, enable: false); |
5197 | tcpm_set_roles(port, attached: port->self_powered, role: TYPEC_SOURCE, |
5198 | tcpm_data_role_for_source(port)); |
5199 | /* |
5200 | * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V + |
5201 | * PD_T_SRC_RECOVER before turning vbus back on. |
5202 | * From Table 7-12 Sequence Description for a Source Initiated Hard Reset: |
5203 | * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then |
5204 | * tells the Device Policy Manager to instruct the power supply to perform a |
5205 | * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2). |
5206 | * 5. After tSrcRecover the Source applies power to VBUS in an attempt to |
5207 | * re-establish communication with the Sink and resume USB Default Operation. |
5208 | * The transition to vSafe5V Shall occur within tSrcTurnOn(t4). |
5209 | */ |
5210 | tcpm_set_state(port, state: SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER); |
5211 | break; |
5212 | case SRC_HARD_RESET_VBUS_ON: |
5213 | tcpm_set_vconn(port, enable: true); |
5214 | tcpm_set_vbus(port, enable: true); |
5215 | if (port->ams == HARD_RESET) |
5216 | tcpm_ams_finish(port); |
5217 | if (port->pd_supported) |
5218 | port->tcpc->set_pd_rx(port->tcpc, true); |
5219 | tcpm_set_attached_state(port, attached: true); |
5220 | tcpm_set_state(port, state: SRC_UNATTACHED, PD_T_PS_SOURCE_ON); |
5221 | break; |
5222 | case SNK_HARD_RESET_SINK_OFF: |
		/* Do not discharge/disconnect during hard reset */
5224 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_USB, pps_active: false, requested_vbus_voltage: 0); |
5225 | memset(&port->pps_data, 0, sizeof(port->pps_data)); |
5226 | tcpm_set_vconn(port, enable: false); |
5227 | if (port->pd_capable) |
5228 | tcpm_set_charge(port, charge: false); |
5229 | tcpm_set_roles(port, attached: port->self_powered, role: TYPEC_SINK, |
5230 | tcpm_data_role_for_sink(port)); |
5231 | /* |
5232 | * VBUS may or may not toggle, depending on the adapter. |
5233 | * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON |
5234 | * directly after timeout. |
5235 | */ |
5236 | tcpm_set_state(port, state: SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V); |
5237 | break; |
5238 | case SNK_HARD_RESET_WAIT_VBUS: |
5239 | if (port->ams == HARD_RESET) |
5240 | tcpm_ams_finish(port); |
5241 | /* Assume we're disconnected if VBUS doesn't come back. */ |
5242 | tcpm_set_state(port, state: SNK_UNATTACHED, |
5243 | PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON); |
5244 | break; |
5245 | case SNK_HARD_RESET_SINK_ON: |
5246 | /* Note: There is no guarantee that VBUS is on in this state */ |
5247 | /* |
5248 | * XXX: |
5249 | * The specification suggests that dual mode ports in sink |
5250 | * mode should transition to state PE_SRC_Transition_to_default. |
5251 | * See USB power delivery specification chapter 8.3.3.6.1.3. |
5252 | * This would mean to |
5253 | * - turn off VCONN, reset power supply |
5254 | * - request hardware reset |
5255 | * - turn on VCONN |
5256 | * - Transition to state PE_Src_Startup |
5257 | * SNK only ports shall transition to state Snk_Startup |
5258 | * (see chapter 8.3.3.3.8). |
5259 | * Similar, dual-mode ports in source mode should transition |
5260 | * to PE_SNK_Transition_to_default. |
5261 | */ |
5262 | if (port->pd_capable) { |
5263 | tcpm_set_current_limit(port, |
5264 | max_ma: tcpm_get_current_limit(port), |
5265 | mv: 5000); |
5266 | /* Not sink vbus if operational current is 0mA */ |
5267 | tcpm_set_charge(port, charge: !!pdo_max_current(pdo: port->snk_pdo[0])); |
5268 | } |
5269 | if (port->ams == HARD_RESET) |
5270 | tcpm_ams_finish(port); |
5271 | tcpm_set_attached_state(port, attached: true); |
5272 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_USB, pps_active: false, VSAFE5V); |
5273 | tcpm_set_state(port, state: SNK_STARTUP, delay_ms: 0); |
5274 | break; |
5275 | |
5276 | /* Soft_Reset states */ |
5277 | case SOFT_RESET: |
5278 | port->message_id = 0; |
5279 | port->rx_msgid = -1; |
5280 | /* remove existing capabilities */ |
5281 | usb_power_delivery_unregister_capabilities(cap: port->partner_source_caps); |
5282 | port->partner_source_caps = NULL; |
5283 | tcpm_pd_send_control(port, type: PD_CTRL_ACCEPT, tx_sop_type: TCPC_TX_SOP); |
5284 | tcpm_ams_finish(port); |
5285 | if (port->pwr_role == TYPEC_SOURCE) { |
5286 | port->upcoming_state = SRC_SEND_CAPABILITIES; |
5287 | tcpm_ams_start(port, ams: POWER_NEGOTIATION); |
5288 | } else { |
5289 | tcpm_set_state(port, state: SNK_WAIT_CAPABILITIES, delay_ms: 0); |
5290 | } |
5291 | break; |
5292 | case SRC_SOFT_RESET_WAIT_SNK_TX: |
5293 | case SNK_SOFT_RESET: |
5294 | if (port->ams != NONE_AMS) |
5295 | tcpm_ams_finish(port); |
5296 | port->upcoming_state = SOFT_RESET_SEND; |
5297 | tcpm_ams_start(port, ams: SOFT_RESET_AMS); |
5298 | break; |
5299 | case SOFT_RESET_SEND: |
5300 | /* |
5301 | * Power Delivery 3.0 Section 6.3.13 |
5302 | * |
5303 | * A Soft_Reset Message Shall be targeted at a specific entity |
5304 | * depending on the type of SOP* packet used. |
5305 | */ |
5306 | if (port->tx_sop_type == TCPC_TX_SOP_PRIME) { |
5307 | port->message_id_prime = 0; |
5308 | port->rx_msgid_prime = -1; |
5309 | tcpm_pd_send_control(port, type: PD_CTRL_SOFT_RESET, tx_sop_type: TCPC_TX_SOP_PRIME); |
5310 | tcpm_set_state_cond(port, state: ready_state(port), PD_T_SENDER_RESPONSE); |
5311 | } else { |
5312 | port->message_id = 0; |
5313 | port->rx_msgid = -1; |
5314 | /* remove existing capabilities */ |
5315 | usb_power_delivery_unregister_capabilities(cap: port->partner_source_caps); |
5316 | port->partner_source_caps = NULL; |
5317 | if (tcpm_pd_send_control(port, type: PD_CTRL_SOFT_RESET, tx_sop_type: TCPC_TX_SOP)) |
5318 | tcpm_set_state_cond(port, state: hard_reset_state(port), delay_ms: 0); |
5319 | else |
5320 | tcpm_set_state_cond(port, state: hard_reset_state(port), |
5321 | PD_T_SENDER_RESPONSE); |
5322 | } |
5323 | break; |
5324 | |
5325 | /* DR_Swap states */ |
5326 | case DR_SWAP_SEND: |
5327 | tcpm_pd_send_control(port, type: PD_CTRL_DR_SWAP, tx_sop_type: TCPC_TX_SOP); |
5328 | if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) { |
5329 | port->send_discover = true; |
5330 | port->send_discover_prime = false; |
5331 | } |
5332 | tcpm_set_state_cond(port, state: DR_SWAP_SEND_TIMEOUT, |
5333 | PD_T_SENDER_RESPONSE); |
5334 | break; |
5335 | case DR_SWAP_ACCEPT: |
5336 | tcpm_pd_send_control(port, type: PD_CTRL_ACCEPT, tx_sop_type: TCPC_TX_SOP); |
5337 | if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) { |
5338 | port->send_discover = true; |
5339 | port->send_discover_prime = false; |
5340 | } |
5341 | tcpm_set_state_cond(port, state: DR_SWAP_CHANGE_DR, delay_ms: 0); |
5342 | break; |
5343 | case DR_SWAP_SEND_TIMEOUT: |
5344 | tcpm_swap_complete(port, result: -ETIMEDOUT); |
5345 | port->send_discover = false; |
5346 | port->send_discover_prime = false; |
5347 | tcpm_ams_finish(port); |
5348 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5349 | break; |
5350 | case DR_SWAP_CHANGE_DR: |
5351 | tcpm_unregister_altmodes(port); |
5352 | if (port->data_role == TYPEC_HOST) |
5353 | tcpm_set_roles(port, attached: true, role: port->pwr_role, |
5354 | data: TYPEC_DEVICE); |
5355 | else |
5356 | tcpm_set_roles(port, attached: true, role: port->pwr_role, |
5357 | data: TYPEC_HOST); |
5358 | tcpm_ams_finish(port); |
5359 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5360 | break; |
5361 | |
5362 | case FR_SWAP_SEND: |
5363 | if (tcpm_pd_send_control(port, type: PD_CTRL_FR_SWAP, tx_sop_type: TCPC_TX_SOP)) { |
5364 | tcpm_set_state(port, state: ERROR_RECOVERY, delay_ms: 0); |
5365 | break; |
5366 | } |
5367 | tcpm_set_state_cond(port, state: FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE); |
5368 | break; |
5369 | case FR_SWAP_SEND_TIMEOUT: |
5370 | tcpm_set_state(port, state: ERROR_RECOVERY, delay_ms: 0); |
5371 | break; |
5372 | case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: |
5373 | tcpm_set_state(port, state: ERROR_RECOVERY, PD_T_PS_SOURCE_OFF); |
5374 | break; |
5375 | case FR_SWAP_SNK_SRC_NEW_SINK_READY: |
5376 | if (port->vbus_source) |
5377 | tcpm_set_state(port, state: FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, delay_ms: 0); |
5378 | else |
5379 | tcpm_set_state(port, state: ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE); |
5380 | break; |
5381 | case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: |
5382 | tcpm_set_pwr_role(port, role: TYPEC_SOURCE); |
5383 | if (tcpm_pd_send_control(port, type: PD_CTRL_PS_RDY, tx_sop_type: TCPC_TX_SOP)) { |
5384 | tcpm_set_state(port, state: ERROR_RECOVERY, delay_ms: 0); |
5385 | break; |
5386 | } |
5387 | tcpm_set_cc(port, cc: tcpm_rp_cc(port)); |
5388 | tcpm_set_state(port, state: SRC_STARTUP, PD_T_SWAP_SRC_START); |
5389 | break; |
5390 | |
5391 | /* PR_Swap states */ |
5392 | case PR_SWAP_ACCEPT: |
5393 | tcpm_pd_send_control(port, type: PD_CTRL_ACCEPT, tx_sop_type: TCPC_TX_SOP); |
5394 | tcpm_set_state(port, state: PR_SWAP_START, delay_ms: 0); |
5395 | break; |
5396 | case PR_SWAP_SEND: |
5397 | tcpm_pd_send_control(port, type: PD_CTRL_PR_SWAP, tx_sop_type: TCPC_TX_SOP); |
5398 | tcpm_set_state_cond(port, state: PR_SWAP_SEND_TIMEOUT, |
5399 | PD_T_SENDER_RESPONSE); |
5400 | break; |
5401 | case PR_SWAP_SEND_TIMEOUT: |
5402 | tcpm_swap_complete(port, result: -ETIMEDOUT); |
5403 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5404 | break; |
5405 | case PR_SWAP_START: |
5406 | tcpm_apply_rc(port); |
5407 | if (port->pwr_role == TYPEC_SOURCE) |
5408 | tcpm_set_state(port, state: PR_SWAP_SRC_SNK_TRANSITION_OFF, |
5409 | PD_T_SRC_TRANSITION); |
5410 | else |
5411 | tcpm_set_state(port, state: PR_SWAP_SNK_SRC_SINK_OFF, delay_ms: 0); |
5412 | break; |
5413 | case PR_SWAP_SRC_SNK_TRANSITION_OFF: |
5414 | /* |
5415 | * Prevent vbus discharge circuit from turning on during PR_SWAP |
5416 | * as this is not a disconnect. |
5417 | */ |
5418 | tcpm_set_vbus(port, enable: false); |
5419 | port->explicit_contract = false; |
5420 | /* allow time for Vbus discharge, must be < tSrcSwapStdby */ |
5421 | tcpm_set_state(port, state: PR_SWAP_SRC_SNK_SOURCE_OFF, |
5422 | PD_T_SRCSWAPSTDBY); |
5423 | break; |
5424 | case PR_SWAP_SRC_SNK_SOURCE_OFF: |
5425 | tcpm_set_cc(port, cc: TYPEC_CC_RD); |
5426 | /* allow CC debounce */ |
5427 | tcpm_set_state(port, state: PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED, |
5428 | PD_T_CC_DEBOUNCE); |
5429 | break; |
5430 | case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED: |
5431 | /* |
5432 | * USB-PD standard, 6.2.1.4, Port Power Role: |
5433 | * "During the Power Role Swap Sequence, for the initial Source |
5434 | * Port, the Port Power Role field shall be set to Sink in the |
5435 | * PS_RDY Message indicating that the initial Source’s power |
5436 | * supply is turned off" |
5437 | */ |
5438 | tcpm_set_pwr_role(port, role: TYPEC_SINK); |
5439 | if (tcpm_pd_send_control(port, type: PD_CTRL_PS_RDY, tx_sop_type: TCPC_TX_SOP)) { |
5440 | tcpm_set_state(port, state: ERROR_RECOVERY, delay_ms: 0); |
5441 | break; |
5442 | } |
5443 | tcpm_set_state(port, state: ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS); |
5444 | break; |
5445 | case PR_SWAP_SRC_SNK_SINK_ON: |
5446 | tcpm_enable_auto_vbus_discharge(port, enable: true); |
5447 | /* Set the vbus disconnect threshold for implicit contract */ |
5448 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_USB, pps_active: false, VSAFE5V); |
5449 | tcpm_set_state(port, state: SNK_STARTUP, delay_ms: 0); |
5450 | break; |
5451 | case PR_SWAP_SNK_SRC_SINK_OFF: |
5452 | /* will be source, remove existing capabilities */ |
5453 | usb_power_delivery_unregister_capabilities(cap: port->partner_source_caps); |
5454 | port->partner_source_caps = NULL; |
5455 | /* |
5456 | * Prevent vbus discharge circuit from turning on during PR_SWAP |
5457 | * as this is not a disconnect. |
5458 | */ |
5459 | tcpm_set_auto_vbus_discharge_threshold(port, mode: TYPEC_PWR_MODE_USB, |
5460 | pps_active: port->pps_data.active, requested_vbus_voltage: 0); |
5461 | tcpm_set_charge(port, charge: false); |
5462 | tcpm_set_state(port, state: hard_reset_state(port), |
5463 | PD_T_PS_SOURCE_OFF); |
5464 | break; |
5465 | case PR_SWAP_SNK_SRC_SOURCE_ON: |
5466 | tcpm_enable_auto_vbus_discharge(port, enable: true); |
5467 | tcpm_set_cc(port, cc: tcpm_rp_cc(port)); |
5468 | tcpm_set_vbus(port, enable: true); |
5469 | /* |
5470 | * allow time VBUS ramp-up, must be < tNewSrc |
5471 | * Also, this window overlaps with CC debounce as well. |
5472 | * So, Wait for the max of two which is PD_T_NEWSRC |
5473 | */ |
5474 | tcpm_set_state(port, state: PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP, |
5475 | PD_T_NEWSRC); |
5476 | break; |
5477 | case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP: |
5478 | /* |
5479 | * USB PD standard, 6.2.1.4: |
5480 | * "Subsequent Messages initiated by the Policy Engine, |
5481 | * such as the PS_RDY Message sent to indicate that Vbus |
5482 | * is ready, will have the Port Power Role field set to |
5483 | * Source." |
5484 | */ |
5485 | tcpm_set_pwr_role(port, role: TYPEC_SOURCE); |
5486 | tcpm_pd_send_control(port, type: PD_CTRL_PS_RDY, tx_sop_type: TCPC_TX_SOP); |
5487 | tcpm_set_state(port, state: SRC_STARTUP, PD_T_SWAP_SRC_START); |
5488 | break; |
5489 | |
5490 | case VCONN_SWAP_ACCEPT: |
5491 | tcpm_pd_send_control(port, type: PD_CTRL_ACCEPT, tx_sop_type: TCPC_TX_SOP); |
5492 | tcpm_ams_finish(port); |
5493 | tcpm_set_state(port, state: VCONN_SWAP_START, delay_ms: 0); |
5494 | break; |
5495 | case VCONN_SWAP_SEND: |
5496 | tcpm_pd_send_control(port, type: PD_CTRL_VCONN_SWAP, tx_sop_type: TCPC_TX_SOP); |
5497 | tcpm_set_state(port, state: VCONN_SWAP_SEND_TIMEOUT, |
5498 | PD_T_SENDER_RESPONSE); |
5499 | break; |
5500 | case VCONN_SWAP_SEND_TIMEOUT: |
5501 | tcpm_swap_complete(port, result: -ETIMEDOUT); |
5502 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5503 | break; |
5504 | case VCONN_SWAP_START: |
5505 | if (port->vconn_role == TYPEC_SOURCE) |
5506 | tcpm_set_state(port, state: VCONN_SWAP_WAIT_FOR_VCONN, delay_ms: 0); |
5507 | else |
5508 | tcpm_set_state(port, state: VCONN_SWAP_TURN_ON_VCONN, delay_ms: 0); |
5509 | break; |
5510 | case VCONN_SWAP_WAIT_FOR_VCONN: |
5511 | tcpm_set_state(port, state: hard_reset_state(port), |
5512 | PD_T_VCONN_SOURCE_ON); |
5513 | break; |
5514 | case VCONN_SWAP_TURN_ON_VCONN: |
5515 | ret = tcpm_set_vconn(port, enable: true); |
5516 | tcpm_pd_send_control(port, type: PD_CTRL_PS_RDY, tx_sop_type: TCPC_TX_SOP); |
5517 | /* |
5518 | * USB PD 3.0 Section 6.4.4.3.1 |
5519 | * |
5520 | * Note that a Cable Plug or VPD will not be ready for PD |
5521 | * Communication until tVCONNStable after VCONN has been applied |
5522 | */ |
5523 | if (!ret) |
5524 | tcpm_set_state(port, state: VCONN_SWAP_SEND_SOFT_RESET, |
5525 | PD_T_VCONN_STABLE); |
5526 | else |
5527 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5528 | break; |
5529 | case VCONN_SWAP_TURN_OFF_VCONN: |
5530 | tcpm_set_vconn(port, enable: false); |
5531 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5532 | break; |
5533 | case VCONN_SWAP_SEND_SOFT_RESET: |
5534 | tcpm_swap_complete(port, result: port->swap_status); |
5535 | if (tcpm_can_communicate_sop_prime(port)) { |
5536 | port->tx_sop_type = TCPC_TX_SOP_PRIME; |
5537 | port->upcoming_state = SOFT_RESET_SEND; |
5538 | tcpm_ams_start(port, ams: SOFT_RESET_AMS); |
5539 | } else { |
5540 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5541 | } |
5542 | break; |
5543 | |
5544 | case DR_SWAP_CANCEL: |
5545 | case PR_SWAP_CANCEL: |
5546 | case VCONN_SWAP_CANCEL: |
5547 | tcpm_swap_complete(port, result: port->swap_status); |
5548 | if (port->pwr_role == TYPEC_SOURCE) |
5549 | tcpm_set_state(port, state: SRC_READY, delay_ms: 0); |
5550 | else |
5551 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
5552 | break; |
5553 | case FR_SWAP_CANCEL: |
5554 | if (port->pwr_role == TYPEC_SOURCE) |
5555 | tcpm_set_state(port, state: SRC_READY, delay_ms: 0); |
5556 | else |
5557 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
5558 | break; |
5559 | |
5560 | case BIST_RX: |
5561 | switch (BDO_MODE_MASK(port->bist_request)) { |
5562 | case BDO_MODE_CARRIER2: |
5563 | tcpm_pd_transmit(port, tx_sop_type: TCPC_TX_BIST_MODE_2, NULL); |
5564 | tcpm_set_state(port, state: unattached_state(port), |
5565 | PD_T_BIST_CONT_MODE); |
5566 | break; |
5567 | case BDO_MODE_TESTDATA: |
5568 | if (port->tcpc->set_bist_data) { |
5569 | tcpm_log(port, fmt: "Enable BIST MODE TESTDATA" ); |
5570 | port->tcpc->set_bist_data(port->tcpc, true); |
5571 | } |
5572 | break; |
5573 | default: |
5574 | break; |
5575 | } |
5576 | break; |
5577 | case GET_STATUS_SEND: |
5578 | tcpm_pd_send_control(port, type: PD_CTRL_GET_STATUS, tx_sop_type: TCPC_TX_SOP); |
5579 | tcpm_set_state(port, state: GET_STATUS_SEND_TIMEOUT, |
5580 | PD_T_SENDER_RESPONSE); |
5581 | break; |
5582 | case GET_STATUS_SEND_TIMEOUT: |
5583 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5584 | break; |
5585 | case GET_PPS_STATUS_SEND: |
5586 | tcpm_pd_send_control(port, type: PD_CTRL_GET_PPS_STATUS, tx_sop_type: TCPC_TX_SOP); |
5587 | tcpm_set_state(port, state: GET_PPS_STATUS_SEND_TIMEOUT, |
5588 | PD_T_SENDER_RESPONSE); |
5589 | break; |
5590 | case GET_PPS_STATUS_SEND_TIMEOUT: |
5591 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5592 | break; |
5593 | case GET_SINK_CAP: |
5594 | tcpm_pd_send_control(port, type: PD_CTRL_GET_SINK_CAP, tx_sop_type: TCPC_TX_SOP); |
5595 | tcpm_set_state(port, state: GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE); |
5596 | break; |
5597 | case GET_SINK_CAP_TIMEOUT: |
5598 | port->sink_cap_done = true; |
5599 | tcpm_set_state(port, state: ready_state(port), delay_ms: 0); |
5600 | break; |
5601 | case ERROR_RECOVERY: |
5602 | tcpm_swap_complete(port, result: -EPROTO); |
5603 | tcpm_pps_complete(port, result: -EPROTO); |
5604 | tcpm_set_state(port, state: PORT_RESET, delay_ms: 0); |
5605 | break; |
5606 | case PORT_RESET: |
5607 | tcpm_reset_port(port); |
5608 | if (port->self_powered) |
5609 | tcpm_set_cc(port, cc: TYPEC_CC_OPEN); |
5610 | else |
5611 | tcpm_set_cc(port, cc: tcpm_default_state(port) == SNK_UNATTACHED ? |
5612 | TYPEC_CC_RD : tcpm_rp_cc(port)); |
5613 | tcpm_set_state(port, state: PORT_RESET_WAIT_OFF, |
5614 | PD_T_ERROR_RECOVERY); |
5615 | break; |
5616 | case PORT_RESET_WAIT_OFF: |
5617 | tcpm_set_state(port, |
5618 | state: tcpm_default_state(port), |
5619 | delay_ms: port->vbus_present ? PD_T_PS_SOURCE_OFF : 0); |
5620 | break; |
5621 | |
5622 | /* AMS intermediate state */ |
5623 | case AMS_START: |
5624 | if (port->upcoming_state == INVALID_STATE) { |
5625 | tcpm_set_state(port, state: port->pwr_role == TYPEC_SOURCE ? |
5626 | SRC_READY : SNK_READY, delay_ms: 0); |
5627 | break; |
5628 | } |
5629 | |
5630 | upcoming_state = port->upcoming_state; |
5631 | port->upcoming_state = INVALID_STATE; |
5632 | tcpm_set_state(port, state: upcoming_state, delay_ms: 0); |
5633 | break; |
5634 | |
5635 | /* Chunk state */ |
5636 | case CHUNK_NOT_SUPP: |
5637 | tcpm_pd_send_control(port, type: PD_CTRL_NOT_SUPP, tx_sop_type: TCPC_TX_SOP); |
5638 | tcpm_set_state(port, state: port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, delay_ms: 0); |
5639 | break; |
5640 | |
5641 | /* Cable states */ |
5642 | case SRC_VDM_IDENTITY_REQUEST: |
5643 | port->send_discover_prime = true; |
5644 | port->tx_sop_type = TCPC_TX_SOP_PRIME; |
5645 | mod_send_discover_delayed_work(port, delay_ms: 0); |
5646 | port->upcoming_state = SRC_SEND_CAPABILITIES; |
5647 | break; |
5648 | |
5649 | default: |
5650 | WARN(1, "Unexpected port state %d\n" , port->state); |
5651 | break; |
5652 | } |
5653 | } |
5654 | |
5655 | static void tcpm_state_machine_work(struct kthread_work *work) |
5656 | { |
5657 | struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine); |
5658 | enum tcpm_state prev_state; |
5659 | |
5660 | mutex_lock(&port->lock); |
5661 | port->state_machine_running = true; |
5662 | |
5663 | if (port->queued_message && tcpm_send_queued_message(port)) |
5664 | goto done; |
5665 | |
5666 | /* If we were queued due to a delayed state change, update it now */ |
5667 | if (port->delayed_state) { |
5668 | tcpm_log(port, fmt: "state change %s -> %s [delayed %ld ms]" , |
5669 | tcpm_states[port->state], |
5670 | tcpm_states[port->delayed_state], port->delay_ms); |
5671 | port->prev_state = port->state; |
5672 | port->state = port->delayed_state; |
5673 | port->delayed_state = INVALID_STATE; |
5674 | } |
5675 | |
5676 | /* |
5677 | * Continue running as long as we have (non-delayed) state changes |
5678 | * to make. |
5679 | */ |
5680 | do { |
5681 | prev_state = port->state; |
5682 | run_state_machine(port); |
5683 | if (port->queued_message) |
5684 | tcpm_send_queued_message(port); |
5685 | } while (port->state != prev_state && !port->delayed_state); |
5686 | |
5687 | done: |
5688 | port->state_machine_running = false; |
5689 | mutex_unlock(lock: &port->lock); |
5690 | } |
5691 | |
5692 | static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, |
5693 | enum typec_cc_status cc2) |
5694 | { |
5695 | enum typec_cc_status old_cc1, old_cc2; |
5696 | enum tcpm_state new_state; |
5697 | |
5698 | old_cc1 = port->cc1; |
5699 | old_cc2 = port->cc2; |
5700 | port->cc1 = cc1; |
5701 | port->cc2 = cc2; |
5702 | |
5703 | tcpm_log_force(port, |
5704 | fmt: "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]" , |
5705 | old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state], |
5706 | port->polarity, |
5707 | tcpm_port_is_disconnected(port) ? "disconnected" |
5708 | : "connected" ); |
5709 | |
5710 | switch (port->state) { |
5711 | case TOGGLING: |
5712 | if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) || |
5713 | tcpm_port_is_source(port)) |
5714 | tcpm_set_state(port, state: SRC_ATTACH_WAIT, delay_ms: 0); |
5715 | else if (tcpm_port_is_sink(port)) |
5716 | tcpm_set_state(port, state: SNK_ATTACH_WAIT, delay_ms: 0); |
5717 | break; |
5718 | case CHECK_CONTAMINANT: |
5719 | /* Wait for Toggling to be resumed */ |
5720 | break; |
5721 | case SRC_UNATTACHED: |
5722 | case ACC_UNATTACHED: |
5723 | if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) || |
5724 | tcpm_port_is_source(port)) |
5725 | tcpm_set_state(port, state: SRC_ATTACH_WAIT, delay_ms: 0); |
5726 | break; |
5727 | case SRC_ATTACH_WAIT: |
5728 | if (tcpm_port_is_disconnected(port) || |
5729 | tcpm_port_is_audio_detached(port)) |
5730 | tcpm_set_state(port, state: SRC_UNATTACHED, delay_ms: 0); |
5731 | else if (cc1 != old_cc1 || cc2 != old_cc2) |
5732 | tcpm_set_state(port, state: SRC_ATTACH_WAIT, delay_ms: 0); |
5733 | break; |
5734 | case SRC_ATTACHED: |
5735 | case SRC_STARTUP: |
5736 | case SRC_SEND_CAPABILITIES: |
5737 | case SRC_READY: |
5738 | if (tcpm_port_is_disconnected(port) || |
5739 | !tcpm_port_is_source(port)) { |
5740 | if (port->port_type == TYPEC_PORT_SRC) |
5741 | tcpm_set_state(port, state: SRC_UNATTACHED, tcpm_wait_for_discharge(port)); |
5742 | else |
5743 | tcpm_set_state(port, state: SNK_UNATTACHED, tcpm_wait_for_discharge(port)); |
5744 | } |
5745 | break; |
5746 | case SNK_UNATTACHED: |
5747 | if (tcpm_port_is_sink(port)) |
5748 | tcpm_set_state(port, state: SNK_ATTACH_WAIT, delay_ms: 0); |
5749 | break; |
5750 | case SNK_ATTACH_WAIT: |
5751 | if ((port->cc1 == TYPEC_CC_OPEN && |
5752 | port->cc2 != TYPEC_CC_OPEN) || |
5753 | (port->cc1 != TYPEC_CC_OPEN && |
5754 | port->cc2 == TYPEC_CC_OPEN)) |
5755 | new_state = SNK_DEBOUNCED; |
5756 | else if (tcpm_port_is_disconnected(port)) |
5757 | new_state = SNK_UNATTACHED; |
5758 | else |
5759 | break; |
5760 | if (new_state != port->delayed_state) |
5761 | tcpm_set_state(port, state: SNK_ATTACH_WAIT, delay_ms: 0); |
5762 | break; |
5763 | case SNK_DEBOUNCED: |
5764 | if (tcpm_port_is_disconnected(port)) |
5765 | new_state = SNK_UNATTACHED; |
5766 | else if (port->vbus_present) |
5767 | new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED; |
5768 | else |
5769 | new_state = SNK_UNATTACHED; |
5770 | if (new_state != port->delayed_state) |
5771 | tcpm_set_state(port, state: SNK_DEBOUNCED, delay_ms: 0); |
5772 | break; |
5773 | case SNK_READY: |
5774 | /* |
5775 | * EXIT condition is based primarily on vbus disconnect and CC is secondary. |
5776 | * "A port that has entered into USB PD communications with the Source and |
5777 | * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect |
5778 | * cable disconnect in addition to monitoring VBUS. |
5779 | * |
5780 | * A port that is monitoring the CC voltage for disconnect (but is not in |
5781 | * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to |
5782 | * Unattached.SNK within tSinkDisconnect after the CC voltage remains below |
5783 | * vRd-USB for tPDDebounce." |
5784 | * |
5785 | * When set_auto_vbus_discharge_threshold is enabled, CC pins go |
5786 | * away before vbus decays to disconnect threshold. Allow |
5787 | * disconnect to be driven by vbus disconnect when auto vbus |
5788 | * discharge is enabled. |
5789 | */ |
5790 | if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port)) |
5791 | tcpm_set_state(port, state: unattached_state(port), delay_ms: 0); |
5792 | else if (!port->pd_capable && |
5793 | (cc1 != old_cc1 || cc2 != old_cc2)) |
5794 | tcpm_set_current_limit(port, |
5795 | max_ma: tcpm_get_current_limit(port), |
5796 | mv: 5000); |
5797 | break; |
5798 | |
5799 | case AUDIO_ACC_ATTACHED: |
5800 | if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN) |
5801 | tcpm_set_state(port, state: AUDIO_ACC_DEBOUNCE, delay_ms: 0); |
5802 | break; |
5803 | case AUDIO_ACC_DEBOUNCE: |
5804 | if (tcpm_port_is_audio(port)) |
5805 | tcpm_set_state(port, state: AUDIO_ACC_ATTACHED, delay_ms: 0); |
5806 | break; |
5807 | |
5808 | case DEBUG_ACC_ATTACHED: |
5809 | if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN) |
5810 | tcpm_set_state(port, state: ACC_UNATTACHED, delay_ms: 0); |
5811 | break; |
5812 | |
5813 | case SNK_TRY: |
5814 | /* Do nothing, waiting for timeout */ |
5815 | break; |
5816 | |
5817 | case SNK_DISCOVERY: |
5818 | /* CC line is unstable, wait for debounce */ |
5819 | if (tcpm_port_is_disconnected(port)) |
5820 | tcpm_set_state(port, state: SNK_DISCOVERY_DEBOUNCE, delay_ms: 0); |
5821 | break; |
5822 | case SNK_DISCOVERY_DEBOUNCE: |
5823 | break; |
5824 | |
5825 | case SRC_TRYWAIT: |
5826 | /* Hand over to state machine if needed */ |
5827 | if (!port->vbus_present && tcpm_port_is_source(port)) |
5828 | tcpm_set_state(port, state: SRC_TRYWAIT_DEBOUNCE, delay_ms: 0); |
5829 | break; |
5830 | case SRC_TRYWAIT_DEBOUNCE: |
5831 | if (port->vbus_present || !tcpm_port_is_source(port)) |
5832 | tcpm_set_state(port, state: SRC_TRYWAIT, delay_ms: 0); |
5833 | break; |
5834 | case SNK_TRY_WAIT_DEBOUNCE: |
5835 | if (!tcpm_port_is_sink(port)) { |
5836 | port->max_wait = 0; |
5837 | tcpm_set_state(port, state: SRC_TRYWAIT, delay_ms: 0); |
5838 | } |
5839 | break; |
5840 | case SRC_TRY_WAIT: |
5841 | if (tcpm_port_is_source(port)) |
5842 | tcpm_set_state(port, state: SRC_TRY_DEBOUNCE, delay_ms: 0); |
5843 | break; |
5844 | case SRC_TRY_DEBOUNCE: |
5845 | tcpm_set_state(port, state: SRC_TRY_WAIT, delay_ms: 0); |
5846 | break; |
5847 | case SNK_TRYWAIT_DEBOUNCE: |
5848 | if (tcpm_port_is_sink(port)) |
5849 | tcpm_set_state(port, state: SNK_TRYWAIT_VBUS, delay_ms: 0); |
5850 | break; |
5851 | case SNK_TRYWAIT_VBUS: |
5852 | if (!tcpm_port_is_sink(port)) |
5853 | tcpm_set_state(port, state: SNK_TRYWAIT_DEBOUNCE, delay_ms: 0); |
5854 | break; |
5855 | case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS: |
5856 | if (!tcpm_port_is_sink(port)) |
5857 | tcpm_set_state(port, state: SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE); |
5858 | else |
5859 | tcpm_set_state(port, state: SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, delay_ms: 0); |
5860 | break; |
5861 | case SNK_TRYWAIT: |
5862 | /* Do nothing, waiting for tCCDebounce */ |
5863 | break; |
5864 | case PR_SWAP_SNK_SRC_SINK_OFF: |
5865 | case PR_SWAP_SRC_SNK_TRANSITION_OFF: |
5866 | case PR_SWAP_SRC_SNK_SOURCE_OFF: |
5867 | case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED: |
5868 | case PR_SWAP_SNK_SRC_SOURCE_ON: |
5869 | /* |
5870 | * CC state change is expected in PR_SWAP |
5871 | * Ignore it. |
5872 | */ |
5873 | break; |
5874 | case FR_SWAP_SEND: |
5875 | case FR_SWAP_SEND_TIMEOUT: |
5876 | case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: |
5877 | case FR_SWAP_SNK_SRC_NEW_SINK_READY: |
5878 | case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: |
5879 | /* Do nothing, CC change expected */ |
5880 | break; |
5881 | |
5882 | case PORT_RESET: |
5883 | case PORT_RESET_WAIT_OFF: |
5884 | /* |
5885 | * State set back to default mode once the timer completes. |
5886 | * Ignore CC changes here. |
5887 | */ |
5888 | break; |
5889 | default: |
5890 | /* |
5891 | * While acting as sink and auto vbus discharge is enabled, Allow disconnect |
5892 | * to be driven by vbus disconnect. |
5893 | */ |
5894 | if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK && |
5895 | port->auto_vbus_discharge_enabled)) |
5896 | tcpm_set_state(port, state: unattached_state(port), delay_ms: 0); |
5897 | break; |
5898 | } |
5899 | } |
5900 | |
5901 | static void _tcpm_pd_vbus_on(struct tcpm_port *port) |
5902 | { |
5903 | tcpm_log_force(port, fmt: "VBUS on" ); |
5904 | port->vbus_present = true; |
5905 | /* |
5906 | * When vbus_present is true i.e. Voltage at VBUS is greater than VSAFE5V implicitly |
5907 | * states that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here. |
5908 | */ |
5909 | port->vbus_vsafe0v = false; |
5910 | |
5911 | switch (port->state) { |
5912 | case SNK_TRANSITION_SINK_VBUS: |
5913 | port->explicit_contract = true; |
5914 | tcpm_set_state(port, state: SNK_READY, delay_ms: 0); |
5915 | break; |
5916 | case SNK_DISCOVERY: |
5917 | tcpm_set_state(port, state: SNK_DISCOVERY, delay_ms: 0); |
5918 | break; |
5919 | |
5920 | case SNK_DEBOUNCED: |
5921 | tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY |
5922 | : SNK_ATTACHED, |
5923 | delay_ms: 0); |
5924 | break; |
5925 | case SNK_HARD_RESET_WAIT_VBUS: |
5926 | tcpm_set_state(port, state: SNK_HARD_RESET_SINK_ON, delay_ms: 0); |
5927 | break; |
5928 | case SRC_ATTACHED: |
5929 | tcpm_set_state(port, state: SRC_STARTUP, delay_ms: 0); |
5930 | break; |
5931 | case SRC_HARD_RESET_VBUS_ON: |
5932 | tcpm_set_state(port, state: SRC_STARTUP, delay_ms: 0); |
5933 | break; |
5934 | |
5935 | case SNK_TRY: |
5936 | /* Do nothing, waiting for timeout */ |
5937 | break; |
5938 | case SRC_TRYWAIT: |
5939 | /* Do nothing, Waiting for Rd to be detected */ |
5940 | break; |
5941 | case SRC_TRYWAIT_DEBOUNCE: |
5942 | tcpm_set_state(port, state: SRC_TRYWAIT, delay_ms: 0); |
5943 | break; |
5944 | case SNK_TRY_WAIT_DEBOUNCE: |
5945 | /* Do nothing, waiting for PD_DEBOUNCE to do be done */ |
5946 | break; |
5947 | case SNK_TRYWAIT: |
5948 | /* Do nothing, waiting for tCCDebounce */ |
5949 | break; |
5950 | case SNK_TRYWAIT_VBUS: |
5951 | if (tcpm_port_is_sink(port)) |
5952 | tcpm_set_state(port, state: SNK_ATTACHED, delay_ms: 0); |
5953 | break; |
5954 | case SNK_TRYWAIT_DEBOUNCE: |
5955 | /* Do nothing, waiting for Rp */ |
5956 | break; |
5957 | case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS: |
5958 | if (port->vbus_present && tcpm_port_is_sink(port)) |
5959 | tcpm_set_state(port, state: SNK_ATTACHED, delay_ms: 0); |
5960 | break; |
5961 | case SRC_TRY_WAIT: |
5962 | case SRC_TRY_DEBOUNCE: |
5963 | /* Do nothing, waiting for sink detection */ |
5964 | break; |
5965 | case FR_SWAP_SEND: |
5966 | case FR_SWAP_SEND_TIMEOUT: |
5967 | case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: |
5968 | case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: |
5969 | if (port->tcpc->frs_sourcing_vbus) |
5970 | port->tcpc->frs_sourcing_vbus(port->tcpc); |
5971 | break; |
5972 | case FR_SWAP_SNK_SRC_NEW_SINK_READY: |
5973 | if (port->tcpc->frs_sourcing_vbus) |
5974 | port->tcpc->frs_sourcing_vbus(port->tcpc); |
5975 | tcpm_set_state(port, state: FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, delay_ms: 0); |
5976 | break; |
5977 | |
5978 | case PORT_RESET: |
5979 | case PORT_RESET_WAIT_OFF: |
5980 | /* |
5981 | * State set back to default mode once the timer completes. |
5982 | * Ignore vbus changes here. |
5983 | */ |
5984 | break; |
5985 | |
5986 | default: |
5987 | break; |
5988 | } |
5989 | } |
5990 | |
5991 | static void _tcpm_pd_vbus_off(struct tcpm_port *port) |
5992 | { |
5993 | tcpm_log_force(port, fmt: "VBUS off" ); |
5994 | port->vbus_present = false; |
5995 | port->vbus_never_low = false; |
5996 | switch (port->state) { |
5997 | case SNK_HARD_RESET_SINK_OFF: |
5998 | tcpm_set_state(port, state: SNK_HARD_RESET_WAIT_VBUS, delay_ms: 0); |
5999 | break; |
6000 | case HARD_RESET_SEND: |
6001 | break; |
6002 | case SNK_TRY: |
6003 | /* Do nothing, waiting for timeout */ |
6004 | break; |
6005 | case SRC_TRYWAIT: |
6006 | /* Hand over to state machine if needed */ |
6007 | if (tcpm_port_is_source(port)) |
6008 | tcpm_set_state(port, state: SRC_TRYWAIT_DEBOUNCE, delay_ms: 0); |
6009 | break; |
6010 | case SNK_TRY_WAIT_DEBOUNCE: |
6011 | /* Do nothing, waiting for PD_DEBOUNCE to do be done */ |
6012 | break; |
6013 | case SNK_TRYWAIT: |
6014 | case SNK_TRYWAIT_VBUS: |
6015 | case SNK_TRYWAIT_DEBOUNCE: |
6016 | break; |
6017 | case SNK_ATTACH_WAIT: |
6018 | case SNK_DEBOUNCED: |
6019 | /* Do nothing, as TCPM is still waiting for vbus to reaach VSAFE5V to connect */ |
6020 | break; |
6021 | |
6022 | case SNK_NEGOTIATE_CAPABILITIES: |
6023 | break; |
6024 | |
6025 | case PR_SWAP_SRC_SNK_TRANSITION_OFF: |
6026 | tcpm_set_state(port, state: PR_SWAP_SRC_SNK_SOURCE_OFF, delay_ms: 0); |
6027 | break; |
6028 | |
6029 | case PR_SWAP_SNK_SRC_SINK_OFF: |
6030 | /* Do nothing, expected */ |
6031 | break; |
6032 | |
6033 | case PR_SWAP_SNK_SRC_SOURCE_ON: |
6034 | /* |
6035 | * Do nothing when vbus off notification is received. |
6036 | * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON |
6037 | * for the vbus source to ramp up. |
6038 | */ |
6039 | break; |
6040 | |
6041 | case PORT_RESET_WAIT_OFF: |
6042 | tcpm_set_state(port, state: tcpm_default_state(port), delay_ms: 0); |
6043 | break; |
6044 | |
6045 | case SRC_TRY_WAIT: |
6046 | case SRC_TRY_DEBOUNCE: |
6047 | /* Do nothing, waiting for sink detection */ |
6048 | break; |
6049 | |
6050 | case SRC_STARTUP: |
6051 | case SRC_SEND_CAPABILITIES: |
6052 | case SRC_SEND_CAPABILITIES_TIMEOUT: |
6053 | case SRC_NEGOTIATE_CAPABILITIES: |
6054 | case SRC_TRANSITION_SUPPLY: |
6055 | case SRC_READY: |
6056 | case SRC_WAIT_NEW_CAPABILITIES: |
6057 | /* |
6058 | * Force to unattached state to re-initiate connection. |
6059 | * DRP port should move to Unattached.SNK instead of Unattached.SRC if |
6060 | * sink removed. Although sink removal here is due to source's vbus collapse, |
6061 | * treat it the same way for consistency. |
6062 | */ |
6063 | if (port->port_type == TYPEC_PORT_SRC) |
6064 | tcpm_set_state(port, state: SRC_UNATTACHED, tcpm_wait_for_discharge(port)); |
6065 | else |
6066 | tcpm_set_state(port, state: SNK_UNATTACHED, tcpm_wait_for_discharge(port)); |
6067 | break; |
6068 | |
6069 | case PORT_RESET: |
6070 | /* |
6071 | * State set back to default mode once the timer completes. |
6072 | * Ignore vbus changes here. |
6073 | */ |
6074 | break; |
6075 | |
6076 | case FR_SWAP_SEND: |
6077 | case FR_SWAP_SEND_TIMEOUT: |
6078 | case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: |
6079 | case FR_SWAP_SNK_SRC_NEW_SINK_READY: |
6080 | case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: |
6081 | /* Do nothing, vbus drop expected */ |
6082 | break; |
6083 | |
6084 | case SNK_HARD_RESET_WAIT_VBUS: |
6085 | /* Do nothing, its OK to receive vbus off events */ |
6086 | break; |
6087 | |
6088 | default: |
6089 | if (port->pwr_role == TYPEC_SINK && port->attached) |
6090 | tcpm_set_state(port, state: SNK_UNATTACHED, tcpm_wait_for_discharge(port)); |
6091 | break; |
6092 | } |
6093 | } |
6094 | |
6095 | static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port) |
6096 | { |
6097 | tcpm_log_force(port, fmt: "VBUS VSAFE0V" ); |
6098 | port->vbus_vsafe0v = true; |
6099 | switch (port->state) { |
6100 | case SRC_HARD_RESET_VBUS_OFF: |
6101 | /* |
6102 | * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait |
6103 | * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V. |
6104 | */ |
6105 | tcpm_set_state(port, state: SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER); |
6106 | break; |
6107 | case SRC_ATTACH_WAIT: |
6108 | if (tcpm_port_is_source(port)) |
6109 | tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED, |
6110 | PD_T_CC_DEBOUNCE); |
6111 | break; |
6112 | case SRC_STARTUP: |
6113 | case SRC_SEND_CAPABILITIES: |
6114 | case SRC_SEND_CAPABILITIES_TIMEOUT: |
6115 | case SRC_NEGOTIATE_CAPABILITIES: |
6116 | case SRC_TRANSITION_SUPPLY: |
6117 | case SRC_READY: |
6118 | case SRC_WAIT_NEW_CAPABILITIES: |
6119 | if (port->auto_vbus_discharge_enabled) { |
6120 | if (port->port_type == TYPEC_PORT_SRC) |
6121 | tcpm_set_state(port, state: SRC_UNATTACHED, delay_ms: 0); |
6122 | else |
6123 | tcpm_set_state(port, state: SNK_UNATTACHED, delay_ms: 0); |
6124 | } |
6125 | break; |
6126 | case PR_SWAP_SNK_SRC_SINK_OFF: |
6127 | case PR_SWAP_SNK_SRC_SOURCE_ON: |
6128 | /* Do nothing, vsafe0v is expected during transition */ |
6129 | break; |
6130 | case SNK_ATTACH_WAIT: |
6131 | case SNK_DEBOUNCED: |
6132 | /*Do nothing, still waiting for VSAFE5V for connect */ |
6133 | break; |
6134 | case SNK_HARD_RESET_WAIT_VBUS: |
6135 | /* Do nothing, its OK to receive vbus off events */ |
6136 | break; |
6137 | default: |
6138 | if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled) |
6139 | tcpm_set_state(port, state: SNK_UNATTACHED, delay_ms: 0); |
6140 | break; |
6141 | } |
6142 | } |
6143 | |
6144 | static void _tcpm_pd_hard_reset(struct tcpm_port *port) |
6145 | { |
6146 | tcpm_log_force(port, fmt: "Received hard reset" ); |
6147 | if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data) |
6148 | port->tcpc->set_bist_data(port->tcpc, false); |
6149 | |
6150 | switch (port->state) { |
6151 | case ERROR_RECOVERY: |
6152 | case PORT_RESET: |
6153 | case PORT_RESET_WAIT_OFF: |
6154 | return; |
6155 | default: |
6156 | break; |
6157 | } |
6158 | |
6159 | if (port->ams != NONE_AMS) |
6160 | port->ams = NONE_AMS; |
6161 | if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) |
6162 | port->ams = HARD_RESET; |
6163 | /* |
6164 | * If we keep receiving hard reset requests, executing the hard reset |
6165 | * must have failed. Revert to error recovery if that happens. |
6166 | */ |
6167 | tcpm_set_state(port, |
6168 | state: port->hard_reset_count < PD_N_HARD_RESET_COUNT ? |
6169 | HARD_RESET_START : ERROR_RECOVERY, |
6170 | delay_ms: 0); |
6171 | } |
6172 | |
6173 | static void tcpm_pd_event_handler(struct kthread_work *work) |
6174 | { |
6175 | struct tcpm_port *port = container_of(work, struct tcpm_port, |
6176 | event_work); |
6177 | u32 events; |
6178 | |
6179 | mutex_lock(&port->lock); |
6180 | |
6181 | spin_lock(lock: &port->pd_event_lock); |
6182 | while (port->pd_events) { |
6183 | events = port->pd_events; |
6184 | port->pd_events = 0; |
6185 | spin_unlock(lock: &port->pd_event_lock); |
6186 | if (events & TCPM_RESET_EVENT) |
6187 | _tcpm_pd_hard_reset(port); |
6188 | if (events & TCPM_VBUS_EVENT) { |
6189 | bool vbus; |
6190 | |
6191 | vbus = port->tcpc->get_vbus(port->tcpc); |
6192 | if (vbus) { |
6193 | _tcpm_pd_vbus_on(port); |
6194 | } else { |
6195 | _tcpm_pd_vbus_off(port); |
6196 | /* |
6197 | * When TCPC does not support detecting vsafe0v voltage level, |
6198 | * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v |
6199 | * to see if vbus has discharge to VSAFE0V. |
6200 | */ |
6201 | if (!port->tcpc->is_vbus_vsafe0v || |
6202 | port->tcpc->is_vbus_vsafe0v(port->tcpc)) |
6203 | _tcpm_pd_vbus_vsafe0v(port); |
6204 | } |
6205 | } |
6206 | if (events & TCPM_CC_EVENT) { |
6207 | enum typec_cc_status cc1, cc2; |
6208 | |
6209 | if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0) |
6210 | _tcpm_cc_change(port, cc1, cc2); |
6211 | } |
6212 | if (events & TCPM_FRS_EVENT) { |
6213 | if (port->state == SNK_READY) { |
6214 | int ret; |
6215 | |
6216 | port->upcoming_state = FR_SWAP_SEND; |
6217 | ret = tcpm_ams_start(port, ams: FAST_ROLE_SWAP); |
6218 | if (ret == -EAGAIN) |
6219 | port->upcoming_state = INVALID_STATE; |
6220 | } else { |
6221 | tcpm_log(port, fmt: "Discarding FRS_SIGNAL! Not in sink ready" ); |
6222 | } |
6223 | } |
6224 | if (events & TCPM_SOURCING_VBUS) { |
6225 | tcpm_log(port, fmt: "sourcing vbus" ); |
6226 | /* |
6227 | * In fast role swap case TCPC autonomously sources vbus. Set vbus_source |
6228 | * true as TCPM wouldn't have called tcpm_set_vbus. |
6229 | * |
6230 | * When vbus is sourced on the command on TCPM i.e. TCPM called |
6231 | * tcpm_set_vbus to source vbus, vbus_source would already be true. |
6232 | */ |
6233 | port->vbus_source = true; |
6234 | _tcpm_pd_vbus_on(port); |
6235 | } |
6236 | if (events & TCPM_PORT_CLEAN) { |
6237 | tcpm_log(port, fmt: "port clean" ); |
6238 | if (port->state == CHECK_CONTAMINANT) { |
6239 | if (tcpm_start_toggling(port, cc: tcpm_rp_cc(port))) |
6240 | tcpm_set_state(port, state: TOGGLING, delay_ms: 0); |
6241 | else |
6242 | tcpm_set_state(port, state: tcpm_default_state(port), delay_ms: 0); |
6243 | } |
6244 | } |
6245 | if (events & TCPM_PORT_ERROR) { |
6246 | tcpm_log(port, fmt: "port triggering error recovery" ); |
6247 | tcpm_set_state(port, state: ERROR_RECOVERY, delay_ms: 0); |
6248 | } |
6249 | |
6250 | spin_lock(lock: &port->pd_event_lock); |
6251 | } |
6252 | spin_unlock(lock: &port->pd_event_lock); |
6253 | mutex_unlock(lock: &port->lock); |
6254 | } |
6255 | |
6256 | void tcpm_cc_change(struct tcpm_port *port) |
6257 | { |
6258 | spin_lock(lock: &port->pd_event_lock); |
6259 | port->pd_events |= TCPM_CC_EVENT; |
6260 | spin_unlock(lock: &port->pd_event_lock); |
6261 | kthread_queue_work(worker: port->wq, work: &port->event_work); |
6262 | } |
6263 | EXPORT_SYMBOL_GPL(tcpm_cc_change); |
6264 | |
6265 | void tcpm_vbus_change(struct tcpm_port *port) |
6266 | { |
6267 | spin_lock(lock: &port->pd_event_lock); |
6268 | port->pd_events |= TCPM_VBUS_EVENT; |
6269 | spin_unlock(lock: &port->pd_event_lock); |
6270 | kthread_queue_work(worker: port->wq, work: &port->event_work); |
6271 | } |
6272 | EXPORT_SYMBOL_GPL(tcpm_vbus_change); |
6273 | |
6274 | void tcpm_pd_hard_reset(struct tcpm_port *port) |
6275 | { |
6276 | spin_lock(lock: &port->pd_event_lock); |
6277 | port->pd_events = TCPM_RESET_EVENT; |
6278 | spin_unlock(lock: &port->pd_event_lock); |
6279 | kthread_queue_work(worker: port->wq, work: &port->event_work); |
6280 | } |
6281 | EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset); |
6282 | |
6283 | void tcpm_sink_frs(struct tcpm_port *port) |
6284 | { |
6285 | spin_lock(lock: &port->pd_event_lock); |
6286 | port->pd_events |= TCPM_FRS_EVENT; |
6287 | spin_unlock(lock: &port->pd_event_lock); |
6288 | kthread_queue_work(worker: port->wq, work: &port->event_work); |
6289 | } |
6290 | EXPORT_SYMBOL_GPL(tcpm_sink_frs); |
6291 | |
6292 | void tcpm_sourcing_vbus(struct tcpm_port *port) |
6293 | { |
6294 | spin_lock(lock: &port->pd_event_lock); |
6295 | port->pd_events |= TCPM_SOURCING_VBUS; |
6296 | spin_unlock(lock: &port->pd_event_lock); |
6297 | kthread_queue_work(worker: port->wq, work: &port->event_work); |
6298 | } |
6299 | EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus); |
6300 | |
6301 | void tcpm_port_clean(struct tcpm_port *port) |
6302 | { |
6303 | spin_lock(lock: &port->pd_event_lock); |
6304 | port->pd_events |= TCPM_PORT_CLEAN; |
6305 | spin_unlock(lock: &port->pd_event_lock); |
6306 | kthread_queue_work(worker: port->wq, work: &port->event_work); |
6307 | } |
6308 | EXPORT_SYMBOL_GPL(tcpm_port_clean); |
6309 | |
6310 | bool tcpm_port_is_toggling(struct tcpm_port *port) |
6311 | { |
6312 | return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING; |
6313 | } |
6314 | EXPORT_SYMBOL_GPL(tcpm_port_is_toggling); |
6315 | |
6316 | void tcpm_port_error_recovery(struct tcpm_port *port) |
6317 | { |
6318 | spin_lock(lock: &port->pd_event_lock); |
6319 | port->pd_events |= TCPM_PORT_ERROR; |
6320 | spin_unlock(lock: &port->pd_event_lock); |
6321 | kthread_queue_work(worker: port->wq, work: &port->event_work); |
6322 | } |
6323 | EXPORT_SYMBOL_GPL(tcpm_port_error_recovery); |
6324 | |
/*
 * Delayed work: start a Get_Sink_Cap AMS so the partner's sink capabilities
 * (needed to evaluate FRS support) are queried once the port is an idle,
 * PD-3.0, DRP sink.  Re-arms itself until the query can be sent.
 */
static void tcpm_enable_frs_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
	int ret;

	mutex_lock(&port->lock);
	/* Not FRS capable */
	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
	    !port->tcpc->enable_frs ||
	    /* Sink caps queried */
	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
		goto unlock;

	/* Send when the state machine is idle */
	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
	    port->send_discover_prime)
		goto resched;

	port->upcoming_state = GET_SINK_CAP;
	ret = tcpm_ams_start(port, ams: GET_SINK_CAPABILITIES);
	if (ret == -EAGAIN) {
		/* AMS busy: leave sink_cap_done clear and retry later */
		port->upcoming_state = INVALID_STATE;
	} else {
		port->sink_cap_done = true;
		goto unlock;
	}
resched:
	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
unlock:
	mutex_unlock(lock: &port->lock);
}
6357 | |
/*
 * Delayed work: send DISCOVER_IDENTITY on the requested SOP type once the
 * port and the VDM state machine are idle.  Re-arms itself while busy;
 * gives up entirely for a device-role port below PD 3.0.
 */
static void tcpm_send_discover_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);

	mutex_lock(&port->lock);
	/* No need to send DISCOVER_IDENTITY anymore */
	if (!port->send_discover && !port->send_discover_prime)
		goto unlock;

	/* Device data role below PD 3.0: drop the request instead of retrying */
	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
		port->send_discover = false;
		port->send_discover_prime = false;
		goto unlock;
	}

	/* Retry if the port is not idle */
	if ((port->state != SRC_READY && port->state != SNK_READY &&
	     port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
		goto unlock;
	}

	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, count: 0, tx_sop_type: port->tx_sop_type);

unlock:
	mutex_unlock(lock: &port->lock);
}
6385 | |
/*
 * tcpm_dr_set - typec_operations data-role swap request (host <-> device).
 *
 * Starts a DR_Swap AMS (or a port reset for non-PD partners) and blocks on
 * swap_complete for up to PD_ROLE_SWAP_TIMEOUT.  Returns 0 on success,
 * -EINVAL if the port cannot honor the request, -EAGAIN when busy,
 * -ETIMEDOUT on timeout, otherwise the state machine's swap_status.
 */
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
	struct tcpm_port *port = typec_get_drvdata(port: p);
	int ret;

	/* swap_lock serializes user-initiated swap requests against each other */
	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->typec_caps.data != TYPEC_PORT_DRD) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested data role */
	if (port->data_role == data) {
		ret = 0;
		goto port_unlock;
	}

	/*
	 * XXX
	 * 6.3.9: If an alternate mode is active, a request to swap
	 * alternate modes shall trigger a port reset.
	 * Reject data role swap request in this case.
	 */

	if (!port->pd_capable) {
		/*
		 * If the partner is not PD capable, reset the port to
		 * trigger a role change. This can only work if a preferred
		 * role is configured, and if it matches the requested role.
		 */
		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
		    port->try_role == port->pwr_role) {
			ret = -EINVAL;
			goto port_unlock;
		}
		port->non_pd_role_swap = true;
		tcpm_set_state(port, state: PORT_RESET, delay_ms: 0);
	} else {
		port->upcoming_state = DR_SWAP_SEND;
		ret = tcpm_ams_start(port, ams: DATA_ROLE_SWAP);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto port_unlock;
		}
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(x: &port->swap_complete);
	/* Drop port->lock so the state machine can progress while we wait */
	mutex_unlock(lock: &port->lock);

	if (!wait_for_completion_timeout(x: &port->swap_complete,
					 timeout: msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	port->non_pd_role_swap = false;
	goto swap_unlock;

port_unlock:
	mutex_unlock(lock: &port->lock);
swap_unlock:
	mutex_unlock(lock: &port->swap_lock);
	return ret;
}
6457 | |
/*
 * tcpm_pr_set - typec_operations power-role swap request (source <-> sink).
 *
 * Starts a PR_Swap AMS and blocks on swap_complete for up to
 * PD_ROLE_SWAP_TIMEOUT.  Returns 0 on success, -EINVAL for non-DRP ports,
 * -EAGAIN when busy, -ETIMEDOUT on timeout, otherwise swap_status.
 */
static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(port: p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->port_type != TYPEC_PORT_DRP) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested power role */
	if (role == port->pwr_role) {
		ret = 0;
		goto port_unlock;
	}

	port->upcoming_state = PR_SWAP_SEND;
	ret = tcpm_ams_start(port, ams: POWER_ROLE_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(x: &port->swap_complete);
	/* Drop port->lock so the state machine can progress while we wait */
	mutex_unlock(lock: &port->lock);

	if (!wait_for_completion_timeout(x: &port->swap_complete,
					 timeout: msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(lock: &port->lock);
swap_unlock:
	mutex_unlock(lock: &port->swap_lock);
	return ret;
}
6506 | |
/*
 * tcpm_vconn_set - typec_operations VCONN source swap request.
 *
 * Starts a VCONN_Swap AMS and blocks on swap_complete for up to
 * PD_ROLE_SWAP_TIMEOUT.  Returns 0 on success, -EAGAIN when busy,
 * -ETIMEDOUT on timeout, otherwise swap_status.
 */
static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(port: p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested VCONN role */
	if (role == port->vconn_role) {
		ret = 0;
		goto port_unlock;
	}

	port->upcoming_state = VCONN_SWAP_SEND;
	ret = tcpm_ams_start(port, ams: VCONN_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(x: &port->swap_complete);
	/* Drop port->lock so the state machine can progress while we wait */
	mutex_unlock(lock: &port->lock);

	if (!wait_for_completion_timeout(x: &port->swap_complete,
					 timeout: msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(lock: &port->lock);
swap_unlock:
	mutex_unlock(lock: &port->swap_lock);
	return ret;
}
6551 | |
6552 | static int tcpm_try_role(struct typec_port *p, int role) |
6553 | { |
6554 | struct tcpm_port *port = typec_get_drvdata(port: p); |
6555 | struct tcpc_dev *tcpc = port->tcpc; |
6556 | int ret = 0; |
6557 | |
6558 | mutex_lock(&port->lock); |
6559 | if (tcpc->try_role) |
6560 | ret = tcpc->try_role(tcpc, role); |
6561 | if (!ret) |
6562 | port->try_role = role; |
6563 | port->try_src_count = 0; |
6564 | port->try_snk_count = 0; |
6565 | mutex_unlock(lock: &port->lock); |
6566 | |
6567 | return ret; |
6568 | } |
6569 | |
/*
 * tcpm_pps_set_op_curr - request a new PPS operating current (in mA).
 *
 * Valid only while a PPS contract is active and the port is in SNK_READY.
 * The request must not exceed the APDO maximum current and must keep the
 * resulting power at or above operating_snk_mw.  Blocks on pps_complete
 * for up to PD_PPS_CTRL_TIMEOUT.
 */
static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (req_op_curr > port->pps_data.max_curr) {
		ret = -EINVAL;
		goto port_unlock;
	}

	/* mA * mV / 1000 == mW */
	target_mw = (req_op_curr * port->supply_voltage) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, ams: POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down operating current to align with PPS valid steps */
	/*
	 * NOTE(review): rounding happens after the power check above, so the
	 * rounded value can end up slightly below target_mw — confirm intended.
	 */
	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);

	reinit_completion(x: &port->pps_complete);
	port->pps_data.req_op_curr = req_op_curr;
	port->pps_status = 0;
	port->pps_pending = true;
	/* Drop port->lock so the state machine can run the negotiation */
	mutex_unlock(lock: &port->lock);

	if (!wait_for_completion_timeout(x: &port->pps_complete,
					 timeout: msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(lock: &port->lock);
swap_unlock:
	mutex_unlock(lock: &port->swap_lock);

	return ret;
}
6630 | |
/*
 * tcpm_pps_set_out_volt - request a new PPS output voltage (in mV).
 *
 * Valid only while a PPS contract is active and the port is in SNK_READY.
 * The power implied by the present current limit and the requested voltage
 * must stay at or above operating_snk_mw.  Blocks on pps_complete for up
 * to PD_PPS_CTRL_TIMEOUT.
 */
static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* mA * mV / 1000 == mW; uses the current operating current */
	target_mw = (port->current_limit * req_out_volt) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, ams: POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down output voltage to align with PPS valid steps */
	/*
	 * NOTE(review): unlike the current path there is no explicit check
	 * against the APDO voltage range here — confirm it is enforced where
	 * the request message is built.
	 */
	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);

	reinit_completion(x: &port->pps_complete);
	port->pps_data.req_out_volt = req_out_volt;
	port->pps_status = 0;
	port->pps_pending = true;
	/* Drop port->lock so the state machine can run the negotiation */
	mutex_unlock(lock: &port->lock);

	if (!wait_for_completion_timeout(x: &port->pps_complete,
					 timeout: msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(lock: &port->lock);
swap_unlock:
	mutex_unlock(lock: &port->swap_lock);

	return ret;
}
6686 | |
/*
 * tcpm_pps_activate - enter (@activate true) or leave (@activate false) a
 * PPS contract.
 *
 * Requires PPS support and SNK_READY.  Starts a power negotiation toward
 * SNK_NEGOTIATE_PPS_CAPABILITIES or back to SNK_NEGOTIATE_CAPABILITIES,
 * then blocks on pps_complete for up to PD_PPS_CTRL_TIMEOUT.
 */
static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
{
	int ret = 0;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.supported) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	/* Trying to deactivate PPS when already deactivated so just bail */
	if (!port->pps_data.active && !activate)
		goto port_unlock;

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (activate)
		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	else
		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
	ret = tcpm_ams_start(port, ams: POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	reinit_completion(x: &port->pps_complete);
	port->pps_status = 0;
	port->pps_pending = true;

	/* Trigger PPS request or move back to standard PDO contract */
	if (activate) {
		/* Seed the PPS request from the present operating point */
		port->pps_data.req_out_volt = port->supply_voltage;
		port->pps_data.req_op_curr = port->current_limit;
	}
	/* Drop port->lock so the state machine can run the negotiation */
	mutex_unlock(lock: &port->lock);

	if (!wait_for_completion_timeout(x: &port->pps_complete,
					 timeout: msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(lock: &port->lock);
swap_unlock:
	mutex_unlock(lock: &port->swap_lock);

	return ret;
}
6744 | |
/*
 * tcpm_init - (re)initialize the TCPC hardware and restart the port state
 * machine from scratch.
 *
 * NOTE(review): the visible caller (tcpm_tcpc_reset) holds port->lock —
 * confirm the same for any other call site.
 */
static void tcpm_init(struct tcpm_port *port)
{
	enum typec_cc_status cc1, cc2;

	port->tcpc->init(port->tcpc);

	tcpm_reset_port(port);

	/*
	 * XXX
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;

	/*
	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
	 * So implicitly vbus_vsafe0v = false.
	 *
	 * 2. When vbus_present is false and TCPC does NOT support querying
	 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
	 * vbus_vsafe0v is true.
	 *
	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
	 * then, query tcpc for vsafe0v status.
	 */
	if (port->vbus_present)
		port->vbus_vsafe0v = false;
	else if (!port->tcpc->is_vbus_vsafe0v)
		port->vbus_vsafe0v = true;
	else
		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);

	tcpm_set_state(port, state: tcpm_default_state(port), delay_ms: 0);

	/* Seed the state machine with the current CC status */
	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);

	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, state: PORT_RESET, delay_ms: 0);
}
6791 | |
6792 | static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type) |
6793 | { |
6794 | struct tcpm_port *port = typec_get_drvdata(port: p); |
6795 | |
6796 | mutex_lock(&port->lock); |
6797 | if (type == port->port_type) |
6798 | goto port_unlock; |
6799 | |
6800 | port->port_type = type; |
6801 | |
6802 | if (!port->connected) { |
6803 | tcpm_set_state(port, state: PORT_RESET, delay_ms: 0); |
6804 | } else if (type == TYPEC_PORT_SNK) { |
6805 | if (!(port->pwr_role == TYPEC_SINK && |
6806 | port->data_role == TYPEC_DEVICE)) |
6807 | tcpm_set_state(port, state: PORT_RESET, delay_ms: 0); |
6808 | } else if (type == TYPEC_PORT_SRC) { |
6809 | if (!(port->pwr_role == TYPEC_SOURCE && |
6810 | port->data_role == TYPEC_HOST)) |
6811 | tcpm_set_state(port, state: PORT_RESET, delay_ms: 0); |
6812 | } |
6813 | |
6814 | port_unlock: |
6815 | mutex_unlock(lock: &port->lock); |
6816 | return 0; |
6817 | } |
6818 | |
6819 | static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd) |
6820 | { |
6821 | int i; |
6822 | |
6823 | for (i = 0; port->pd_list[i]; i++) { |
6824 | if (port->pd_list[i]->pd == pd) |
6825 | return port->pd_list[i]; |
6826 | } |
6827 | |
6828 | return ERR_PTR(error: -ENODATA); |
6829 | } |
6830 | |
6831 | static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p) |
6832 | { |
6833 | struct tcpm_port *port = typec_get_drvdata(port: p); |
6834 | |
6835 | return port->pds; |
6836 | } |
6837 | |
/*
 * tcpm_pd_set - typec_operations hook: switch the active PD capabilities
 * set to @pd.
 *
 * Copies the selected pd_data's sink/source PDOs into the live port tables
 * and, depending on the current state, refreshes Rp or starts a new power
 * negotiation so the partner sees the updated capabilities.
 */
static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
{
	struct tcpm_port *port = typec_get_drvdata(port: p);
	struct pd_data *data;
	int i, ret = 0;

	mutex_lock(&port->lock);

	/* Already selected: nothing to do */
	if (port->selected_pd == pd)
		goto unlock;

	data = tcpm_find_pd_data(port, pd);
	if (IS_ERR(ptr: data)) {
		ret = PTR_ERR(ptr: data);
		goto unlock;
	}

	if (data->sink_desc.pdo[0]) {
		for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
			port->snk_pdo[i] = data->sink_desc.pdo[i];
		port->nr_snk_pdo = i;
		port->operating_snk_mw = data->operating_snk_mw;
	}

	if (data->source_desc.pdo[0]) {
		for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
			port->src_pdo[i] = data->source_desc.pdo[i];
		port->nr_src_pdo = i;
	}

	switch (port->state) {
	/* Unattached source side: refresh the advertised Rp only */
	case SRC_UNATTACHED:
	case SRC_ATTACH_WAIT:
	case SRC_TRYWAIT:
		tcpm_set_cc(port, cc: tcpm_rp_cc(port));
		break;
	/* Attached as source: resend Source_Capabilities */
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		port->caps_count = 0;
		port->upcoming_state = SRC_SEND_CAPABILITIES;
		ret = tcpm_ams_start(port, ams: POWER_NEGOTIATION);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto unlock;
		}
		break;
	/* Attached as sink: renegotiate using the new sink capabilities */
	case SNK_NEGOTIATE_CAPABILITIES:
	case SNK_NEGOTIATE_PPS_CAPABILITIES:
	case SNK_READY:
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
		if (port->pps_data.active)
			port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
		else if (port->pd_capable)
			port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
		else
			break;

		port->update_sink_caps = true;

		ret = tcpm_ams_start(port, ams: POWER_NEGOTIATION);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto unlock;
		}
		break;
	default:
		break;
	}

	port->port_source_caps = data->source_cap;
	port->port_sink_caps = data->sink_cap;
	/* Detach the previous PD object before attaching the new one */
	typec_port_set_usb_power_delivery(port: p, NULL);
	port->selected_pd = pd;
	typec_port_set_usb_power_delivery(port: p, pd: port->selected_pd);
unlock:
	mutex_unlock(lock: &port->lock);
	return ret;
}
6920 | |
/* typec_port operations implemented by the TCPM state machine */
static const struct typec_operations tcpm_ops = {
	.try_role = tcpm_try_role,
	.dr_set = tcpm_dr_set,
	.pr_set = tcpm_pr_set,
	.vconn_set = tcpm_vconn_set,
	.port_type_set = tcpm_port_type_set,
	.pd_get = tcpm_pd_get,
	.pd_set = tcpm_pd_set
};
6930 | |
6931 | void tcpm_tcpc_reset(struct tcpm_port *port) |
6932 | { |
6933 | mutex_lock(&port->lock); |
6934 | /* XXX: Maintain PD connection if possible? */ |
6935 | tcpm_init(port); |
6936 | mutex_unlock(lock: &port->lock); |
6937 | } |
6938 | EXPORT_SYMBOL_GPL(tcpm_tcpc_reset); |
6939 | |
6940 | static void tcpm_port_unregister_pd(struct tcpm_port *port) |
6941 | { |
6942 | int i; |
6943 | |
6944 | port->port_sink_caps = NULL; |
6945 | port->port_source_caps = NULL; |
6946 | for (i = 0; i < port->pd_count; i++) { |
6947 | usb_power_delivery_unregister_capabilities(cap: port->pd_list[i]->sink_cap); |
6948 | usb_power_delivery_unregister_capabilities(cap: port->pd_list[i]->source_cap); |
6949 | devm_kfree(dev: port->dev, p: port->pd_list[i]); |
6950 | port->pd_list[i] = NULL; |
6951 | usb_power_delivery_unregister(pd: port->pds[i]); |
6952 | port->pds[i] = NULL; |
6953 | } |
6954 | } |
6955 | |
/*
 * tcpm_port_register_pd - register a usb_power_delivery object plus its
 * source/sink capabilities for every pd_list entry.
 *
 * On any failure all partially registered objects are unregistered again.
 * Returns 0 on success or a negative errno.
 */
static int tcpm_port_register_pd(struct tcpm_port *port)
{
	struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
	struct usb_power_delivery_capabilities *cap;
	int ret, i;

	/* Nothing to register when no PDOs were parsed from firmware */
	if (!port->nr_src_pdo && !port->nr_snk_pdo)
		return 0;

	for (i = 0; i < port->pd_count; i++) {
		port->pds[i] = usb_power_delivery_register(parent: port->dev, desc: &desc);
		if (IS_ERR(ptr: port->pds[i])) {
			ret = PTR_ERR(ptr: port->pds[i]);
			goto err_unregister;
		}
		port->pd_list[i]->pd = port->pds[i];

		if (port->pd_list[i]->source_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(pd: port->pds[i],
								       desc: &port->pd_list[i]->source_desc);
			if (IS_ERR(ptr: cap)) {
				ret = PTR_ERR(ptr: cap);
				goto err_unregister;
			}
			port->pd_list[i]->source_cap = cap;
		}

		if (port->pd_list[i]->sink_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(pd: port->pds[i],
								       desc: &port->pd_list[i]->sink_desc);
			if (IS_ERR(ptr: cap)) {
				ret = PTR_ERR(ptr: cap);
				goto err_unregister;
			}
			port->pd_list[i]->sink_cap = cap;
		}
	}

	/* The first entry is the default selection */
	port->port_source_caps = port->pd_list[0]->source_cap;
	port->port_sink_caps = port->pd_list[0]->sink_cap;
	port->selected_pd = port->pds[0];
	return 0;

err_unregister:
	tcpm_port_unregister_pd(port);

	return ret;
}
7004 | |
/*
 * tcpm_fw_get_caps - parse port capabilities from the firmware (DT/ACPI)
 * node.
 *
 * Fills typec_caps, accessory modes, FRS current and — for PD-capable
 * ports — allocates and populates port->pds / port->pd_list from either a
 * "capabilities" child node (multiple capability sets) or, for backward
 * compatibility, from the port node itself (a single set).  Returns 0 on
 * success or a negative errno.
 */
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
	struct fwnode_handle *capabilities, *child, *caps = NULL;
	unsigned int nr_src_pdo, nr_snk_pdo;
	const char *opmode_str;
	u32 *src_pdo, *snk_pdo;
	u32 uw, frs_current;
	int ret = 0, i;
	int mode;

	if (!fwnode)
		return -EINVAL;

	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This it breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fw_devlink_purge_absent_suppliers(fwnode);

	ret = typec_get_fw_cap(cap: &port->typec_caps, fwnode);
	if (ret < 0)
		return ret;

	mode = 0;

	if (fwnode_property_read_bool(fwnode, propname: "accessory-mode-audio" ))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;

	if (fwnode_property_read_bool(fwnode, propname: "accessory-mode-debug" ))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;

	port->port_type = port->typec_caps.type;
	port->pd_supported = !fwnode_property_read_bool(fwnode, propname: "pd-disable" );
	port->slow_charger_loop = fwnode_property_read_bool(fwnode, propname: "slow-charger-loop" );
	port->self_powered = fwnode_property_read_bool(fwnode, propname: "self-powered" );

	/* Non-PD ports only need a fixed Rp derived from typec-power-opmode */
	if (!port->pd_supported) {
		ret = fwnode_property_read_string(fwnode, propname: "typec-power-opmode" , val: &opmode_str);
		if (ret)
			return ret;
		ret = typec_find_pwr_opmode(name: opmode_str);
		if (ret < 0)
			return ret;
		port->src_rp = tcpm_pwr_opmode_to_rp(opmode: ret);
		return 0;
	}

	/* The following code are applicable to pd-capable ports, i.e. pd_supported is true. */

	/* FRS can only be supported by DRP ports */
	if (port->port_type == TYPEC_PORT_DRP) {
		ret = fwnode_property_read_u32(fwnode, propname: "new-source-frs-typec-current" ,
					       val: &frs_current);
		if (!ret && frs_current <= FRS_5V_3A)
			port->new_source_frs_current = frs_current;

		/* The property is optional; its absence is not an error */
		if (ret)
			ret = 0;
	}

	/* For the backward compatibility, "capabilities" node is optional. */
	capabilities = fwnode_get_named_child_node(fwnode, childname: "capabilities" );
	if (!capabilities) {
		port->pd_count = 1;
	} else {
		fwnode_for_each_child_node(capabilities, child)
			port->pd_count++;

		if (!port->pd_count) {
			ret = -ENODATA;
			goto put_capabilities;
		}
	}

	port->pds = devm_kcalloc(dev: port->dev, n: port->pd_count, size: sizeof(struct usb_power_delivery *),
				 GFP_KERNEL);
	if (!port->pds) {
		ret = -ENOMEM;
		goto put_capabilities;
	}

	port->pd_list = devm_kcalloc(dev: port->dev, n: port->pd_count, size: sizeof(struct pd_data *),
				     GFP_KERNEL);
	if (!port->pd_list) {
		ret = -ENOMEM;
		goto put_capabilities;
	}

	for (i = 0; i < port->pd_count; i++) {
		port->pd_list[i] = devm_kzalloc(dev: port->dev, size: sizeof(struct pd_data), GFP_KERNEL);
		if (!port->pd_list[i]) {
			ret = -ENOMEM;
			goto put_capabilities;
		}

		src_pdo = port->pd_list[i]->source_desc.pdo;
		port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
		snk_pdo = port->pd_list[i]->sink_desc.pdo;
		port->pd_list[i]->sink_desc.role = TYPEC_SINK;

		/* If "capabilities" is NULL, fall back to single pd cap population. */
		if (!capabilities)
			caps = fwnode;
		else
			caps = fwnode_get_next_child_node(fwnode: capabilities, child: caps);

		if (port->port_type != TYPEC_PORT_SNK) {
			ret = fwnode_property_count_u32(fwnode: caps, propname: "source-pdos" );
			if (ret == 0) {
				ret = -EINVAL;
				goto put_caps;
			}
			if (ret < 0)
				goto put_caps;

			nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(fwnode: caps, propname: "source-pdos" , val: src_pdo,
							     nval: nr_src_pdo);
			if (ret)
				goto put_caps;

			ret = tcpm_validate_caps(port, pdo: src_pdo, nr_pdo: nr_src_pdo);
			if (ret)
				goto put_caps;

			/* The first set also seeds the live source PDO table */
			if (i == 0) {
				port->nr_src_pdo = nr_src_pdo;
				memcpy_and_pad(dest: port->src_pdo, dest_len: sizeof(u32) * PDO_MAX_OBJECTS,
					       src: port->pd_list[0]->source_desc.pdo,
					       count: sizeof(u32) * nr_src_pdo,
					       pad: 0);
			}
		}

		if (port->port_type != TYPEC_PORT_SRC) {
			ret = fwnode_property_count_u32(fwnode: caps, propname: "sink-pdos" );
			if (ret == 0) {
				ret = -EINVAL;
				goto put_caps;
			}

			if (ret < 0)
				goto put_caps;

			nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(fwnode: caps, propname: "sink-pdos" , val: snk_pdo,
							     nval: nr_snk_pdo);
			if (ret)
				goto put_caps;

			ret = tcpm_validate_caps(port, pdo: snk_pdo, nr_pdo: nr_snk_pdo);
			if (ret)
				goto put_caps;

			/* Sink capability sets must declare their operating power */
			if (fwnode_property_read_u32(fwnode: caps, propname: "op-sink-microwatt" , val: &uw) < 0) {
				ret = -EINVAL;
				goto put_caps;
			}

			port->pd_list[i]->operating_snk_mw = uw / 1000;

			/* The first set also seeds the live sink PDO table */
			if (i == 0) {
				port->nr_snk_pdo = nr_snk_pdo;
				memcpy_and_pad(dest: port->snk_pdo, dest_len: sizeof(u32) * PDO_MAX_OBJECTS,
					       src: port->pd_list[0]->sink_desc.pdo,
					       count: sizeof(u32) * nr_snk_pdo,
					       pad: 0);
				port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
			}
		}
	}

put_caps:
	/* In the single-set fallback, caps aliases fwnode — don't drop that ref */
	if (caps != fwnode)
		fwnode_handle_put(fwnode: caps);
put_capabilities:
	fwnode_handle_put(fwnode: capabilities);
	return ret;
}
7187 | |
7188 | static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode) |
7189 | { |
7190 | int ret; |
7191 | |
7192 | /* sink-vdos is optional */ |
7193 | ret = fwnode_property_count_u32(fwnode, propname: "sink-vdos" ); |
7194 | if (ret < 0) |
7195 | return 0; |
7196 | |
7197 | port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS); |
7198 | if (port->nr_snk_vdo) { |
7199 | ret = fwnode_property_read_u32_array(fwnode, propname: "sink-vdos" , |
7200 | val: port->snk_vdo, |
7201 | nval: port->nr_snk_vdo); |
7202 | if (ret < 0) |
7203 | return ret; |
7204 | } |
7205 | |
7206 | /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */ |
7207 | if (port->nr_snk_vdo) { |
7208 | ret = fwnode_property_count_u32(fwnode, propname: "sink-vdos-v1" ); |
7209 | if (ret < 0) |
7210 | return ret; |
7211 | else if (ret == 0) |
7212 | return -ENODATA; |
7213 | |
7214 | port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS); |
7215 | ret = fwnode_property_read_u32_array(fwnode, propname: "sink-vdos-v1" , |
7216 | val: port->snk_vdo_v1, |
7217 | nval: port->nr_snk_vdo_v1); |
7218 | if (ret < 0) |
7219 | return ret; |
7220 | } |
7221 | |
7222 | return 0; |
7223 | } |
7224 | |
7225 | /* Power Supply access to expose source power information */ |
/* Values reported for POWER_SUPPLY_PROP_ONLINE. */
enum tcpm_psy_online_states {
	TCPM_PSY_OFFLINE = 0,	/* not sinking power over VBUS */
	TCPM_PSY_FIXED_ONLINE,	/* sinking under a fixed PDO contract */
	TCPM_PSY_PROG_ONLINE,	/* sinking under a programmable (PPS) contract */
};
7231 | |
/* Properties exposed through the per-port source power-supply device. */
static enum power_supply_property tcpm_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};
7241 | |
7242 | static int tcpm_psy_get_online(struct tcpm_port *port, |
7243 | union power_supply_propval *val) |
7244 | { |
7245 | if (port->vbus_charge) { |
7246 | if (port->pps_data.active) |
7247 | val->intval = TCPM_PSY_PROG_ONLINE; |
7248 | else |
7249 | val->intval = TCPM_PSY_FIXED_ONLINE; |
7250 | } else { |
7251 | val->intval = TCPM_PSY_OFFLINE; |
7252 | } |
7253 | |
7254 | return 0; |
7255 | } |
7256 | |
7257 | static int tcpm_psy_get_voltage_min(struct tcpm_port *port, |
7258 | union power_supply_propval *val) |
7259 | { |
7260 | if (port->pps_data.active) |
7261 | val->intval = port->pps_data.min_volt * 1000; |
7262 | else |
7263 | val->intval = port->supply_voltage * 1000; |
7264 | |
7265 | return 0; |
7266 | } |
7267 | |
7268 | static int tcpm_psy_get_voltage_max(struct tcpm_port *port, |
7269 | union power_supply_propval *val) |
7270 | { |
7271 | if (port->pps_data.active) |
7272 | val->intval = port->pps_data.max_volt * 1000; |
7273 | else |
7274 | val->intval = port->supply_voltage * 1000; |
7275 | |
7276 | return 0; |
7277 | } |
7278 | |
7279 | static int tcpm_psy_get_voltage_now(struct tcpm_port *port, |
7280 | union power_supply_propval *val) |
7281 | { |
7282 | val->intval = port->supply_voltage * 1000; |
7283 | |
7284 | return 0; |
7285 | } |
7286 | |
7287 | static int tcpm_psy_get_current_max(struct tcpm_port *port, |
7288 | union power_supply_propval *val) |
7289 | { |
7290 | if (port->pps_data.active) |
7291 | val->intval = port->pps_data.max_curr * 1000; |
7292 | else |
7293 | val->intval = port->current_limit * 1000; |
7294 | |
7295 | return 0; |
7296 | } |
7297 | |
7298 | static int tcpm_psy_get_current_now(struct tcpm_port *port, |
7299 | union power_supply_propval *val) |
7300 | { |
7301 | val->intval = port->current_limit * 1000; |
7302 | |
7303 | return 0; |
7304 | } |
7305 | |
7306 | static int tcpm_psy_get_input_power_limit(struct tcpm_port *port, |
7307 | union power_supply_propval *val) |
7308 | { |
7309 | unsigned int src_mv, src_ma, max_src_uw = 0; |
7310 | unsigned int i, tmp; |
7311 | |
7312 | for (i = 0; i < port->nr_source_caps; i++) { |
7313 | u32 pdo = port->source_caps[i]; |
7314 | |
7315 | if (pdo_type(pdo) == PDO_TYPE_FIXED) { |
7316 | src_mv = pdo_fixed_voltage(pdo); |
7317 | src_ma = pdo_max_current(pdo); |
7318 | tmp = src_mv * src_ma; |
7319 | max_src_uw = tmp > max_src_uw ? tmp : max_src_uw; |
7320 | } |
7321 | } |
7322 | |
7323 | val->intval = max_src_uw; |
7324 | return 0; |
7325 | } |
7326 | |
7327 | static int tcpm_psy_get_prop(struct power_supply *psy, |
7328 | enum power_supply_property psp, |
7329 | union power_supply_propval *val) |
7330 | { |
7331 | struct tcpm_port *port = power_supply_get_drvdata(psy); |
7332 | int ret = 0; |
7333 | |
7334 | switch (psp) { |
7335 | case POWER_SUPPLY_PROP_USB_TYPE: |
7336 | val->intval = port->usb_type; |
7337 | break; |
7338 | case POWER_SUPPLY_PROP_ONLINE: |
7339 | ret = tcpm_psy_get_online(port, val); |
7340 | break; |
7341 | case POWER_SUPPLY_PROP_VOLTAGE_MIN: |
7342 | ret = tcpm_psy_get_voltage_min(port, val); |
7343 | break; |
7344 | case POWER_SUPPLY_PROP_VOLTAGE_MAX: |
7345 | ret = tcpm_psy_get_voltage_max(port, val); |
7346 | break; |
7347 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: |
7348 | ret = tcpm_psy_get_voltage_now(port, val); |
7349 | break; |
7350 | case POWER_SUPPLY_PROP_CURRENT_MAX: |
7351 | ret = tcpm_psy_get_current_max(port, val); |
7352 | break; |
7353 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
7354 | ret = tcpm_psy_get_current_now(port, val); |
7355 | break; |
7356 | case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT: |
7357 | tcpm_psy_get_input_power_limit(port, val); |
7358 | break; |
7359 | default: |
7360 | ret = -EINVAL; |
7361 | break; |
7362 | } |
7363 | |
7364 | return ret; |
7365 | } |
7366 | |
7367 | static int tcpm_psy_set_online(struct tcpm_port *port, |
7368 | const union power_supply_propval *val) |
7369 | { |
7370 | int ret; |
7371 | |
7372 | switch (val->intval) { |
7373 | case TCPM_PSY_FIXED_ONLINE: |
7374 | ret = tcpm_pps_activate(port, activate: false); |
7375 | break; |
7376 | case TCPM_PSY_PROG_ONLINE: |
7377 | ret = tcpm_pps_activate(port, activate: true); |
7378 | break; |
7379 | default: |
7380 | ret = -EINVAL; |
7381 | break; |
7382 | } |
7383 | |
7384 | return ret; |
7385 | } |
7386 | |
7387 | static int tcpm_psy_set_prop(struct power_supply *psy, |
7388 | enum power_supply_property psp, |
7389 | const union power_supply_propval *val) |
7390 | { |
7391 | struct tcpm_port *port = power_supply_get_drvdata(psy); |
7392 | int ret; |
7393 | |
7394 | /* |
7395 | * All the properties below are related to USB PD. The check needs to be |
7396 | * property specific when a non-pd related property is added. |
7397 | */ |
7398 | if (!port->pd_supported) |
7399 | return -EOPNOTSUPP; |
7400 | |
7401 | switch (psp) { |
7402 | case POWER_SUPPLY_PROP_ONLINE: |
7403 | ret = tcpm_psy_set_online(port, val); |
7404 | break; |
7405 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: |
7406 | ret = tcpm_pps_set_out_volt(port, req_out_volt: val->intval / 1000); |
7407 | break; |
7408 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
7409 | if (val->intval > port->pps_data.max_curr * 1000) |
7410 | ret = -EINVAL; |
7411 | else |
7412 | ret = tcpm_pps_set_op_curr(port, req_op_curr: val->intval / 1000); |
7413 | break; |
7414 | default: |
7415 | ret = -EINVAL; |
7416 | break; |
7417 | } |
7418 | power_supply_changed(psy: port->psy); |
7419 | return ret; |
7420 | } |
7421 | |
7422 | static int tcpm_psy_prop_writeable(struct power_supply *psy, |
7423 | enum power_supply_property psp) |
7424 | { |
7425 | switch (psp) { |
7426 | case POWER_SUPPLY_PROP_ONLINE: |
7427 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: |
7428 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
7429 | return 1; |
7430 | default: |
7431 | return 0; |
7432 | } |
7433 | } |
7434 | |
/* USB supply types this port may report, depending on the active contract. */
static enum power_supply_usb_type tcpm_psy_usb_types[] = {
	POWER_SUPPLY_USB_TYPE_C,
	POWER_SUPPLY_USB_TYPE_PD,
	POWER_SUPPLY_USB_TYPE_PD_PPS,
};
7440 | |
/* Prefix for the per-port power-supply name, suffixed with dev_name(). */
static const char *tcpm_psy_name_prefix = "tcpm-source-psy-" ;
7442 | |
7443 | static int devm_tcpm_psy_register(struct tcpm_port *port) |
7444 | { |
7445 | struct power_supply_config psy_cfg = {}; |
7446 | const char *port_dev_name = dev_name(dev: port->dev); |
7447 | size_t psy_name_len = strlen(tcpm_psy_name_prefix) + |
7448 | strlen(port_dev_name) + 1; |
7449 | char *psy_name; |
7450 | |
7451 | psy_cfg.drv_data = port; |
7452 | psy_cfg.fwnode = dev_fwnode(port->dev); |
7453 | psy_name = devm_kzalloc(dev: port->dev, size: psy_name_len, GFP_KERNEL); |
7454 | if (!psy_name) |
7455 | return -ENOMEM; |
7456 | |
7457 | snprintf(buf: psy_name, size: psy_name_len, fmt: "%s%s" , tcpm_psy_name_prefix, |
7458 | port_dev_name); |
7459 | port->psy_desc.name = psy_name; |
7460 | port->psy_desc.type = POWER_SUPPLY_TYPE_USB; |
7461 | port->psy_desc.usb_types = tcpm_psy_usb_types; |
7462 | port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types); |
7463 | port->psy_desc.properties = tcpm_psy_props; |
7464 | port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props); |
7465 | port->psy_desc.get_property = tcpm_psy_get_prop; |
7466 | port->psy_desc.set_property = tcpm_psy_set_prop; |
7467 | port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable; |
7468 | |
7469 | port->usb_type = POWER_SUPPLY_USB_TYPE_C; |
7470 | |
7471 | port->psy = devm_power_supply_register(parent: port->dev, desc: &port->psy_desc, |
7472 | cfg: &psy_cfg); |
7473 | |
7474 | return PTR_ERR_OR_ZERO(ptr: port->psy); |
7475 | } |
7476 | |
7477 | static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer) |
7478 | { |
7479 | struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer); |
7480 | |
7481 | if (port->registered) |
7482 | kthread_queue_work(worker: port->wq, work: &port->state_machine); |
7483 | return HRTIMER_NORESTART; |
7484 | } |
7485 | |
7486 | static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer) |
7487 | { |
7488 | struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer); |
7489 | |
7490 | if (port->registered) |
7491 | kthread_queue_work(worker: port->wq, work: &port->vdm_state_machine); |
7492 | return HRTIMER_NORESTART; |
7493 | } |
7494 | |
7495 | static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer) |
7496 | { |
7497 | struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer); |
7498 | |
7499 | if (port->registered) |
7500 | kthread_queue_work(worker: port->wq, work: &port->enable_frs); |
7501 | return HRTIMER_NORESTART; |
7502 | } |
7503 | |
7504 | static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer) |
7505 | { |
7506 | struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer); |
7507 | |
7508 | if (port->registered) |
7509 | kthread_queue_work(worker: port->wq, work: &port->send_discover_work); |
7510 | return HRTIMER_NORESTART; |
7511 | } |
7512 | |
/**
 * tcpm_register_port - Register a USB Type-C Port Manager instance
 * @dev:  parent device, used for allocations, naming and logging
 * @tcpc: low-level TCPC operations; the callbacks checked below are mandatory
 *
 * Allocates the port, creates a dedicated kthread worker for the state
 * machines, parses firmware-provided capabilities, and registers the power
 * supply, PD objects and typec port before kicking off the initial state
 * machine run.
 *
 * Return: the new port on success, an ERR_PTR() on failure.
 */
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
	struct tcpm_port *port;
	int err;

	/* Refuse registration unless every mandatory TCPC callback is set. */
	if (!dev || !tcpc ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(error: -EINVAL);

	port = devm_kzalloc(dev, size: sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(error: -ENOMEM);

	port->dev = dev;
	port->tcpc = tcpc;

	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);

	/* Dedicated worker thread; FIFO scheduling keeps PD timing tight. */
	port->wq = kthread_create_worker(flags: 0, namefmt: dev_name(dev));
	if (IS_ERR(ptr: port->wq))
		return ERR_CAST(ptr: port->wq);
	sched_set_fifo(p: port->wq->task);

	/* Work items and their hrtimers; handlers queue work on the worker. */
	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
	hrtimer_init(timer: &port->state_machine_timer, CLOCK_MONOTONIC, mode: HRTIMER_MODE_REL);
	port->state_machine_timer.function = state_machine_timer_handler;
	hrtimer_init(timer: &port->vdm_state_machine_timer, CLOCK_MONOTONIC, mode: HRTIMER_MODE_REL);
	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
	hrtimer_init(timer: &port->enable_frs_timer, CLOCK_MONOTONIC, mode: HRTIMER_MODE_REL);
	port->enable_frs_timer.function = enable_frs_timer_handler;
	hrtimer_init(timer: &port->send_discover_timer, CLOCK_MONOTONIC, mode: HRTIMER_MODE_REL);
	port->send_discover_timer.function = send_discover_timer_handler;

	spin_lock_init(&port->pd_event_lock);

	init_completion(x: &port->tx_complete);
	init_completion(x: &port->swap_complete);
	init_completion(x: &port->pps_complete);
	tcpm_debugfs_init(port);

	/* Pull capabilities and sink VDOs from firmware (DT/ACPI). */
	err = tcpm_fw_get_caps(port, fwnode: tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;
	err = tcpm_fw_get_snk_vdos(port, fwnode: tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;

	port->try_role = port->typec_caps.prefer_role;

	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
	port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
	port->typec_caps.svdm_version = SVDM_VER_2_0;
	port->typec_caps.driver_data = port;
	port->typec_caps.ops = &tcpm_ops;
	port->typec_caps.orientation_aware = 1;

	port->partner_desc.identity = &port->partner_ident;

	/* Prefer a role switch tied to the device; fall back to the fwnode. */
	port->role_sw = usb_role_switch_get(dev: port->dev);
	if (!port->role_sw)
		port->role_sw = fwnode_usb_role_switch_get(node: tcpc->fwnode);
	if (IS_ERR(ptr: port->role_sw)) {
		err = PTR_ERR(ptr: port->role_sw);
		goto out_destroy_wq;
	}

	err = devm_tcpm_psy_register(port);
	if (err)
		goto out_role_sw_put;
	power_supply_changed(psy: port->psy);

	err = tcpm_port_register_pd(port);
	if (err)
		goto out_role_sw_put;

	if (port->pds)
		port->typec_caps.pd = port->pds[0];

	port->typec_port = typec_register_port(parent: port->dev, cap: &port->typec_caps);
	if (IS_ERR(ptr: port->typec_port)) {
		err = PTR_ERR(ptr: port->typec_port);
		goto out_unregister_pd;
	}

	typec_port_register_altmodes(port: port->typec_port,
				     ops: &tcpm_altmode_ops, drvdata: port,
				     altmodes: port->port_altmode, ALTMODE_DISCOVERY_MAX);
	typec_port_register_cable_ops(altmodes: port->port_altmode, ARRAY_SIZE(port->port_altmode),
				      ops: &tcpm_cable_ops);
	/* From here on the hrtimer handlers may queue work on the worker. */
	port->registered = true;

	mutex_lock(&port->lock);
	tcpm_init(port);
	mutex_unlock(lock: &port->lock);

	tcpm_log(port, fmt: "%s: registered" , dev_name(dev));
	return port;

out_unregister_pd:
	tcpm_port_unregister_pd(port);
out_role_sw_put:
	usb_role_switch_put(sw: port->role_sw);
out_destroy_wq:
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(worker: port->wq);
	return ERR_PTR(error: err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);
7628 | |
/**
 * tcpm_unregister_port - Tear down a port created by tcpm_register_port()
 * @port: the port to unregister
 */
void tcpm_unregister_port(struct tcpm_port *port)
{
	int i;

	/*
	 * Clear 'registered' first so the timer handlers stop queueing work
	 * (they all check it), then destroy the worker.
	 */
	port->registered = false;
	kthread_destroy_worker(worker: port->wq);

	/* With the worker gone, cancel any still-armed timers. */
	hrtimer_cancel(timer: &port->send_discover_timer);
	hrtimer_cancel(timer: &port->enable_frs_timer);
	hrtimer_cancel(timer: &port->vdm_state_machine_timer);
	hrtimer_cancel(timer: &port->state_machine_timer);

	tcpm_reset_port(port);

	tcpm_port_unregister_pd(port);

	/* Unwind registrations in the opposite order of tcpm_register_port(). */
	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(altmode: port->port_altmode[i]);
	typec_unregister_port(port: port->typec_port);
	usb_role_switch_put(sw: port->role_sw);
	tcpm_debugfs_exit(port);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
7652 | |
/* Module metadata */
MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>" );
MODULE_DESCRIPTION("USB Type-C Port Manager" );
MODULE_LICENSE("GPL" );
7656 | |