/*
 * Copyright (c) 2023 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief USB-C Power Policy Engine (PE)
 *
 * The information in this file was taken from the USB PD
 * Specification Revision 3.0, Version 2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_stack.h"

/**
 * @brief Initialize the Source Policy Engine layer
 */
void pe_src_init(const struct device *dev)
{
        struct usbc_port_data *data = dev->data;
        struct policy_engine *pe = data->pe;

        /* Initial role of source is DFP */
        pe_set_data_role(dev, TC_ROLE_DFP);

        /* Reject Sink Request by default */
        pe->snk_request_reply = SNK_REQUEST_REJECT;

        /* Initialize timers */
        usbc_timer_init(&pe->pd_t_typec_send_source_cap, PD_T_TYPEC_SEND_SOURCE_CAP_MIN_MS);
        usbc_timer_init(&pe->pd_t_ps_hard_reset, PD_T_PS_HARD_RESET_MAX_MS);

        /* Goto startup state */
        pe_set_state(dev, PE_SRC_STARTUP);
}

/**
 * @brief Handle source-specific DPM requests
 */
bool source_dpm_requests(const struct device *dev)
{
        struct usbc_port_data *data = dev->data;
        struct policy_engine *pe = data->pe;

        if (pe->dpm_request == REQUEST_GET_SNK_CAPS) {
                atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
                pe_set_state(dev, PE_GET_SINK_CAP);
                return true;
        } else if (pe->dpm_request == REQUEST_PE_GOTO_MIN) {
                atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
                pe_set_state(dev, PE_SRC_TRANSITION_SUPPLY);
                return true;
        }

        return false;
}

/**
 * @brief Send Source Caps to Sink
 */
static void send_src_caps(struct policy_engine *pe)
{
        const struct device *dev = pe->dev;
        struct usbc_port_data *data = dev->data;
        struct protocol_layer_tx_t *prl_tx = data->prl_tx;
        struct pd_msg *msg = &prl_tx->emsg;
        const uint32_t *pdos;
        uint32_t num_pdos = 0;

        /* This callback must be implemented */
        __ASSERT(data->policy_cb_get_src_caps != NULL, "Callback pointer should not be NULL");

        data->policy_cb_get_src_caps(dev, &pdos, &num_pdos);

        msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(num_pdos);
        memcpy(msg->data, pdos, msg->len);
        pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_SOURCE_CAP);
}

/**
 * @brief 8.3.3.2.1 PE_SRC_Startup State
 */
void pe_src_startup_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        LOG_INF("PE_SRC_Startup");

        /* Reset CapsCounter */
        pe->caps_counter = 0;

        /* Reset the protocol layer */
        prl_reset(dev);

        /* Set power role to Source */
        pe->power_role = TC_ROLE_SOURCE;

        /* Invalidate explicit contract */
        atomic_clear_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);

        policy_notify(dev, NOT_PD_CONNECTED);
}

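/**
 * @brief PE_SRC_Startup run state: once the Protocol Layer is running
 *        again, transition to PE_SRC_Send_Capabilities.
 */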
enum smf_state_result pe_src_startup_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * Once the reset process completes, the Policy Engine Shall
         * transition to the PE_SRC_Send_Capabilities state
         */
        if (prl_is_running(dev)) {
                pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
        }
        return SMF_EVENT_PROPAGATE;
}

/**
 * @brief 8.3.3.2.2 PE_SRC_Discovery State
 */
void pe_src_discovery_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;

        LOG_INF("PE_SRC_Discovery");

        /*
         * Start the SourceCapabilityTimer in order to trigger sending a
         * Source_Capabilities message
         */
        usbc_timer_start(&pe->pd_t_typec_send_source_cap);
}

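/**
 * @brief PE_SRC_Discovery run state: when the SourceCapabilityTimer
 *        expires, either retry sending Source Capabilities or disable
 *        the port once nCapsCount is exceeded.
 */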
enum smf_state_result pe_src_discovery_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * The Policy Engine Shall transition to the PE_SRC_Send_Capabilities state when:
         * 1) The SourceCapabilityTimer times out
         * 2) And CapsCounter ≤ nCapsCount
         */
        if (usbc_timer_expired(&pe->pd_t_typec_send_source_cap)) {
                if (pe->caps_counter <= PD_N_CAPS_COUNT) {
                        pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
                } else {
                        pe_set_state(dev, PE_SRC_DISABLED);
                }
        }
        return SMF_EVENT_PROPAGATE;
}

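/**
 * @brief PE_SRC_Discovery exit state: stop the SourceCapabilityTimer.
 */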
void pe_src_discovery_exit(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;

        usbc_timer_stop(&pe->pd_t_typec_send_source_cap);
}

/**
 * @brief 8.3.3.2.3 PE_SRC_Send_Capabilities State
 */
void pe_src_send_capabilities_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;

        /* Request present source capabilities from Device Policy Manager */
        send_src_caps(pe);
        /* Increment CapsCounter */
        pe->caps_counter++;
        /* Init submachine */
        pe->submachine = SM_WAIT_FOR_TX;

        LOG_INF("PE_SRC_Send_Capabilities");
}

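/**
 * @brief PE_SRC_Send_Capabilities run state: wait for the Source
 *        Capabilities message to be sent, then wait for a Request from
 *        the Sink or a SenderResponseTimer timeout.
 */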
enum smf_state_result pe_src_send_capabilities_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;
        struct usbc_port_data *data = dev->data;
        struct protocol_layer_rx_t *prl_rx = data->prl_rx;

        switch (pe->submachine) {
        case SM_WAIT_FOR_TX:
                /*
                 * When the message has been sent, the Policy Engine Shall:
                 * 1) Stop the NoResponseTimer.
                 * 2) Reset the HardResetCounter and CapsCounter to zero.
                 * 3) Initialize and run the SenderResponseTimer
                 */
                if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
                        usbc_timer_stop(&pe->pd_t_no_response);
                        pe->hard_reset_counter = 0;
                        pe->caps_counter = 0;
                        pe->submachine = SM_WAIT_FOR_RX;
                }
                /*
                 * The Policy Engine Shall transition to the PE_SRC_Discovery
                 * state when:
                 * 1) The Protocol Layer indicates that the Message has
                 *    not been sent
                 * 2) And we are presently not Connected.
                 */
                else if ((atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_XMIT_ERROR) ||
                          atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) &&
                         (atomic_test_bit(pe->flags, PE_FLAGS_PD_CONNECTED) == false)) {
                        pe_set_state(dev, PE_SRC_DISCOVERY);
                }
                break;
        case SM_WAIT_FOR_RX:
                /*
                 * The Policy Engine Shall transition to the
                 * PE_SRC_Negotiate_Capability state when:
                 * 1) A Request Message is received from the Sink.
                 */
                if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
                        union pd_header header = prl_rx->emsg.header;

                        if (received_data_message(dev, header, PD_DATA_REQUEST)) {
                                /* Set to highest revision supported by both ports */
                                prl_set_rev(dev, PD_PACKET_SOP,
                                            MIN(PD_REV30, header.specification_revision));
                                pe_set_state(dev, PE_SRC_NEGOTIATE_CAPABILITY);
                        }
                }
                /*
                 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
                 * state when:
                 * 1) The SenderResponseTimer times out
                 */
                else if (usbc_timer_expired(&pe->pd_t_sender_response)) {
                        pe_set_state(dev, PE_SRC_HARD_RESET);
                }
                break;
        }
        return SMF_EVENT_PROPAGATE;
}

/**
 * @brief 8.3.3.2.4 PE_SRC_Negotiate_Capability State
 */
void pe_src_negotiate_capability_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;
        struct usbc_port_data *data = dev->data;
        struct protocol_layer_rx_t *prl_rx = data->prl_rx;

        LOG_INF("PE_SRC_Negotiate_Capability");

        /* Get sink request */
        pe->snk_request = *(uint32_t *)prl_rx->emsg.data;

        /*
         * Ask the Device Policy Manager to evaluate the Request
         * from the Attached Sink.
         */
        pe->snk_request_reply = policy_check_sink_request(dev, pe->snk_request);

        /*
         * The Policy Engine Shall transition to the
         * PE_SRC_Transition_Supply state when:
         * 1) The Request can be met.
         */
        if (pe->snk_request_reply == SNK_REQUEST_VALID) {
                pe_set_state(dev, PE_SRC_TRANSITION_SUPPLY);
        }
        /*
         * The Policy Engine Shall transition to the
         * PE_SRC_Capability_Response state when:
         * 1) The Request cannot be met.
         * 2) Or the Request can be met later from the Power Reserve.
         */
        else {
                pe_set_state(dev, PE_SRC_CAPABILITY_RESPONSE);
        }
}

/**
 * @brief 8.3.3.2.5 PE_SRC_Transition_Supply State
 */
void pe_src_transition_supply_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        LOG_INF("PE_SRC_Transition_Supply");

        /*
         * If snk_request_reply is set, this state was entered
         * from PE_SRC_Negotiate_Capability. So send Accept Message
         * and inform the Device Policy Manager that it Shall transition
         * the power supply to the Requested power level.
         */
        if (pe->snk_request_reply == SNK_REQUEST_VALID) {
                pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
                policy_notify(dev, TRANSITION_PS);
        }
        /*
         * If snk_request_reply is not valid, this state was entered
         * from PE_SRC_Ready. So send GotoMin Message.
         */
        else {
                pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GOTO_MIN);
        }
}

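/**
 * @brief PE_SRC_Transition_Supply run state: wait for the power supply
 *        to be ready, or hard reset on a Protocol Error.
 */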
enum smf_state_result pe_src_transition_supply_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * The Policy Engine Shall transition to the PE_SRC_Ready state when:
         * 1) The Device Policy Manager informs the Policy Engine that
         *    the power supply is ready.
         */
        if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
                if (policy_is_ps_ready(dev)) {
                        pe_set_state(dev, PE_SRC_READY);
                }
        }
        /*
         * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
         * state when:
         * 1) A Protocol Error occurs.
         */
        else if (atomic_test_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
                pe_set_state(dev, PE_SRC_HARD_RESET);
        }
        return SMF_EVENT_PROPAGATE;
}

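/**
 * @brief PE_SRC_Transition_Supply exit state: send PS_RDY and record the
 *        new Explicit Contract if the Sink's Request was accepted.
 */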
void pe_src_transition_supply_exit(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /* Send PS_RDY message */
        if (pe->snk_request_reply == SNK_REQUEST_VALID) {
                /* Clear request reply and reject by default */
                pe->snk_request_reply = SNK_REQUEST_REJECT;
                /* Send PS Ready */
                pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_PS_RDY);
                /* Explicit Contract is now in place */
                atomic_set_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
                /* Update present contract */
                pe->present_contract = pe->snk_request;
        }
}

/**
 * @brief 8.3.3.2.6 PE_SRC_Ready State
 */
void pe_src_ready_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        LOG_INF("PE_SRC_Ready");

        /*
         * If the transition into PE_SRC_Ready is the result of Protocol Error
         * that has not caused a Soft Reset then the notification to the
         * Protocol Layer of the end of the AMS Shall Not be sent since there
         * is a Message to be processed.
         *
         * Else on entry to the PE_SRC_Ready state the Source Shall notify the
         * Protocol Layer of the end of the Atomic Message Sequence (AMS).
         */
        if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR_NO_SOFT_RESET)) {
                pe_dpm_end_ams(dev);
        }
}

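/**
 * @brief PE_SRC_Ready run state: dispatch incoming Extended, Data and
 *        Control Messages, or service pending DPM requests.
 */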
enum smf_state_result pe_src_ready_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;
        struct usbc_port_data *data = dev->data;
        struct protocol_layer_rx_t *prl_rx = data->prl_rx;

        /* Handle incoming messages */
        if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
                union pd_header header = prl_rx->emsg.header;

                /*
                 * Extended Message Requests
                 */
                if (header.extended) {
                        extended_message_not_supported(dev);
                }
                /*
                 * Data Message Requests
                 */
                else if (header.number_of_data_objects > 0) {
                        switch (header.message_type) {
                        case PD_DATA_REQUEST:
                                pe_set_state(dev, PE_SRC_NEGOTIATE_CAPABILITY);
                                break;
                        case PD_DATA_VENDOR_DEF:
                                /*
                                 * VDMs are not supported: PD 2.0 ignores them,
                                 * and PD 3.0 replies with Not_Supported.
                                 */
                                if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
                                        pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
                                }
                                break;
                        default:
                                pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
                        }
                }
                /*
                 * Control Message Requests
                 */
                else {
                        switch (header.message_type) {
                        case PD_CTRL_GOOD_CRC:
                                /* Do nothing */
                                break;
                        case PD_CTRL_NOT_SUPPORTED:
                                /* Notify DPM */
                                policy_notify(dev, MSG_NOT_SUPPORTED_RECEIVED);
                                break;
                        case PD_CTRL_PING:
                                /* Do nothing */
                                break;
                        case PD_CTRL_GET_SOURCE_CAP:
                                pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
                                break;
                        case PD_CTRL_DR_SWAP:
                                pe_set_state(dev, PE_DRS_EVALUATE_SWAP);
                                break;
                        /*
                         * USB PD 3.0 6.8.1:
                         * Receiving an unexpected message shall be responded
                         * to with a Soft Reset Message.
                         */
                        case PD_CTRL_ACCEPT:
                        case PD_CTRL_REJECT:
                        case PD_CTRL_WAIT:
                        case PD_CTRL_PS_RDY:
                                pe_send_soft_reset(dev, prl_rx->emsg.type);
                                break;
                        /*
                         * Receiving an unknown or unsupported message
                         * shall be responded to with a Not Supported
                         * Message.
                         */
                        default:
                                pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
                                break;
                        }
                }
        } else {
                /* Handle Source Device Policy Manager Requests */
                source_dpm_requests(dev);
        }
        return SMF_EVENT_PROPAGATE;
}

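/**
 * @brief PE_SRC_Ready exit state: notify the Protocol Layer when a
 *        DPM-initiated AMS is about to start.
 */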
void pe_src_ready_exit(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * If the Source is initiating an AMS, then notify the
         * PRL that the first message in an AMS will follow.
         */
        if (pe_dpm_initiated_ams(dev)) {
                prl_first_msg_notificaiton(dev);
        }
}

/**
 * @brief 8.3.3.2.7 PE_SRC_Disabled State
 */
void pe_src_disabled_entry(void *obj)
{
        LOG_INF("PE_SRC_Disabled");

        /*
         * Unresponsive to USB Power Delivery messaging, but not to Hard Reset
         * Signaling. See pe_got_hard_reset
         */
}

/**
 * @brief 8.3.3.2.11 PE_SRC_Transition_to_default State
 */
void pe_src_transition_to_default_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * On entry to the PE_SRC_Transition_to_default state the
         * Policy Engine Shall:
         * 1: indicate to the Device Policy Manager that the power
         *    supply Shall Hard Reset
         * 2: request a reset of the local hardware
         * 3: request the Device Policy Manager to set the Port
         *    Data Role to DFP and turn off VCONN.
         *
         * NOTE: 1, 2 and VCONN off are done by the Device Policy Manager
         * when it receives the HARD_RESET_RECEIVED notification.
         */
        policy_notify(dev, HARD_RESET_RECEIVED);
        pe->data_role = TC_ROLE_DFP;
        policy_notify(dev, DATA_ROLE_IS_DFP);
}

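/**
 * @brief PE_SRC_Transition_to_default run state: return to PE_SRC_Startup
 *        once the power supply is back at the default level.
 */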
enum smf_state_result pe_src_transition_to_default_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * The Policy Engine Shall transition to the PE_SRC_Startup
         * state when:
         * 1: The Device Policy Manager indicates that the power
         *    supply has reached the default level.
         */
        if (policy_check(dev, CHECK_SRC_PS_AT_DEFAULT_LEVEL)) {
                pe_set_state(dev, PE_SRC_STARTUP);
        }
        return SMF_EVENT_PROPAGATE;
}

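/**
 * @brief PE_SRC_Transition_to_default exit state: inform the Protocol
 *        Layer that the Hard Reset is complete.
 */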
void pe_src_transition_to_default_exit(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * On exit from the PE_SRC_Transition_to_default state the
         * Policy Engine Shall:
         * 1: request the Device Policy Manager to turn on VCONN
         * 2: inform the Protocol Layer that the Hard Reset is complete.
         *
         * NOTE: The Device Policy Manager turns on VCONN when it notifies the
         * PE that the Power Supply is at the default level.
         */
        prl_hard_reset_complete(dev);
}

/**
 * @brief 8.3.3.2.8 PE_SRC_Capability_Response State
 */
void pe_src_capability_response_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * On entry to the PE_SRC_Capability_Response state the Policy Engine
         * Shall request the Protocol Layer to send one of the following:
         */

        /*
         * 1: Reject Message – if the request cannot be met or the present
         *    Contract is Invalid.
         */
        if (pe->snk_request_reply == SNK_REQUEST_REJECT) {
                pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
        }
        /*
         * 2: Wait Message – if the request could be met later from the Power
         *    Reserve. A Wait Message Shall Not be sent if the present Contract
         *    is Invalid.
         */
        else {
                pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_WAIT);
        }
}

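/**
 * @brief PE_SRC_Capability_Response run state: after the Reject or Wait
 *        Message is sent, select the next state based on the Explicit
 *        Contract and the validity of the present Contract.
 */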
enum smf_state_result pe_src_capability_response_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /* Wait until message has been sent */
        if (!atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
                return SMF_EVENT_PROPAGATE;
        }

        /*
         * The Policy Engine Shall transition to the PE_SRC_Ready state when:
         * 1: There is an Explicit Contract AND
         * 2: A Reject Message has been sent and the present Contract
         *    is still Valid OR
         * 3: A Wait Message has been sent.
         */
        if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) &&
            ((pe->snk_request_reply == SNK_REQUEST_REJECT &&
              policy_present_contract_is_valid(dev, pe->present_contract)) ||
             (pe->snk_request_reply == SNK_REQUEST_WAIT))) {
                pe_set_state(dev, PE_SRC_READY);
        }
        /*
         * The Policy Engine Shall transition to the PE_SRC_Hard_Reset state
         * when:
         * 1: There is an Explicit Contract and
         * 2: The Reject Message has been sent and the present Contract
         *    is Invalid
         */
        else if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) &&
                 policy_present_contract_is_valid(dev, pe->present_contract) == false) {
                pe_set_state(dev, PE_SRC_HARD_RESET);
        }
        /*
         * The Policy Engine Shall transition to the PE_SRC_Wait_New_Capabilities
         * state when:
         * 1: There is no Explicit Contract and
         * 2: A Reject Message has been sent or
         * 3: A Wait Message has been sent.
         */
        else {
                /* 8.3.3.2.13 PE_SRC_Wait_New_Capabilities embedded here */

                /*
                 * In the PE_SRC_Wait_New_Capabilities State the Device Policy Manager
                 * Should either decide to send no further Source Capabilities or
                 * Should send a different set of Source Capabilities. Continuing
                 * to send the same set of Source Capabilities could result in a
                 * livelock situation.
                 */

                /* Notify DPM to send a different set of Source Capabilities */
                if (policy_change_src_caps(dev)) {
                        /* DPM will send a different set of Source Capabilities */
                        pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
                } else {
                        /*
                         * DPM can not send a different set of Source
                         * Capabilities, so disable the port.
                         */
                        pe_set_state(dev, PE_SUSPEND);
                }
        }
        return SMF_EVENT_PROPAGATE;
}

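/**
 * @brief Parent state entry for the Source Hard Reset states: initialize
 *        the Hard Reset submachine.
 */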
void pe_src_hard_reset_parent_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;

        pe->submachine = SM_HARD_RESET_START;
}

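/**
 * @brief Parent state run for the Source Hard Reset states: start the
 *        NoResponseTimer and PSHardResetTimer, then transition to
 *        PE_SRC_Transition_to_default when the PSHardResetTimer expires.
 */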
enum smf_state_result pe_src_hard_reset_parent_run(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        switch (pe->submachine) {
        case SM_HARD_RESET_START:
                /*
                 * Initialize and run the NoResponseTimer.
                 * Note that the NoResponseTimer Shall continue to run
                 * in every state until it is stopped or times out.
                 */
                usbc_timer_start(&pe->pd_t_no_response);

                /* Initialize and run the PSHardResetTimer */
                usbc_timer_start(&pe->pd_t_ps_hard_reset);

                pe->submachine = SM_HARD_RESET_WAIT;
                break;
        case SM_HARD_RESET_WAIT:
                /*
                 * The Policy Engine Shall transition to the
                 * PE_SRC_Transition_to_default state when:
                 * The PSHardResetTimer times out.
                 */
                if (usbc_timer_expired(&pe->pd_t_ps_hard_reset)) {
                        pe_set_state(dev, PE_SRC_TRANSITION_TO_DEFAULT);
                }
                break;
        }
        return SMF_EVENT_PROPAGATE;
}

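/**
 * @brief Parent state exit for the Source Hard Reset states: stop the
 *        PSHardResetTimer.
 */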
void pe_src_hard_reset_parent_exit(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;

        /* Stop the PSHardResetTimer */
        usbc_timer_stop(&pe->pd_t_ps_hard_reset);
}

/**
 * @brief 8.3.3.2.9 PE_SRC_Hard_Reset State
 */
void pe_src_hard_reset_entry(void *obj)
{
        struct policy_engine *pe = (struct policy_engine *)obj;
        const struct device *dev = pe->dev;

        /*
         * On entry to the PE_SRC_Hard_Reset state the
         * Policy Engine Shall:
         */

        /*
         * Request the generation of Hard Reset Signaling by
         * the PHY Layer
         */
        prl_execute_hard_reset(dev);
}
