Lines Matching refs:dev
59 static inline u32 mei_me_mecbrw_read(const struct mei_device *dev) in mei_me_mecbrw_read() argument
61 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW); in mei_me_mecbrw_read()
70 static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data) in mei_me_hcbww_write() argument
72 mei_me_reg_write(to_me_hw(dev), H_CB_WW, data); in mei_me_hcbww_write()
82 static inline u32 mei_me_mecsr_read(const struct mei_device *dev) in mei_me_mecsr_read() argument
86 reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA); in mei_me_mecsr_read()
87 trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg); in mei_me_mecsr_read()
99 static inline u32 mei_hcsr_read(const struct mei_device *dev) in mei_hcsr_read() argument
103 reg = mei_me_reg_read(to_me_hw(dev), H_CSR); in mei_hcsr_read()
104 trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg); in mei_hcsr_read()
115 static inline void mei_hcsr_write(struct mei_device *dev, u32 reg) in mei_hcsr_write() argument
117 trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg); in mei_hcsr_write()
118 mei_me_reg_write(to_me_hw(dev), H_CSR, reg); in mei_hcsr_write()
128 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg) in mei_hcsr_set() argument
131 mei_hcsr_write(dev, reg); in mei_hcsr_set()
139 static inline void mei_hcsr_set_hig(struct mei_device *dev) in mei_hcsr_set_hig() argument
143 hcsr = mei_hcsr_read(dev) | H_IG; in mei_hcsr_set_hig()
144 mei_hcsr_set(dev, hcsr); in mei_hcsr_set_hig()
154 static inline u32 mei_me_d0i3c_read(const struct mei_device *dev) in mei_me_d0i3c_read() argument
158 reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C); in mei_me_d0i3c_read()
159 trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg); in mei_me_d0i3c_read()
170 static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) in mei_me_d0i3c_write() argument
172 trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg); in mei_me_d0i3c_write()
173 mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg); in mei_me_d0i3c_write()
184 static int mei_me_trc_status(struct mei_device *dev, u32 *trc) in mei_me_trc_status() argument
186 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_trc_status()
192 trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc); in mei_me_trc_status()
205 static int mei_me_fw_status(struct mei_device *dev, in mei_me_fw_status() argument
208 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_fw_status()
218 ret = hw->read_fws(dev, fw_src->status[i], in mei_me_fw_status()
220 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X", in mei_me_fw_status()
240 static int mei_me_hw_config(struct mei_device *dev) in mei_me_hw_config() argument
242 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_hw_config()
249 hcsr = mei_hcsr_read(dev); in mei_me_hw_config()
253 hw->read_fws(dev, PCI_CFG_HFS_1, &reg); in mei_me_hw_config()
254 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); in mei_me_hw_config()
260 reg = mei_me_d0i3c_read(dev); in mei_me_hw_config()
276 static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) in mei_me_pg_state() argument
278 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_state()
295 static inline void me_intr_disable(struct mei_device *dev, u32 hcsr) in me_intr_disable() argument
298 mei_hcsr_set(dev, hcsr); in me_intr_disable()
307 static inline void me_intr_clear(struct mei_device *dev, u32 hcsr) in me_intr_clear() argument
310 mei_hcsr_write(dev, hcsr); in me_intr_clear()
318 static void mei_me_intr_clear(struct mei_device *dev) in mei_me_intr_clear() argument
320 u32 hcsr = mei_hcsr_read(dev); in mei_me_intr_clear()
322 me_intr_clear(dev, hcsr); in mei_me_intr_clear()
329 static void mei_me_intr_enable(struct mei_device *dev) in mei_me_intr_enable() argument
333 if (mei_me_hw_use_polling(to_me_hw(dev))) in mei_me_intr_enable()
336 hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK; in mei_me_intr_enable()
337 mei_hcsr_set(dev, hcsr); in mei_me_intr_enable()
345 static void mei_me_intr_disable(struct mei_device *dev) in mei_me_intr_disable() argument
347 u32 hcsr = mei_hcsr_read(dev); in mei_me_intr_disable()
349 me_intr_disable(dev, hcsr); in mei_me_intr_disable()
357 static void mei_me_synchronize_irq(struct mei_device *dev) in mei_me_synchronize_irq() argument
359 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_synchronize_irq()
372 static void mei_me_hw_reset_release(struct mei_device *dev) in mei_me_hw_reset_release() argument
374 u32 hcsr = mei_hcsr_read(dev); in mei_me_hw_reset_release()
378 mei_hcsr_set(dev, hcsr); in mei_me_hw_reset_release()
386 static void mei_me_host_set_ready(struct mei_device *dev) in mei_me_host_set_ready() argument
388 u32 hcsr = mei_hcsr_read(dev); in mei_me_host_set_ready()
390 if (!mei_me_hw_use_polling(to_me_hw(dev))) in mei_me_host_set_ready()
394 mei_hcsr_set(dev, hcsr); in mei_me_host_set_ready()
403 static bool mei_me_host_is_ready(struct mei_device *dev) in mei_me_host_is_ready() argument
405 u32 hcsr = mei_hcsr_read(dev); in mei_me_host_is_ready()
416 static bool mei_me_hw_is_ready(struct mei_device *dev) in mei_me_hw_is_ready() argument
418 u32 mecsr = mei_me_mecsr_read(dev); in mei_me_hw_is_ready()
429 static bool mei_me_hw_is_resetting(struct mei_device *dev) in mei_me_hw_is_resetting() argument
431 u32 mecsr = mei_me_mecsr_read(dev); in mei_me_hw_is_resetting()
441 static void mei_gsc_pxp_check(struct mei_device *dev) in mei_gsc_pxp_check() argument
443 struct mei_me_hw *hw = to_me_hw(dev); in mei_gsc_pxp_check()
446 if (!kind_is_gsc(dev) && !kind_is_gscfi(dev)) in mei_gsc_pxp_check()
449 hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5); in mei_gsc_pxp_check()
450 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5); in mei_gsc_pxp_check()
453 if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT) in mei_gsc_pxp_check()
454 dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_PERFORMED; in mei_gsc_pxp_check()
456 dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DEFAULT; in mei_gsc_pxp_check()
459 if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT) in mei_gsc_pxp_check()
463 dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5); in mei_gsc_pxp_check()
464 dev->pxp_mode = MEI_DEV_PXP_READY; in mei_gsc_pxp_check()
466 dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5); in mei_gsc_pxp_check()
477 static int mei_me_hw_ready_wait(struct mei_device *dev) in mei_me_hw_ready_wait() argument
479 mutex_unlock(&dev->device_lock); in mei_me_hw_ready_wait()
480 wait_event_timeout(dev->wait_hw_ready, in mei_me_hw_ready_wait()
481 dev->recvd_hw_ready, in mei_me_hw_ready_wait()
482 dev->timeouts.hw_ready); in mei_me_hw_ready_wait()
483 mutex_lock(&dev->device_lock); in mei_me_hw_ready_wait()
484 if (!dev->recvd_hw_ready) { in mei_me_hw_ready_wait()
485 dev_err(dev->dev, "wait hw ready failed\n"); in mei_me_hw_ready_wait()
489 mei_gsc_pxp_check(dev); in mei_me_hw_ready_wait()
491 mei_me_hw_reset_release(dev); in mei_me_hw_ready_wait()
492 dev->recvd_hw_ready = false; in mei_me_hw_ready_wait()
501 static void mei_me_check_fw_reset(struct mei_device *dev) in mei_me_check_fw_reset() argument
508 if (!dev->saved_fw_status_flag) in mei_me_check_fw_reset()
511 if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) { in mei_me_check_fw_reset()
512 ret = mei_fw_status(dev, &fw_status); in mei_me_check_fw_reset()
519 dev_err(dev->dev, "failed to read firmware status: %d\n", ret); in mei_me_check_fw_reset()
523 mei_fw_status2str(&dev->saved_fw_status, fw_sts_str, sizeof(fw_sts_str)); in mei_me_check_fw_reset()
524 dev_warn(dev->dev, "unexpected reset: fw_pm_event = 0x%x, dev_state = %u fw status = %s\n", in mei_me_check_fw_reset()
525 fw_pm_event, dev->saved_dev_state, fw_sts_str); in mei_me_check_fw_reset()
528 if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) in mei_me_check_fw_reset()
529 dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE; in mei_me_check_fw_reset()
530 dev->saved_fw_status_flag = false; in mei_me_check_fw_reset()
539 static int mei_me_hw_start(struct mei_device *dev) in mei_me_hw_start() argument
541 int ret = mei_me_hw_ready_wait(dev); in mei_me_hw_start()
543 if (kind_is_gsc(dev) || kind_is_gscfi(dev)) in mei_me_hw_start()
544 mei_me_check_fw_reset(dev); in mei_me_hw_start()
547 dev_dbg(dev->dev, "hw is ready\n"); in mei_me_hw_start()
549 mei_me_host_set_ready(dev); in mei_me_hw_start()
561 static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) in mei_hbuf_filled_slots() argument
566 hcsr = mei_hcsr_read(dev); in mei_hbuf_filled_slots()
581 static bool mei_me_hbuf_is_empty(struct mei_device *dev) in mei_me_hbuf_is_empty() argument
583 return mei_hbuf_filled_slots(dev) == 0; in mei_me_hbuf_is_empty()
593 static int mei_me_hbuf_empty_slots(struct mei_device *dev) in mei_me_hbuf_empty_slots() argument
595 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_hbuf_empty_slots()
598 filled_slots = mei_hbuf_filled_slots(dev); in mei_me_hbuf_empty_slots()
615 static u32 mei_me_hbuf_depth(const struct mei_device *dev) in mei_me_hbuf_depth() argument
617 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_hbuf_depth()
633 static int mei_me_hbuf_write(struct mei_device *dev, in mei_me_hbuf_write() argument
647 dev_err(dev->dev, "wrong parameters null data with data_len = %zu\n", data_len); in mei_me_hbuf_write()
651 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); in mei_me_hbuf_write()
653 empty_slots = mei_hbuf_empty_slots(dev); in mei_me_hbuf_write()
654 dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots); in mei_me_hbuf_write()
665 mei_me_hcbww_write(dev, reg_buf[i]); in mei_me_hbuf_write()
669 mei_me_hcbww_write(dev, reg_buf[i]); in mei_me_hbuf_write()
676 mei_me_hcbww_write(dev, reg); in mei_me_hbuf_write()
679 mei_hcsr_set_hig(dev); in mei_me_hbuf_write()
680 if (!mei_me_hw_is_ready(dev)) in mei_me_hbuf_write()
693 static int mei_me_count_full_read_slots(struct mei_device *dev) in mei_me_count_full_read_slots() argument
699 me_csr = mei_me_mecsr_read(dev); in mei_me_count_full_read_slots()
709 dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots); in mei_me_count_full_read_slots()
722 static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, in mei_me_read_slots() argument
728 *reg_buf++ = mei_me_mecbrw_read(dev); in mei_me_read_slots()
731 u32 reg = mei_me_mecbrw_read(dev); in mei_me_read_slots()
736 mei_hcsr_set_hig(dev); in mei_me_read_slots()
745 static void mei_me_pg_set(struct mei_device *dev) in mei_me_pg_set() argument
747 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_set()
751 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); in mei_me_pg_set()
755 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); in mei_me_pg_set()
764 static void mei_me_pg_unset(struct mei_device *dev) in mei_me_pg_unset() argument
766 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_unset()
770 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); in mei_me_pg_unset()
776 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); in mei_me_pg_unset()
787 static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) in mei_me_pg_legacy_enter_sync() argument
789 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_legacy_enter_sync()
792 dev->pg_event = MEI_PG_EVENT_WAIT; in mei_me_pg_legacy_enter_sync()
794 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); in mei_me_pg_legacy_enter_sync()
798 mutex_unlock(&dev->device_lock); in mei_me_pg_legacy_enter_sync()
799 wait_event_timeout(dev->wait_pg, in mei_me_pg_legacy_enter_sync()
800 dev->pg_event == MEI_PG_EVENT_RECEIVED, in mei_me_pg_legacy_enter_sync()
801 dev->timeouts.pgi); in mei_me_pg_legacy_enter_sync()
802 mutex_lock(&dev->device_lock); in mei_me_pg_legacy_enter_sync()
804 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { in mei_me_pg_legacy_enter_sync()
805 mei_me_pg_set(dev); in mei_me_pg_legacy_enter_sync()
811 dev->pg_event = MEI_PG_EVENT_IDLE; in mei_me_pg_legacy_enter_sync()
824 static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) in mei_me_pg_legacy_exit_sync() argument
826 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_legacy_exit_sync()
829 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) in mei_me_pg_legacy_exit_sync()
832 dev->pg_event = MEI_PG_EVENT_WAIT; in mei_me_pg_legacy_exit_sync()
834 mei_me_pg_unset(dev); in mei_me_pg_legacy_exit_sync()
836 mutex_unlock(&dev->device_lock); in mei_me_pg_legacy_exit_sync()
837 wait_event_timeout(dev->wait_pg, in mei_me_pg_legacy_exit_sync()
838 dev->pg_event == MEI_PG_EVENT_RECEIVED, in mei_me_pg_legacy_exit_sync()
839 dev->timeouts.pgi); in mei_me_pg_legacy_exit_sync()
840 mutex_lock(&dev->device_lock); in mei_me_pg_legacy_exit_sync()
843 if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { in mei_me_pg_legacy_exit_sync()
848 dev->pg_event = MEI_PG_EVENT_INTR_WAIT; in mei_me_pg_legacy_exit_sync()
849 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); in mei_me_pg_legacy_exit_sync()
853 mutex_unlock(&dev->device_lock); in mei_me_pg_legacy_exit_sync()
854 wait_event_timeout(dev->wait_pg, in mei_me_pg_legacy_exit_sync()
855 dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, in mei_me_pg_legacy_exit_sync()
856 dev->timeouts.pgi); in mei_me_pg_legacy_exit_sync()
857 mutex_lock(&dev->device_lock); in mei_me_pg_legacy_exit_sync()
859 if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED) in mei_me_pg_legacy_exit_sync()
865 dev->pg_event = MEI_PG_EVENT_IDLE; in mei_me_pg_legacy_exit_sync()
878 static bool mei_me_pg_in_transition(struct mei_device *dev) in mei_me_pg_in_transition() argument
880 return dev->pg_event >= MEI_PG_EVENT_WAIT && in mei_me_pg_in_transition()
881 dev->pg_event <= MEI_PG_EVENT_INTR_WAIT; in mei_me_pg_in_transition()
891 static bool mei_me_pg_is_enabled(struct mei_device *dev) in mei_me_pg_is_enabled() argument
893 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_is_enabled()
894 u32 reg = mei_me_mecsr_read(dev); in mei_me_pg_is_enabled()
902 if (!dev->hbm_f_pg_supported) in mei_me_pg_is_enabled()
908 dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n", in mei_me_pg_is_enabled()
911 dev->version.major_version, in mei_me_pg_is_enabled()
912 dev->version.minor_version, in mei_me_pg_is_enabled()
927 static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr) in mei_me_d0i3_set() argument
929 u32 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_set()
936 mei_me_d0i3c_write(dev, reg); in mei_me_d0i3_set()
938 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_set()
949 static u32 mei_me_d0i3_unset(struct mei_device *dev) in mei_me_d0i3_unset() argument
951 u32 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_unset()
955 mei_me_d0i3c_write(dev, reg); in mei_me_d0i3_unset()
957 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_unset()
968 static int mei_me_d0i3_enter_sync(struct mei_device *dev) in mei_me_d0i3_enter_sync() argument
970 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_d0i3_enter_sync()
974 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_enter_sync()
977 dev_dbg(dev->dev, "d0i3 set not needed\n"); in mei_me_d0i3_enter_sync()
983 dev->pg_event = MEI_PG_EVENT_WAIT; in mei_me_d0i3_enter_sync()
985 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); in mei_me_d0i3_enter_sync()
990 mutex_unlock(&dev->device_lock); in mei_me_d0i3_enter_sync()
991 wait_event_timeout(dev->wait_pg, in mei_me_d0i3_enter_sync()
992 dev->pg_event == MEI_PG_EVENT_RECEIVED, in mei_me_d0i3_enter_sync()
993 dev->timeouts.pgi); in mei_me_d0i3_enter_sync()
994 mutex_lock(&dev->device_lock); in mei_me_d0i3_enter_sync()
996 if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { in mei_me_d0i3_enter_sync()
1002 dev->pg_event = MEI_PG_EVENT_INTR_WAIT; in mei_me_d0i3_enter_sync()
1004 reg = mei_me_d0i3_set(dev, true); in mei_me_d0i3_enter_sync()
1006 dev_dbg(dev->dev, "d0i3 enter wait not needed\n"); in mei_me_d0i3_enter_sync()
1011 mutex_unlock(&dev->device_lock); in mei_me_d0i3_enter_sync()
1012 wait_event_timeout(dev->wait_pg, in mei_me_d0i3_enter_sync()
1013 dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, in mei_me_d0i3_enter_sync()
1014 dev->timeouts.d0i3); in mei_me_d0i3_enter_sync()
1015 mutex_lock(&dev->device_lock); in mei_me_d0i3_enter_sync()
1017 if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { in mei_me_d0i3_enter_sync()
1018 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_enter_sync()
1029 dev->pg_event = MEI_PG_EVENT_IDLE; in mei_me_d0i3_enter_sync()
1030 dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret); in mei_me_d0i3_enter_sync()
1044 static int mei_me_d0i3_enter(struct mei_device *dev) in mei_me_d0i3_enter() argument
1046 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_d0i3_enter()
1049 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_enter()
1052 dev_dbg(dev->dev, "already d0i3 : set not needed\n"); in mei_me_d0i3_enter()
1056 mei_me_d0i3_set(dev, false); in mei_me_d0i3_enter()
1059 dev->pg_event = MEI_PG_EVENT_IDLE; in mei_me_d0i3_enter()
1060 dev_dbg(dev->dev, "d0i3 enter\n"); in mei_me_d0i3_enter()
1071 static int mei_me_d0i3_exit_sync(struct mei_device *dev) in mei_me_d0i3_exit_sync() argument
1073 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_d0i3_exit_sync()
1077 dev->pg_event = MEI_PG_EVENT_INTR_WAIT; in mei_me_d0i3_exit_sync()
1079 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_exit_sync()
1082 dev_dbg(dev->dev, "d0i3 exit not needed\n"); in mei_me_d0i3_exit_sync()
1087 reg = mei_me_d0i3_unset(dev); in mei_me_d0i3_exit_sync()
1089 dev_dbg(dev->dev, "d0i3 exit wait not needed\n"); in mei_me_d0i3_exit_sync()
1094 mutex_unlock(&dev->device_lock); in mei_me_d0i3_exit_sync()
1095 wait_event_timeout(dev->wait_pg, in mei_me_d0i3_exit_sync()
1096 dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, in mei_me_d0i3_exit_sync()
1097 dev->timeouts.d0i3); in mei_me_d0i3_exit_sync()
1098 mutex_lock(&dev->device_lock); in mei_me_d0i3_exit_sync()
1100 if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { in mei_me_d0i3_exit_sync()
1101 reg = mei_me_d0i3c_read(dev); in mei_me_d0i3_exit_sync()
1112 dev->pg_event = MEI_PG_EVENT_IDLE; in mei_me_d0i3_exit_sync()
1114 dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret); in mei_me_d0i3_exit_sync()
1124 static void mei_me_pg_legacy_intr(struct mei_device *dev) in mei_me_pg_legacy_intr() argument
1126 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_legacy_intr()
1128 if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT) in mei_me_pg_legacy_intr()
1131 dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED; in mei_me_pg_legacy_intr()
1133 if (waitqueue_active(&dev->wait_pg)) in mei_me_pg_legacy_intr()
1134 wake_up(&dev->wait_pg); in mei_me_pg_legacy_intr()
1143 static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source) in mei_me_d0i3_intr() argument
1145 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_d0i3_intr()
1147 if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT && in mei_me_d0i3_intr()
1149 dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED; in mei_me_d0i3_intr()
1152 if (dev->hbm_state != MEI_HBM_IDLE) { in mei_me_d0i3_intr()
1157 dev_dbg(dev->dev, "d0i3 set host ready\n"); in mei_me_d0i3_intr()
1158 mei_me_host_set_ready(dev); in mei_me_d0i3_intr()
1164 wake_up(&dev->wait_pg); in mei_me_d0i3_intr()
1173 dev_dbg(dev->dev, "d0i3 want resume\n"); in mei_me_d0i3_intr()
1174 mei_hbm_pg_resume(dev); in mei_me_d0i3_intr()
1184 static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source) in mei_me_pg_intr() argument
1186 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_intr()
1189 mei_me_d0i3_intr(dev, intr_source); in mei_me_pg_intr()
1191 mei_me_pg_legacy_intr(dev); in mei_me_pg_intr()
1201 int mei_me_pg_enter_sync(struct mei_device *dev) in mei_me_pg_enter_sync() argument
1203 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_enter_sync()
1206 return mei_me_d0i3_enter_sync(dev); in mei_me_pg_enter_sync()
1208 return mei_me_pg_legacy_enter_sync(dev); in mei_me_pg_enter_sync()
1218 int mei_me_pg_exit_sync(struct mei_device *dev) in mei_me_pg_exit_sync() argument
1220 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_pg_exit_sync()
1223 return mei_me_d0i3_exit_sync(dev); in mei_me_pg_exit_sync()
1225 return mei_me_pg_legacy_exit_sync(dev); in mei_me_pg_exit_sync()
1236 static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) in mei_me_hw_reset() argument
1238 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_hw_reset()
1243 mei_me_intr_enable(dev); in mei_me_hw_reset()
1245 ret = mei_me_d0i3_exit_sync(dev); in mei_me_hw_reset()
1253 pm_runtime_set_active(dev->dev); in mei_me_hw_reset()
1255 hcsr = mei_hcsr_read(dev); in mei_me_hw_reset()
1262 dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); in mei_me_hw_reset()
1264 mei_hcsr_set(dev, hcsr); in mei_me_hw_reset()
1265 hcsr = mei_hcsr_read(dev); in mei_me_hw_reset()
1270 if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev))) in mei_me_hw_reset()
1273 dev->recvd_hw_ready = false; in mei_me_hw_reset()
1274 mei_hcsr_write(dev, hcsr); in mei_me_hw_reset()
1280 hcsr = mei_hcsr_read(dev); in mei_me_hw_reset()
1283 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); in mei_me_hw_reset()
1286 dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); in mei_me_hw_reset()
1289 mei_me_hw_reset_release(dev); in mei_me_hw_reset()
1291 ret = mei_me_d0i3_enter(dev); in mei_me_hw_reset()
1309 struct mei_device *dev = (struct mei_device *)dev_id; in mei_me_irq_quick_handler() local
1312 hcsr = mei_hcsr_read(dev); in mei_me_irq_quick_handler()
1316 dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr)); in mei_me_irq_quick_handler()
1319 me_intr_disable(dev, hcsr); in mei_me_irq_quick_handler()
1336 struct mei_device *dev = (struct mei_device *) dev_id; in mei_me_irq_thread_handler() local
1342 dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n"); in mei_me_irq_thread_handler()
1344 mutex_lock(&dev->device_lock); in mei_me_irq_thread_handler()
1346 hcsr = mei_hcsr_read(dev); in mei_me_irq_thread_handler()
1347 me_intr_clear(dev, hcsr); in mei_me_irq_thread_handler()
1352 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { in mei_me_irq_thread_handler()
1353 if (kind_is_gsc(dev) || kind_is_gscfi(dev)) { in mei_me_irq_thread_handler()
1354 dev_dbg(dev->dev, "FW not ready: resetting: dev_state = %d\n", in mei_me_irq_thread_handler()
1355 dev->dev_state); in mei_me_irq_thread_handler()
1357 dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d\n", in mei_me_irq_thread_handler()
1358 dev->dev_state); in mei_me_irq_thread_handler()
1360 if (dev->dev_state == MEI_DEV_POWERING_DOWN || in mei_me_irq_thread_handler()
1361 dev->dev_state == MEI_DEV_POWER_DOWN) in mei_me_irq_thread_handler()
1362 mei_cl_all_disconnect(dev); in mei_me_irq_thread_handler()
1363 else if (dev->dev_state != MEI_DEV_DISABLED) in mei_me_irq_thread_handler()
1364 schedule_work(&dev->reset_work); in mei_me_irq_thread_handler()
1368 if (mei_me_hw_is_resetting(dev)) in mei_me_irq_thread_handler()
1369 mei_hcsr_set_hig(dev); in mei_me_irq_thread_handler()
1371 mei_me_pg_intr(dev, me_intr_src(hcsr)); in mei_me_irq_thread_handler()
1374 if (!mei_host_is_ready(dev)) { in mei_me_irq_thread_handler()
1375 if (mei_hw_is_ready(dev)) { in mei_me_irq_thread_handler()
1376 dev_dbg(dev->dev, "we need to start the dev.\n"); in mei_me_irq_thread_handler()
1377 dev->recvd_hw_ready = true; in mei_me_irq_thread_handler()
1378 wake_up(&dev->wait_hw_ready); in mei_me_irq_thread_handler()
1380 dev_dbg(dev->dev, "Spurious Interrupt\n"); in mei_me_irq_thread_handler()
1385 slots = mei_count_full_read_slots(dev); in mei_me_irq_thread_handler()
1387 dev_dbg(dev->dev, "slots to read = %08x\n", slots); in mei_me_irq_thread_handler()
1388 rets = mei_irq_read_handler(dev, &cmpl_list, &slots); in mei_me_irq_thread_handler()
1397 dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n", in mei_me_irq_thread_handler()
1398 rets, dev->dev_state); in mei_me_irq_thread_handler()
1399 if (dev->dev_state != MEI_DEV_RESETTING && in mei_me_irq_thread_handler()
1400 dev->dev_state != MEI_DEV_DISABLED && in mei_me_irq_thread_handler()
1401 dev->dev_state != MEI_DEV_POWERING_DOWN && in mei_me_irq_thread_handler()
1402 dev->dev_state != MEI_DEV_POWER_DOWN) in mei_me_irq_thread_handler()
1403 schedule_work(&dev->reset_work); in mei_me_irq_thread_handler()
1408 dev->hbuf_is_ready = mei_hbuf_is_ready(dev); in mei_me_irq_thread_handler()
1415 if (dev->pg_event != MEI_PG_EVENT_WAIT && in mei_me_irq_thread_handler()
1416 dev->pg_event != MEI_PG_EVENT_RECEIVED) { in mei_me_irq_thread_handler()
1417 rets = mei_irq_write_handler(dev, &cmpl_list); in mei_me_irq_thread_handler()
1418 dev->hbuf_is_ready = mei_hbuf_is_ready(dev); in mei_me_irq_thread_handler()
1421 mei_irq_compl_handler(dev, &cmpl_list); in mei_me_irq_thread_handler()
1424 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); in mei_me_irq_thread_handler()
1425 mei_me_intr_enable(dev); in mei_me_irq_thread_handler()
1426 mutex_unlock(&dev->device_lock); in mei_me_irq_thread_handler()
1452 struct mei_device *dev = _dev; in mei_me_polling_thread() local
1456 dev_dbg(dev->dev, "kernel thread is running\n"); in mei_me_polling_thread()
1458 struct mei_me_hw *hw = to_me_hw(dev); in mei_me_polling_thread()
1468 hcsr = mei_hcsr_read(dev); in mei_me_polling_thread()
1471 irq_ret = mei_me_irq_thread_handler(1, dev); in mei_me_polling_thread()
1473 dev_err(dev->dev, "irq_ret %d\n", irq_ret); in mei_me_polling_thread()
1541 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg); in mei_me_fw_type_nm()
1567 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); in mei_me_fw_type_sps_4()
1592 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg); in mei_me_fw_type_sps_ign()
1595 dev_dbg(&pdev->dev, "fw type is %d\n", fw_type); in mei_me_fw_type_sps_ign()
1803 struct mei_device *dev; in mei_me_dev_init() local
1807 dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL); in mei_me_dev_init()
1808 if (!dev) in mei_me_dev_init()
1811 hw = to_me_hw(dev); in mei_me_dev_init()
1814 dev->dr_dscr[i].size = cfg->dma_size[i]; in mei_me_dev_init()
1816 mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops); in mei_me_dev_init()
1819 dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; in mei_me_dev_init()
1821 dev->kind = cfg->kind; in mei_me_dev_init()
1823 return dev; in mei_me_dev_init()
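
Taken together, the references above follow one recurring idiom: a helper resolves the hardware view with to_me_hw(dev), then performs a traced read-modify-write of a host register such as H_CSR (see mei_me_intr_enable() and mei_hcsr_set_hig() in the listing). Below is a minimal user-space sketch of that idiom, assuming simplified stand-in types and values; the register offsets, the bit mask, and the mei_device_sketch structure are illustrative only and are not the kernel's definitions.

/* Minimal sketch of the traced H_CSR read-modify-write idiom seen above.
 * All offsets, masks, and types here are simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define H_CSR          0x04        /* host control/status register offset (illustrative) */
#define H_CSR_IE_MASK  0x00000001  /* interrupt-enable bit (illustrative value) */

struct mei_device_sketch {
	uint32_t regs[16];         /* stand-in for the memory-mapped register block */
};

static uint32_t reg_read(struct mei_device_sketch *dev, unsigned int offset)
{
	uint32_t val = dev->regs[offset / 4];

	/* printf stands in for trace_mei_reg_read() */
	printf("read  %#x -> %#010x\n", offset, (unsigned int)val);
	return val;
}

static void reg_write(struct mei_device_sketch *dev, unsigned int offset, uint32_t val)
{
	/* printf stands in for trace_mei_reg_write() */
	printf("write %#x <- %#010x\n", offset, (unsigned int)val);
	dev->regs[offset / 4] = val;
}

/* Mirrors the shape of mei_me_intr_enable(): read H_CSR, OR in the
 * interrupt-enable bits, write the result back. */
static void intr_enable_sketch(struct mei_device_sketch *dev)
{
	uint32_t hcsr = reg_read(dev, H_CSR) | H_CSR_IE_MASK;

	reg_write(dev, H_CSR, hcsr);
}

int main(void)
{
	struct mei_device_sketch dev = { .regs = { 0 } };

	intr_enable_sketch(&dev);
	return 0;
}

The same pattern, with trace_mei_reg_read()/trace_mei_reg_write() in place of the printf stand-ins and the real H_CSR/H_D0I3C definitions, accounts for most of the dev references listed above.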