Lines matching refs:omap_port. Each entry lists the source line number, the matched code, and the enclosing function; a trailing "local" or "argument" marks the line where omap_port is declared in that function.

35 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_wakein() local
36 return gpiod_get_value(omap_port->wake_gpio); in ssi_wakein()
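Taken together, lines 35-36 look like the entire body of the wake-in helper. A minimal sketch, assuming the driver's private struct omap_ssi_port definition plus the standard gpiod consumer API:

    #include <linux/gpio/consumer.h>
    #include <linux/hsi/hsi.h>

    static int ssi_wakein(struct hsi_port *port)
    {
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        /* level of the incoming (CAWAKE) wake line, read as a plain GPIO */
        return gpiod_get_value(omap_port->wake_gpio);
    }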
42 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_debug_remove_port() local
44 debugfs_remove_recursive(omap_port->dir); in ssi_debug_remove_port()
50 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_port_regs_show() local
56 pm_runtime_get_sync(omap_port->pdev); in ssi_port_regs_show()
57 if (omap_port->wake_irq > 0) in ssi_port_regs_show()
66 base = omap_port->sst_base; in ssi_port_regs_show()
86 for (ch = 0; ch < omap_port->channels; ch++) { in ssi_port_regs_show()
91 base = omap_port->ssr_base; in ssi_port_regs_show()
113 for (ch = 0; ch < omap_port->channels; ch++) { in ssi_port_regs_show()
117 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_port_regs_show()
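Lines 56 and 117 bracket the whole register dump with pm_runtime_get_sync()/pm_runtime_put_autosuspend(), and lines 66/91 switch the base pointer between the TX (sst_base) and RX (ssr_base) blocks while looping over omap_port->channels. A sketch of that shape, assuming a seq_file with the hsi_port stashed in m->private; the per-channel SSI_*_BUFFER_CH_REG() macros are hypothetical stand-ins for whatever registers are actually dumped:

    static int ssi_port_regs_show(struct seq_file *m, void *p)
    {
        struct hsi_port *port = m->private;     /* assumed */
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        void __iomem *base;
        unsigned int ch;

        pm_runtime_get_sync(omap_port->pdev);   /* clocks on before any readl() */

        base = omap_port->sst_base;             /* TX (SST) block */
        seq_printf(m, "SST MODE\t: 0x%08x\n", readl(base + SSI_SST_MODE_REG));
        for (ch = 0; ch < omap_port->channels; ch++)
            seq_printf(m, "SST BUFFER CH%u\t: 0x%08x\n", ch,
                       readl(base + SSI_SST_BUFFER_CH_REG(ch)));  /* hypothetical */

        base = omap_port->ssr_base;             /* RX (SSR) block */
        seq_printf(m, "SSR MODE\t: 0x%08x\n", readl(base + SSI_SSR_MODE_REG));
        for (ch = 0; ch < omap_port->channels; ch++)
            seq_printf(m, "SSR BUFFER CH%u\t: 0x%08x\n", ch,
                       readl(base + SSI_SSR_BUFFER_CH_REG(ch)));  /* hypothetical */

        pm_runtime_put_autosuspend(omap_port->pdev);

        return 0;
    }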
127 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_div_get() local
129 pm_runtime_get_sync(omap_port->pdev); in ssi_div_get()
130 *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); in ssi_div_get()
131 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_div_get()
139 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_div_set() local
144 pm_runtime_get_sync(omap_port->pdev); in ssi_div_set()
145 writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); in ssi_div_set()
146 omap_port->sst.divisor = val; in ssi_div_set()
147 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_div_set()
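Lines 127-131 and 139-147 give nearly the complete divisor accessors; the register accesses and runtime-PM calls below match the listing, while the range check and the debugfs wiring are assumptions:

    #include <linux/debugfs.h>
    #include <linux/pm_runtime.h>

    static int ssi_div_get(void *data, u64 *val)
    {
        struct hsi_port *port = data;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        pm_runtime_get_sync(omap_port->pdev);
        *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
        pm_runtime_put_autosuspend(omap_port->pdev);

        return 0;
    }

    static int ssi_div_set(void *data, u64 val)
    {
        struct hsi_port *port = data;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        if (val > 127)                  /* assumed divisor limit */
            return -EINVAL;

        pm_runtime_get_sync(omap_port->pdev);
        writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
        omap_port->sst.divisor = val;   /* cache for later context restore */
        pm_runtime_put_autosuspend(omap_port->pdev);

        return 0;
    }

    /* assumed wiring as a simple read/write debugfs attribute */
    DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

The cached sst.divisor written at line 146 is what ssi_restore_divisor() (line 1270) plays back after a context loss.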
154 static int ssi_debug_add_port(struct omap_ssi_port *omap_port, in ssi_debug_add_port() argument
157 struct hsi_port *port = to_hsi_port(omap_port->dev); in ssi_debug_add_port()
159 dir = debugfs_create_dir(dev_name(omap_port->dev), dir); in ssi_debug_add_port()
162 omap_port->dir = dir; in ssi_debug_add_port()
176 struct omap_ssi_port *omap_port; in ssi_process_errqueue() local
180 omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work); in ssi_process_errqueue()
182 list_for_each_safe(head, tmp, &omap_port->errqueue) { in ssi_process_errqueue()
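Line 180 recovers the port from the delayed work item and line 182 walks errqueue with list_for_each_safe(); a sketch of the drain loop, where the per-message completion callback and the unlinking are assumptions:

    static void ssi_process_errqueue(struct work_struct *work)
    {
        struct omap_ssi_port *omap_port;
        struct list_head *head, *tmp;
        struct hsi_msg *msg;

        omap_port = container_of(work, struct omap_ssi_port,
                                 errqueue_work.work);

        list_for_each_safe(head, tmp, &omap_port->errqueue) {
            msg = list_entry(head, struct hsi_msg, link);
            /* assumed: report the queued error to the client and drop it */
            msg->complete(msg);
            list_del(head);
        }
    }

Note that errqueue_work is set up with INIT_DEFERRABLE_WORK() (line 1179), hence the container_of() through errqueue_work.work.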
210 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_start_dma() local
222 pm_runtime_get(omap_port->pdev); in ssi_start_dma()
224 if (!pm_runtime_active(omap_port->pdev)) { in ssi_start_dma()
226 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_dma()
235 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_dma()
244 s_addr = omap_port->ssr_dma + in ssi_start_dma()
252 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_dma()
262 d_addr = omap_port->sst_dma + in ssi_start_dma()
289 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_start_pio() local
294 pm_runtime_get(omap_port->pdev); in ssi_start_pio()
296 if (!pm_runtime_active(omap_port->pdev)) { in ssi_start_pio()
298 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_pio()
305 pm_runtime_get(omap_port->pdev); in ssi_start_pio()
313 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_pio()
341 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_async_break() local
347 pm_runtime_get_sync(omap_port->pdev); in ssi_async_break()
349 if (omap_port->sst.mode != SSI_MODE_FRAME) { in ssi_async_break()
353 writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); in ssi_async_break()
357 if (omap_port->ssr.mode != SSI_MODE_FRAME) { in ssi_async_break()
361 spin_lock_bh(&omap_port->lock); in ssi_async_break()
367 list_add_tail(&msg->link, &omap_port->brkqueue); in ssi_async_break()
368 spin_unlock_bh(&omap_port->lock); in ssi_async_break()
371 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_async_break()
372 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_async_break()
380 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_async() local
393 BUG_ON(msg->channel >= omap_port->sst.channels); in ssi_async()
394 queue = &omap_port->txqueue[msg->channel]; in ssi_async()
396 BUG_ON(msg->channel >= omap_port->ssr.channels); in ssi_async()
397 queue = &omap_port->rxqueue[msg->channel]; in ssi_async()
401 pm_runtime_get_sync(omap_port->pdev); in ssi_async()
402 spin_lock_bh(&omap_port->lock); in ssi_async()
409 spin_unlock_bh(&omap_port->lock); in ssi_async()
410 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_async()
411 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_async()
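Lines 393-397 pick the per-channel TX or RX queue (guarded by a bounds BUG_ON) and lines 401-411 wrap the enqueue in the port lock and a runtime-PM get/put. A sketch of that path; the msg->ttype test and the list_add_tail() are assumptions, and the step that actually kicks off the transfer is omitted:

    static int ssi_async(struct hsi_msg *msg)
    {
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct list_head *queue;

        if (msg->ttype == HSI_MSG_WRITE) {
            BUG_ON(msg->channel >= omap_port->sst.channels);
            queue = &omap_port->txqueue[msg->channel];
        } else {
            BUG_ON(msg->channel >= omap_port->ssr.channels);
            queue = &omap_port->rxqueue[msg->channel];
        }

        pm_runtime_get_sync(omap_port->pdev);
        spin_lock_bh(&omap_port->lock);
        list_add_tail(&msg->link, queue);       /* assumed enqueue */
        spin_unlock_bh(&omap_port->lock);
        pm_runtime_mark_last_busy(omap_port->pdev);
        pm_runtime_put_autosuspend(omap_port->pdev);

        return 0;
    }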
457 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_setup() local
460 void __iomem *sst = omap_port->sst_base; in ssi_setup()
461 void __iomem *ssr = omap_port->ssr_base; in ssi_setup()
466 pm_runtime_get_sync(omap_port->pdev); in ssi_setup()
467 spin_lock_bh(&omap_port->lock); in ssi_setup()
493 if ((omap_port->ssr.mode == SSI_MODE_FRAME) && in ssi_setup()
495 ssi_flush_queue(&omap_port->brkqueue, cl); in ssi_setup()
497 omap_port->channels = max(cl->rx_cfg.num_hw_channels, in ssi_setup()
501 omap_port->sst.divisor = div; in ssi_setup()
502 omap_port->sst.frame_size = 31; in ssi_setup()
503 omap_port->sst.channels = cl->tx_cfg.num_hw_channels; in ssi_setup()
504 omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; in ssi_setup()
505 omap_port->sst.mode = cl->tx_cfg.mode; in ssi_setup()
507 omap_port->ssr.frame_size = 31; in ssi_setup()
508 omap_port->ssr.timeout = 0; in ssi_setup()
509 omap_port->ssr.channels = cl->rx_cfg.num_hw_channels; in ssi_setup()
510 omap_port->ssr.mode = cl->rx_cfg.mode; in ssi_setup()
512 spin_unlock_bh(&omap_port->lock); in ssi_setup()
513 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_setup()
514 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_setup()
522 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_flush() local
526 void __iomem *sst = omap_port->sst_base; in ssi_flush()
527 void __iomem *ssr = omap_port->ssr_base; in ssi_flush()
531 pm_runtime_get_sync(omap_port->pdev); in ssi_flush()
532 spin_lock_bh(&omap_port->lock); in ssi_flush()
535 pinctrl_pm_select_idle_state(omap_port->pdev); in ssi_flush()
545 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_flush()
566 for (i = 0; i < omap_port->channels; i++) { in ssi_flush()
568 if (!list_empty(&omap_port->txqueue[i])) in ssi_flush()
569 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_flush()
570 ssi_flush_queue(&omap_port->txqueue[i], NULL); in ssi_flush()
571 ssi_flush_queue(&omap_port->rxqueue[i], NULL); in ssi_flush()
573 ssi_flush_queue(&omap_port->brkqueue, NULL); in ssi_flush()
576 pinctrl_pm_select_default_state(omap_port->pdev); in ssi_flush()
578 spin_unlock_bh(&omap_port->lock); in ssi_flush()
579 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_flush()
580 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_flush()
587 struct omap_ssi_port *omap_port = in start_tx_work() local
589 struct hsi_port *port = to_hsi_port(omap_port->dev); in start_tx_work()
593 pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ in start_tx_work()
600 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_start_tx() local
602 dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); in ssi_start_tx()
604 spin_lock_bh(&omap_port->wk_lock); in ssi_start_tx()
605 if (omap_port->wk_refcount++) { in ssi_start_tx()
606 spin_unlock_bh(&omap_port->wk_lock); in ssi_start_tx()
609 spin_unlock_bh(&omap_port->wk_lock); in ssi_start_tx()
611 schedule_work(&omap_port->work); in ssi_start_tx()
619 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_stop_tx() local
623 dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); in ssi_stop_tx()
625 spin_lock_bh(&omap_port->wk_lock); in ssi_stop_tx()
626 BUG_ON(!omap_port->wk_refcount); in ssi_stop_tx()
627 if (--omap_port->wk_refcount) { in ssi_stop_tx()
628 spin_unlock_bh(&omap_port->wk_lock); in ssi_stop_tx()
632 spin_unlock_bh(&omap_port->wk_lock); in ssi_stop_tx()
634 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_stop_tx()
635 pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */ in ssi_stop_tx()
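ssi_start_tx() and ssi_stop_tx() (lines 602-635) form a refcounted pair around the outgoing wake line: the first user schedules start_tx_work() to grab the clocks (line 593), the last one releases them. A sketch assuming hsi_get_port() to reach the port from the client; the code that actually lowers the wake line in ssi_stop_tx() is elided:

    static int ssi_start_tx(struct hsi_client *cl)
    {
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

        spin_lock_bh(&omap_port->wk_lock);
        if (omap_port->wk_refcount++) {         /* wake already requested */
            spin_unlock_bh(&omap_port->wk_lock);
            return 0;
        }
        spin_unlock_bh(&omap_port->wk_lock);

        schedule_work(&omap_port->work);        /* start_tx_work() grabs clocks */

        return 0;
    }

    static int ssi_stop_tx(struct hsi_client *cl)
    {
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

        spin_lock_bh(&omap_port->wk_lock);
        BUG_ON(!omap_port->wk_refcount);
        if (--omap_port->wk_refcount) {         /* other clients still transmitting */
            spin_unlock_bh(&omap_port->wk_lock);
            return 0;
        }
        /* last user: lower the wake line here (elided) */
        spin_unlock_bh(&omap_port->wk_lock);

        pm_runtime_mark_last_busy(omap_port->pdev);
        pm_runtime_put_autosuspend(omap_port->pdev);    /* release clocks */

        return 0;
    }

Deferring the clock grab to a workqueue presumably keeps pm_runtime_get_sync(), which may sleep, out of callers that hold spinlocks.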
641 static void ssi_transfer(struct omap_ssi_port *omap_port, in ssi_transfer() argument
647 pm_runtime_get(omap_port->pdev); in ssi_transfer()
648 spin_lock_bh(&omap_port->lock); in ssi_transfer()
656 spin_unlock_bh(&omap_port->lock); in ssi_transfer()
658 spin_lock_bh(&omap_port->lock); in ssi_transfer()
661 spin_unlock_bh(&omap_port->lock); in ssi_transfer()
662 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_transfer()
663 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_transfer()
669 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_cleanup_queues() local
679 ssi_flush_queue(&omap_port->brkqueue, cl); in ssi_cleanup_queues()
680 if (list_empty(&omap_port->brkqueue)) in ssi_cleanup_queues()
683 for (i = 0; i < omap_port->channels; i++) { in ssi_cleanup_queues()
684 if (list_empty(&omap_port->txqueue[i])) in ssi_cleanup_queues()
686 msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, in ssi_cleanup_queues()
692 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_cleanup_queues()
693 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_cleanup_queues()
695 ssi_flush_queue(&omap_port->txqueue[i], cl); in ssi_cleanup_queues()
697 for (i = 0; i < omap_port->channels; i++) { in ssi_cleanup_queues()
698 if (list_empty(&omap_port->rxqueue[i])) in ssi_cleanup_queues()
700 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, in ssi_cleanup_queues()
706 ssi_flush_queue(&omap_port->rxqueue[i], cl); in ssi_cleanup_queues()
708 if (!list_empty(&omap_port->rxqueue[i])) in ssi_cleanup_queues()
712 tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); in ssi_cleanup_queues()
714 writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); in ssi_cleanup_queues()
716 tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); in ssi_cleanup_queues()
718 writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); in ssi_cleanup_queues()
731 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_cleanup_gdd() local
748 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_cleanup_gdd()
749 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_cleanup_gdd()
759 static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode) in ssi_set_port_mode() argument
761 writel(mode, omap_port->sst_base + SSI_SST_MODE_REG); in ssi_set_port_mode()
762 writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_set_port_mode()
764 mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_set_port_mode()
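Lines 761-764 are essentially the whole of ssi_set_port_mode(); only the trailing return is filled in here. Both directions get the same mode, and the read-back flushes the posted writes:

    static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
    {
        writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
        writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
        /* OCP barrier: read back so the writes have reached the module */
        mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

        return 0;
    }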
772 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_release() local
775 pm_runtime_get_sync(omap_port->pdev); in ssi_release()
776 spin_lock_bh(&omap_port->lock); in ssi_release()
787 if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) in ssi_release()
788 pm_runtime_put_sync(omap_port->pdev); in ssi_release()
789 pm_runtime_get(omap_port->pdev); in ssi_release()
791 ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); in ssi_release()
792 omap_port->sst.mode = SSI_MODE_SLEEP; in ssi_release()
793 omap_port->ssr.mode = SSI_MODE_SLEEP; in ssi_release()
794 pm_runtime_put(omap_port->pdev); in ssi_release()
795 WARN_ON(omap_port->wk_refcount != 0); in ssi_release()
797 spin_unlock_bh(&omap_port->lock); in ssi_release()
798 pm_runtime_put_sync(omap_port->pdev); in ssi_release()
807 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_error() local
817 err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); in ssi_error()
838 spin_lock(&omap_port->lock); in ssi_error()
843 writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); in ssi_error()
847 for (i = 0; i < omap_port->channels; i++) { in ssi_error()
848 if (list_empty(&omap_port->rxqueue[i])) in ssi_error()
850 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, in ssi_error()
854 spin_unlock(&omap_port->lock); in ssi_error()
857 ssi_transfer(omap_port, &omap_port->rxqueue[i]); in ssi_error()
858 spin_lock(&omap_port->lock); in ssi_error()
860 spin_unlock(&omap_port->lock); in ssi_error()
865 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_break_complete() local
874 spin_lock(&omap_port->lock); in ssi_break_complete()
878 writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); in ssi_break_complete()
881 spin_unlock(&omap_port->lock); in ssi_break_complete()
883 list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { in ssi_break_complete()
885 spin_lock(&omap_port->lock); in ssi_break_complete()
887 spin_unlock(&omap_port->lock); in ssi_break_complete()
897 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_pio_complete() local
903 spin_lock_bh(&omap_port->lock); in ssi_pio_complete()
916 writel(*buf, omap_port->sst_base + in ssi_pio_complete()
919 *buf = readl(omap_port->ssr_base + in ssi_pio_complete()
935 spin_unlock_bh(&omap_port->lock); in ssi_pio_complete()
945 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_pio_complete()
946 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_pio_complete()
952 spin_unlock_bh(&omap_port->lock); in ssi_pio_complete()
954 ssi_transfer(omap_port, queue); in ssi_pio_complete()
961 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_pio_thread() local
967 pm_runtime_get_sync(omap_port->pdev); in ssi_pio_thread()
973 for (ch = 0; ch < omap_port->channels; ch++) { in ssi_pio_thread()
975 ssi_pio_complete(port, &omap_port->txqueue[ch]); in ssi_pio_thread()
977 ssi_pio_complete(port, &omap_port->rxqueue[ch]); in ssi_pio_thread()
990 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_pio_thread()
991 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_pio_thread()
1000 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_wake_thread() local
1011 if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags)) in ssi_wake_thread()
1012 pm_runtime_get_sync(omap_port->pdev); in ssi_wake_thread()
1014 if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ in ssi_wake_thread()
1021 if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ in ssi_wake_thread()
1026 if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) { in ssi_wake_thread()
1027 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_wake_thread()
1028 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_wake_thread()
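Lines 1011-1028 show the CAWAKE edge handler keeping a pm_runtime reference for as long as the peer holds the wake line high, guarded by the SSI_WAKE_EN flag so the get and put stay balanced. A sketch of that shape; the hsi_event() notifications are assumptions, and the wktest hack visible at lines 1014/1021 is left out:

    #include <linux/interrupt.h>

    static irqreturn_t ssi_wake_thread(int irq, void *ssi_port)
    {
        struct hsi_port *port = ssi_port;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        if (ssi_wakein(port)) {
            /* rising edge: hold the device awake while the peer is up */
            if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
                pm_runtime_get_sync(omap_port->pdev);
            hsi_event(port, HSI_EVENT_START_RX);        /* assumed */
        } else {
            hsi_event(port, HSI_EVENT_STOP_RX);         /* assumed */
            /* falling edge: balance the get above */
            if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
                pm_runtime_mark_last_busy(omap_port->pdev);
                pm_runtime_put_autosuspend(omap_port->pdev);
            }
        }

        return IRQ_HANDLED;
    }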
1037 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_port_irq() local
1043 omap_port->irq = err; in ssi_port_irq()
1044 err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL, in ssi_port_irq()
1048 omap_port->irq, err); in ssi_port_irq()
1054 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_wake_irq() local
1058 if (!omap_port->wake_gpio) { in ssi_wake_irq()
1059 omap_port->wake_irq = -1; in ssi_wake_irq()
1063 cawake_irq = gpiod_to_irq(omap_port->wake_gpio); in ssi_wake_irq()
1064 omap_port->wake_irq = cawake_irq; in ssi_wake_irq()
1081 static void ssi_queues_init(struct omap_ssi_port *omap_port) in ssi_queues_init() argument
1086 INIT_LIST_HEAD(&omap_port->txqueue[ch]); in ssi_queues_init()
1087 INIT_LIST_HEAD(&omap_port->rxqueue[ch]); in ssi_queues_init()
1089 INIT_LIST_HEAD(&omap_port->brkqueue); in ssi_queues_init()
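Lines 1086-1089 fill out to a short init loop; the SSI_MAX_CHANNELS bound is an assumption about the size of the txqueue/rxqueue arrays:

    static void ssi_queues_init(struct omap_ssi_port *omap_port)
    {
        unsigned int ch;

        for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {     /* assumed bound */
            INIT_LIST_HEAD(&omap_port->txqueue[ch]);
            INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
        }
        INIT_LIST_HEAD(&omap_port->brkqueue);
    }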
1129 struct omap_ssi_port *omap_port; in ssi_port_probe() local
1170 omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); in ssi_port_probe()
1171 if (!omap_port) { in ssi_port_probe()
1175 omap_port->wake_gpio = cawake_gpio; in ssi_port_probe()
1176 omap_port->pdev = &pd->dev; in ssi_port_probe()
1177 omap_port->port_id = port_id; in ssi_port_probe()
1179 INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue); in ssi_port_probe()
1180 INIT_WORK(&omap_port->work, start_tx_work); in ssi_port_probe()
1189 hsi_port_set_drvdata(port, omap_port); in ssi_port_probe()
1190 omap_ssi->port[port_id] = omap_port; in ssi_port_probe()
1194 err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, in ssi_port_probe()
1195 &omap_port->sst_dma); in ssi_port_probe()
1198 err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, in ssi_port_probe()
1199 &omap_port->ssr_dma); in ssi_port_probe()
1210 ssi_queues_init(omap_port); in ssi_port_probe()
1211 spin_lock_init(&omap_port->lock); in ssi_port_probe()
1212 spin_lock_init(&omap_port->wk_lock); in ssi_port_probe()
1213 omap_port->dev = &port->device; in ssi_port_probe()
1215 pm_runtime_use_autosuspend(omap_port->pdev); in ssi_port_probe()
1216 pm_runtime_set_autosuspend_delay(omap_port->pdev, 250); in ssi_port_probe()
1217 pm_runtime_enable(omap_port->pdev); in ssi_port_probe()
1220 err = ssi_debug_add_port(omap_port, omap_ssi->dir); in ssi_port_probe()
1222 pm_runtime_disable(omap_port->pdev); in ssi_port_probe()
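Lines 1215-1222 show the probe-time runtime-PM bring-up: autosuspend with a 250 ms delay, pm_runtime_enable(), then debugfs registration, with pm_runtime_disable() undoing it on the error path. Collected here into a hypothetical helper; in the driver these calls sit inline in ssi_port_probe():

    /* hypothetical helper, not a function of the driver itself */
    static int ssi_port_pm_init(struct omap_ssi_port *omap_port,
                                struct dentry *ssi_debug_dir)
    {
        int err;

        pm_runtime_use_autosuspend(omap_port->pdev);
        pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
        pm_runtime_enable(omap_port->pdev);

        err = ssi_debug_add_port(omap_port, ssi_debug_dir);
        if (err < 0)
            pm_runtime_disable(omap_port->pdev);    /* roll back on failure */

        return err;
    }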
1240 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_port_remove() local
1248 cancel_delayed_work_sync(&omap_port->errqueue_work); in ssi_port_remove()
1259 omap_ssi->port[omap_port->port_id] = NULL; in ssi_port_remove()
1268 static int ssi_restore_divisor(struct omap_ssi_port *omap_port) in ssi_restore_divisor() argument
1270 writel_relaxed(omap_port->sst.divisor, in ssi_restore_divisor()
1271 omap_port->sst_base + SSI_SST_DIVISOR_REG); in ssi_restore_divisor()
1277 struct omap_ssi_port *omap_port) in omap_ssi_port_update_fclk() argument
1281 omap_port->sst.divisor = div; in omap_ssi_port_update_fclk()
1282 ssi_restore_divisor(omap_port); in omap_ssi_port_update_fclk()
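Lines 1270-1282 show the SST divisor being re-written from the cached omap_port->sst.divisor, both directly and when the functional clock changes. In the sketch below the recalculation of the divisor and the first parameter of omap_ssi_port_update_fclk() are assumptions:

    static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
    {
        writel_relaxed(omap_port->sst.divisor,
                       omap_port->sst_base + SSI_SST_DIVISOR_REG);

        return 0;
    }

    void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
                                   struct omap_ssi_port *omap_port)
    {
        u32 div = 0;    /* assumed: recomputed from the new fclk rate (elided) */

        omap_port->sst.divisor = div;
        ssi_restore_divisor(omap_port);
    }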
1286 static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) in ssi_save_port_ctx() argument
1288 struct hsi_port *port = to_hsi_port(omap_port->dev); in ssi_save_port_ctx()
1292 omap_port->sys_mpu_enable = readl(omap_ssi->sys + in ssi_save_port_ctx()
1298 static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port) in ssi_restore_port_ctx() argument
1300 struct hsi_port *port = to_hsi_port(omap_port->dev); in ssi_restore_port_ctx()
1305 writel_relaxed(omap_port->sys_mpu_enable, in ssi_restore_port_ctx()
1309 base = omap_port->sst_base; in ssi_restore_port_ctx()
1310 writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); in ssi_restore_port_ctx()
1311 writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); in ssi_restore_port_ctx()
1312 writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); in ssi_restore_port_ctx()
1315 base = omap_port->ssr_base; in ssi_restore_port_ctx()
1316 writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); in ssi_restore_port_ctx()
1317 writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); in ssi_restore_port_ctx()
1318 writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); in ssi_restore_port_ctx()
1323 static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) in ssi_restore_port_mode() argument
1327 writel_relaxed(omap_port->sst.mode, in ssi_restore_port_mode()
1328 omap_port->sst_base + SSI_SST_MODE_REG); in ssi_restore_port_mode()
1329 writel_relaxed(omap_port->ssr.mode, in ssi_restore_port_mode()
1330 omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_restore_port_mode()
1332 mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_restore_port_mode()
1340 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in omap_ssi_port_runtime_suspend() local
1346 ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); in omap_ssi_port_runtime_suspend()
1348 omap_port->loss_count = in omap_ssi_port_runtime_suspend()
1350 ssi_save_port_ctx(omap_port); in omap_ssi_port_runtime_suspend()
1358 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in omap_ssi_port_runtime_resume() local
1364 if ((omap_ssi->get_loss) && (omap_port->loss_count == in omap_ssi_port_runtime_resume()
1368 ssi_restore_port_ctx(omap_port); in omap_ssi_port_runtime_resume()
1371 ssi_restore_divisor(omap_port); in omap_ssi_port_runtime_resume()
1372 ssi_restore_port_mode(omap_port); in omap_ssi_port_runtime_resume()
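Lines 1346-1372 outline the runtime-PM hooks: suspend drops the port to SSI_MODE_SLEEP, samples the controller's context-loss counter and saves the port context; resume skips the full register restore when the loss count is unchanged but always restores the divisor and mode. A sketch under those assumptions (the drvdata lookups, the omap_ssi_controller type name, the get_loss() signature and the goto label are guesses):

    static int omap_ssi_port_runtime_suspend(struct device *dev)
    {
        struct hsi_port *port = dev_get_drvdata(dev);            /* assumed */
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);

        if (omap_ssi->get_loss)                                  /* assumed guard */
            omap_port->loss_count = omap_ssi->get_loss(ssi->device.parent);
        ssi_save_port_ctx(omap_port);

        return 0;
    }

    static int omap_ssi_port_runtime_resume(struct device *dev)
    {
        struct hsi_port *port = dev_get_drvdata(dev);            /* assumed */
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        /* context survived the power transition: only mode/divisor below */
        if ((omap_ssi->get_loss) && (omap_port->loss_count ==
                                omap_ssi->get_loss(ssi->device.parent)))
            goto mode;

        ssi_restore_port_ctx(omap_port);

    mode:
        ssi_restore_divisor(omap_port);
        ssi_restore_port_mode(omap_port);

        return 0;
    }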