Lines matching refs: hif, in the wfx Wi-Fi driver's bottom-half handling (grouped by function)
device_wakeup():
    29    if (!completion_done(&wdev->hif.ctrl_ready))
    38    if (wait_for_completion_timeout(&wdev->hif.ctrl_ready, msecs_to_jiffies(2))) {
    39            complete(&wdev->hif.ctrl_ready);
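These three lines implement the wakeup handshake: ctrl_ready acts as a latched "device is awake" flag that is polled in 2 ms slices and re-completed once it fires, so later callers still see it set. A minimal kernel-style sketch of that pattern, assuming a retry loop around the 2 ms wait (the loop bound and helper name are illustrative, not the driver's):

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Hedged sketch of the device_wakeup() wait pattern seen above. */
    static bool hif_wait_device_awake(struct completion *ctrl_ready, int tries)
    {
            if (completion_done(ctrl_ready))
                    return true;                    /* already awake (line 29) */
            while (tries--) {
                    /* poll in 2 ms slices (line 38) */
                    if (wait_for_completion_timeout(ctrl_ready, msecs_to_jiffies(2))) {
                            complete(ctrl_ready);   /* re-arm the latch (line 39) */
                            return true;
                    }
            }
            return false;
    }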
rx_helper():
    66    struct wfx_hif_msg *hif;   (local)
    86    hif = (struct wfx_hif_msg *)skb->data;
    87    WARN(hif->encrypted & 0x3, "encryption is unsupported");
    90    computed_len = le16_to_cpu(hif->len);
    96            hif, read_len, true);
    100   if (!(hif->id & HIF_ID_IS_INDICATION)) {
    102           if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
    104                   ((struct wfx_hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
    107   WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
    108   wdev->hif.tx_buffers_used -= release_count;
    110   _trace_hif_recv(hif, wdev->hif.tx_buffers_used);
    112   if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
    113           if (hif->seqnum != wdev->hif.rx_seqnum)
    115                   hif->seqnum, wdev->hif.rx_seqnum);
    116           wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
    119   skb_put(skb, le16_to_cpu(hif->len));
    122   if (!wdev->hif.tx_buffers_used)
    123           wake_up(&wdev->hif.tx_buffers_empty);
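Lines 100-108 do the tx buffer-credit release: an ordinary confirmation frees one device buffer, a HIF_CNF_ID_MULTI_TRANSMIT confirmation frees num_tx_confs at once, and lines 122-123 wake anyone waiting for all buffers to drain. A self-contained userspace model of that accounting (a sketch; names are hypothetical, and credits_used stands in for wdev->hif.tx_buffers_used):

    #include <stdio.h>

    static unsigned int credits_used;       /* stands in for hif.tx_buffers_used */

    static void release_tx_credits(unsigned int release_count)
    {
            if (credits_used < release_count) {     /* sanity check, line 107 */
                    fprintf(stderr, "corrupted buffer counter\n");
                    release_count = credits_used;
            }
            credits_used -= release_count;          /* line 108 */
            if (!credits_used)                      /* lines 122-123 */
                    printf("all device buffers free: wake waiters\n");
    }

    int main(void)
    {
            credits_used = 3;
            release_tx_credits(1);  /* ordinary confirmation */
            release_tx_credits(2);  /* multi-transmit, num_tx_confs == 2 */
            return 0;
    }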
bh_work_rx():
    143   else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
    144           ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
    159   ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
    160   complete(&wdev->hif.ctrl_ready);
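The two atomic_xchg() calls are a hand-off: the control word read at the end of one rx transfer "piggybacks" the next one, so line 159 stashes it and line 160 marks it consumable, while line 144 consumes it without another bus access. A userspace model of that exchange with C11 atomics (structure inferred from these four lines only):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint ctrl_reg;    /* stands in for wdev->hif.ctrl_reg */

    /* End of an rx transfer: stash the piggybacked control word (line 159). */
    static void stash_piggyback(unsigned int piggyback)
    {
            atomic_exchange(&ctrl_reg, piggyback);
            /* the driver then signals complete(&ctrl_ready), line 160 */
    }

    /* Next pass: consume the stashed word; zero means "read the bus" (line 144). */
    static unsigned int fetch_ctrl_word(void)
    {
            return atomic_exchange(&ctrl_reg, 0);
    }

    int main(void)
    {
            stash_piggyback(0x3001);                         /* made-up value */
            printf("ctrl word: 0x%x\n", fetch_ctrl_word());  /* 0x3001 */
            printf("ctrl word: 0x%x\n", fetch_ctrl_word());  /* 0x0: nothing stashed */
            return 0;
    }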
tx_helper():
    168   static void tx_helper(struct wfx_dev *wdev, struct wfx_hif_msg *hif)   (argument)
    173   size_t len = le16_to_cpu(hif->len);
    175   WARN(len < sizeof(*hif), "try to send corrupted data");
    177   hif->seqnum = wdev->hif.tx_seqnum;
    178   wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
    180   data = hif;
    189   wdev->hif.tx_buffers_used++;
    190   _trace_hif_send(hif, wdev->hif.tx_buffers_used);
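Lines 177-178 stamp each outgoing message with a sequence number that wraps modulo HIF_COUNTER_MAX + 1; the rx path checks the same counter at lines 113-116. A runnable illustration of the wrap (the value 7 for HIF_COUNTER_MAX is an assumption; only the modulo rule comes from the listing):

    #include <stdio.h>

    #define HIF_COUNTER_MAX 7       /* assumed value, not taken from the driver */

    int main(void)
    {
            unsigned int tx_seqnum = 0;
            for (int i = 0; i < 10; i++) {
                    printf("message %d gets seqnum %u\n", i, tx_seqnum);
                    /* line 178: advance modulo HIF_COUNTER_MAX + 1 */
                    tx_seqnum = (tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
            }
            return 0;
    }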
bh_work_tx():
    198   struct wfx_hif_msg *hif;   (local)
    202   hif = NULL;
    203   if (wdev->hif.tx_buffers_used < le16_to_cpu(wdev->hw_caps.num_inp_ch_bufs)) {
    206           hif = wdev->hif_cmd.buf_send;
    208           hif = wfx_tx_queues_get(wdev);
    211   if (!hif)
    213   tx_helper(wdev, hif);
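Lines 202-213 choose what to send next: nothing goes out unless a device input buffer is free (line 203), and a pending command in hif_cmd.buf_send takes priority over queued data frames. A hedged standalone sketch of that selection (types and helper names are stand-ins, not the driver's):

    #include <stdio.h>

    struct msg { const char *label; };

    static struct msg data_frame = { "data frame" };
    static struct msg *dequeue_data(void) { return &data_frame; }

    static struct msg *pick_next_tx(unsigned int buffers_used, unsigned int num_bufs,
                                    struct msg *pending_cmd,
                                    struct msg *(*dequeue)(void))
    {
            if (buffers_used >= num_bufs)   /* all device buffers in flight (line 203) */
                    return NULL;
            if (pending_cmd)                /* commands preempt data (line 206) */
                    return pending_cmd;
            return dequeue();               /* otherwise pull from tx queues (line 208) */
    }

    int main(void)
    {
            struct msg cmd = { "command" };
            struct msg *m = pick_next_tx(1, 4, &cmd, dequeue_data);
            printf("sending: %s\n", m ? m->label : "(nothing)");
            m = pick_next_tx(1, 4, NULL, dequeue_data);
            printf("sending: %s\n", m ? m->label : "(nothing)");
            return 0;
    }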
bh_work():
    235   struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
    255   if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
    259   _trace_bh_stats(stats_ind, stats_req, stats_cnf, wdev->hif.tx_buffers_used, release_chip);
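Line 235 is the standard workqueue idiom: the work item is embedded in the device structure and container_of() recovers the owner; line 255 then only lets the loop end (and the chip sleep) once no tx buffers are in flight and no new work is queued. A compilable userspace illustration of the idiom (the struct layout is a stand-in, not the real struct wfx_dev):

    #include <stddef.h>
    #include <stdio.h>

    /* userspace re-statement of the kernel's container_of() (line 235) */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_item { int pending; };

    struct dev {                    /* stand-in for struct wfx_dev */
            int tx_buffers_used;
            struct work_item bh;    /* stand-in for hif.bh */
    };

    static void bh_work(struct work_item *work)
    {
            struct dev *d = container_of(work, struct dev, bh);
            /* line 255: idle only when no buffers in flight and no pending work */
            if (!d->tx_buffers_used && !work->pending)
                    printf("idle: release chip\n");
    }

    int main(void)
    {
            struct dev d = { .tx_buffers_used = 0, .bh = { .pending = 0 } };
            bh_work(&d.bh);
            return 0;
    }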
wfx_bh_request_rx():
    268   prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
    269   complete(&wdev->hif.ctrl_ready);
    270   queue_work(wdev->bh_wq, &wdev->hif.bh);
wfx_bh_request_tx():
    283   queue_work(wdev->bh_wq, &wdev->hif.bh);
wfx_bh_register():
    316   INIT_WORK(&wdev->hif.bh, bh_work);
    317   init_completion(&wdev->hif.ctrl_ready);
    318   init_waitqueue_head(&wdev->hif.tx_buffers_empty);
wfx_bh_unregister():
    323   flush_work(&wdev->hif.bh);