Lines matching refs:mhi_netdev (identifier cross-reference for the Linux MHI network driver, drivers/net/mhi_net.c). The leading number on each line is the kernel source line; the trailing note names the enclosing function.

49 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); in mhi_ndo_open() local
52 schedule_delayed_work(&mhi_netdev->rx_refill, 0); in mhi_ndo_open()
64 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); in mhi_ndo_stop() local
68 cancel_delayed_work_sync(&mhi_netdev->rx_refill); in mhi_ndo_stop()
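
The two handlers above follow the standard delayed-work lifecycle: mhi_ndo_open() schedules the RX refill worker immediately, and mhi_ndo_stop() cancels it synchronously so no refill pass can run against a stopped queue. Below is a minimal sketch of that pattern, with struct mhi_net_dev trimmed down to the fields this listing actually touches (a reconstruction for illustration, not the verbatim driver source):

#include <linux/mhi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>

/* Per-direction counters, each set guarded by its own syncp. */
struct mhi_net_stats {
        u64_stats_t rx_packets;
        u64_stats_t rx_bytes;
        u64_stats_t rx_errors;
        u64_stats_t tx_packets;
        u64_stats_t tx_bytes;
        u64_stats_t tx_errors;
        u64_stats_t tx_dropped;
        struct u64_stats_sync rx_syncp;
        struct u64_stats_sync tx_syncp;
};

/* Trimmed to the fields referenced in this listing. */
struct mhi_net_dev {
        struct mhi_device *mdev;
        struct net_device *ndev;
        struct sk_buff *skbagg_head;
        struct sk_buff *skbagg_tail;
        struct delayed_work rx_refill;
        struct mhi_net_stats stats;
        u32 rx_queue_sz;
        unsigned int mru;
};

static int sketch_ndo_open(struct net_device *ndev)
{
        struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

        /* Kick the refill worker right away to post RX buffers. */
        schedule_delayed_work(&mhi_netdev->rx_refill, 0);
        netif_start_queue(ndev);
        return 0;
}

static int sketch_ndo_stop(struct net_device *ndev)
{
        struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

        netif_stop_queue(ndev);
        /* Synchronous cancel: waits out an in-flight refill pass. */
        cancel_delayed_work_sync(&mhi_netdev->rx_refill);
        return 0;
}
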
75 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); in mhi_ndo_xmit() local
76 struct mhi_device *mdev = mhi_netdev->mdev; in mhi_ndo_xmit()
93 u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); in mhi_ndo_xmit()
94 u64_stats_inc(&mhi_netdev->stats.tx_dropped); in mhi_ndo_xmit()
95 u64_stats_update_end(&mhi_netdev->stats.tx_syncp); in mhi_ndo_xmit()
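
Lines 93-95 are the writer side of the u64_stats API: u64_stats_update_begin()/_end() compile away on 64-bit kernels and become a seqcount on 32-bit, keeping the 64-bit counters tear-free. A hedged sketch of the drop path in the xmit handler (the real handler also handles -EAGAIN by stopping the queue; that is omitted here):

/* Uses the includes and struct definitions from the first sketch. */
static netdev_tx_t sketch_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
        struct mhi_device *mdev = mhi_netdev->mdev;
        int err;

        err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
        if (unlikely(err)) {
                kfree_skb(skb);
                /* Writer side: serialize counter updates against readers. */
                u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
                u64_stats_inc(&mhi_netdev->stats.tx_dropped);
                u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
        }
        return NETDEV_TX_OK;
}
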
103 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); in mhi_ndo_get_stats64() local
107 start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp); in mhi_ndo_get_stats64()
108 stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); in mhi_ndo_get_stats64()
109 stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); in mhi_ndo_get_stats64()
110 stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); in mhi_ndo_get_stats64()
111 } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start)); in mhi_ndo_get_stats64()
114 start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp); in mhi_ndo_get_stats64()
115 stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets); in mhi_ndo_get_stats64()
116 stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes); in mhi_ndo_get_stats64()
117 stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors); in mhi_ndo_get_stats64()
118 stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped); in mhi_ndo_get_stats64()
119 } while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start)); in mhi_ndo_get_stats64()
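
The matching reader side wraps the reads in a fetch/retry loop: if u64_stats_fetch_retry() reports that a writer ran during the reads, the whole block is re-read, so the reader never observes a torn 64-bit value on 32-bit kernels. A sketch of the RX half (the TX half at lines 114-119 is identical in shape):

/* Uses the includes and struct definitions from the first sketch. */
static void sketch_get_stats64(struct net_device *ndev,
                               struct rtnl_link_stats64 *stats)
{
        struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
                stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
                stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
                stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
                /* Re-read if a writer updated the counters meanwhile. */
        } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));
}
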
143 static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev, in mhi_net_skb_agg() argument
146 struct sk_buff *head = mhi_netdev->skbagg_head; in mhi_net_skb_agg()
147 struct sk_buff *tail = mhi_netdev->skbagg_tail; in mhi_net_skb_agg()
151 mhi_netdev->skbagg_head = skb; in mhi_net_skb_agg()
164 mhi_netdev->skbagg_tail = skb; in mhi_net_skb_agg()
166 return mhi_netdev->skbagg_head; in mhi_net_skb_agg()
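
mhi_net_skb_agg() chains MHI transfer fragments into one packet through the frag_list of the first skb; only its head/tail bookkeeping lines appear above, so the sketch below fills in the elided middle as non-paged frag_list chaining (consistent with the visible lines, but a reconstruction, not the verbatim source):

/* Uses the includes and struct definitions from the first sketch. */
static struct sk_buff *sketch_skb_agg(struct mhi_net_dev *mhi_netdev,
                                      struct sk_buff *skb)
{
        struct sk_buff *head = mhi_netdev->skbagg_head;
        struct sk_buff *tail = mhi_netdev->skbagg_tail;

        /* First fragment becomes the aggregation head. */
        if (!head) {
                mhi_netdev->skbagg_head = skb;
                return skb;
        }

        /* Chain later fragments through the head's frag_list. */
        if (!skb_shinfo(head)->frag_list)
                skb_shinfo(head)->frag_list = skb;
        else
                tail->next = skb;

        /* Keep the head's length accounting in sync with the chain. */
        head->len += skb->len;
        head->data_len += skb->len;
        head->truesize += skb->truesize;

        mhi_netdev->skbagg_tail = skb;

        return mhi_netdev->skbagg_head;
}
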
172 struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); in mhi_net_dl_callback() local
187 netdev_warn_once(mhi_netdev->ndev, in mhi_net_dl_callback()
190 mhi_net_skb_agg(mhi_netdev, skb); in mhi_net_dl_callback()
199 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); in mhi_net_dl_callback()
200 u64_stats_inc(&mhi_netdev->stats.rx_errors); in mhi_net_dl_callback()
201 u64_stats_update_end(&mhi_netdev->stats.rx_syncp); in mhi_net_dl_callback()
206 if (mhi_netdev->skbagg_head) { in mhi_net_dl_callback()
208 skb = mhi_net_skb_agg(mhi_netdev, skb); in mhi_net_dl_callback()
209 mhi_netdev->skbagg_head = NULL; in mhi_net_dl_callback()
224 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); in mhi_net_dl_callback()
225 u64_stats_inc(&mhi_netdev->stats.rx_packets); in mhi_net_dl_callback()
226 u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); in mhi_net_dl_callback()
227 u64_stats_update_end(&mhi_netdev->stats.rx_syncp); in mhi_net_dl_callback()
232 if (free_desc_count >= mhi_netdev->rx_queue_sz / 2) in mhi_net_dl_callback()
233 schedule_delayed_work(&mhi_netdev->rx_refill, 0); in mhi_net_dl_callback()
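
mhi_net_dl_callback() runs once per completed RX buffer. On a transfer error it counts rx_errors under rx_syncp and drops the buffer (lines 199-201); if an aggregation is in flight it merges the buffer into the chain and detaches the head by clearing skbagg_head once the packet is complete (lines 206-209). The tail of the good path then accounts the packet and refills lazily: the worker is scheduled only once at least half of the RX ring is free, so buffers are replenished in batches rather than per packet. A condensed sketch of that tail (the helper name sketch_dl_tail is illustrative; protocol setup is omitted):

/* Uses the includes and struct definitions from the first sketch. */
static void sketch_dl_tail(struct mhi_device *mhi_dev,
                           struct mhi_net_dev *mhi_netdev,
                           struct sk_buff *skb)
{
        int free_desc_count;

        /* Account before netif_rx(): the skb is not ours afterwards. */
        u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
        u64_stats_inc(&mhi_netdev->stats.rx_packets);
        u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
        u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

        netif_rx(skb);

        /* Batch refills: wait until half the RX ring has drained. */
        free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
        if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
                schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
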
239 struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); in mhi_net_ul_callback() local
240 struct net_device *ndev = mhi_netdev->ndev; in mhi_net_ul_callback()
241 struct mhi_device *mdev = mhi_netdev->mdev; in mhi_net_ul_callback()
249 u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); in mhi_net_ul_callback()
253 u64_stats_update_end(&mhi_netdev->stats.tx_syncp); in mhi_net_ul_callback()
257 u64_stats_inc(&mhi_netdev->stats.tx_errors); in mhi_net_ul_callback()
259 u64_stats_inc(&mhi_netdev->stats.tx_packets); in mhi_net_ul_callback()
260 u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd); in mhi_net_ul_callback()
262 u64_stats_update_end(&mhi_netdev->stats.tx_syncp); in mhi_net_ul_callback()
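
mhi_net_ul_callback() fires when the device has consumed a TX buffer. struct mhi_result reports bytes_xferd and a transaction_status, which selects between the error and success counters inside a single tx_syncp section. A sketch (the real callback also wakes a stopped TX queue and treats -ENOTCONN specially; both are omitted here):

/* Uses the includes and struct definitions from the first sketch. */
static void sketch_ul_callback(struct mhi_device *mhi_dev,
                               struct mhi_result *mhi_res)
{
        struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
        struct sk_buff *skb = mhi_res->buf_addr;

        /* The MHI core is done with the buffer; free it here. */
        dev_kfree_skb(skb);

        u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
        if (unlikely(mhi_res->transaction_status)) {
                u64_stats_inc(&mhi_netdev->stats.tx_errors);
        } else {
                u64_stats_inc(&mhi_netdev->stats.tx_packets);
                u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
        }
        u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
}
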
270 struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev, in mhi_net_rx_refill_work() local
272 struct net_device *ndev = mhi_netdev->ndev; in mhi_net_rx_refill_work()
273 struct mhi_device *mdev = mhi_netdev->mdev; in mhi_net_rx_refill_work()
278 size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu); in mhi_net_rx_refill_work()
300 if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz) in mhi_net_rx_refill_work()
301 schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); in mhi_net_rx_refill_work()
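
The refill worker recovers its private data with container_of() through the delayed_work embedded in struct mhi_net_dev, sizes each buffer from the controller MRU (falling back to the current MTU), and re-arms itself half a second later if nothing could be queued, which line 300 detects by the free descriptor count still equalling the full ring size. A sketch (allocation and queueing error paths simplified):

/* Uses the includes and struct definitions from the first sketch. */
static void sketch_rx_refill_work(struct work_struct *work)
{
        struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
                                                      rx_refill.work);
        struct net_device *ndev = mhi_netdev->ndev;
        struct mhi_device *mdev = mhi_netdev->mdev;
        struct sk_buff *skb;
        unsigned int size;

        /* Prefer the controller MRU; fall back to the current MTU. */
        size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

        while (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE)) {
                skb = netdev_alloc_skb(ndev, size);
                if (unlikely(!skb))
                        break;
                if (mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT)) {
                        kfree_skb(skb);
                        break;
                }
                cond_resched();
        }

        /* Nothing queued at all? Retry in half a second. */
        if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
                schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
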
306 struct mhi_net_dev *mhi_netdev; in mhi_net_newlink() local
309 mhi_netdev = netdev_priv(ndev); in mhi_net_newlink()
311 dev_set_drvdata(&mhi_dev->dev, mhi_netdev); in mhi_net_newlink()
312 mhi_netdev->ndev = ndev; in mhi_net_newlink()
313 mhi_netdev->mdev = mhi_dev; in mhi_net_newlink()
314 mhi_netdev->skbagg_head = NULL; in mhi_net_newlink()
315 mhi_netdev->mru = mhi_dev->mhi_cntrl->mru; in mhi_net_newlink()
317 INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); in mhi_net_newlink()
318 u64_stats_init(&mhi_netdev->stats.rx_syncp); in mhi_net_newlink()
319 u64_stats_init(&mhi_netdev->stats.tx_syncp); in mhi_net_newlink()
327 mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); in mhi_net_newlink()
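
mhi_net_newlink() wires the pieces together before traffic can flow: drvdata is set first so the MHI DL/UL callbacks can find the mhi_net_dev, the work item and both stats syncps are initialized, the channels are started, and rx_queue_sz is snapshotted from the free descriptor count while the RX ring is still empty, which is what makes the half-ring refill threshold meaningful. A sketch of that ordering (error unwinding trimmed):

/* Uses the includes and struct definitions from the first sketch. */
static int sketch_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
        struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
        int err;

        dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
        mhi_netdev->ndev = ndev;
        mhi_netdev->mdev = mhi_dev;
        mhi_netdev->skbagg_head = NULL;
        mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;

        INIT_DELAYED_WORK(&mhi_netdev->rx_refill, sketch_rx_refill_work);
        u64_stats_init(&mhi_netdev->stats.rx_syncp);
        u64_stats_init(&mhi_netdev->stats.tx_syncp);

        /* Start the MHI channels before sizing the ring. */
        err = mhi_prepare_for_transfer(mhi_dev);
        if (err)
                return err;

        /* Ring is still empty, so the free count is the ring size. */
        mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

        return register_netdev(ndev);
}
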
338 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); in mhi_net_dellink() local
344 kfree_skb(mhi_netdev->skbagg_head); in mhi_net_dellink()
376 struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); in mhi_net_remove() local
378 mhi_net_dellink(mhi_dev, mhi_netdev->ndev); in mhi_net_remove()
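
Teardown mirrors setup: mhi_net_remove() recovers the device through drvdata and delegates to mhi_net_dellink(), which unregisters the netdev, stops the channels, and frees whatever half-built aggregation chain is still parked in skbagg_head (kfree_skb() accepts NULL, so no check is needed). A sketch:

/* Uses the includes and struct definitions from the first sketch. */
static void sketch_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
        struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

        unregister_netdev(ndev);
        mhi_unprepare_from_transfer(mhi_dev);

        /* Drop a half-built aggregation chain; NULL is a no-op. */
        kfree_skb(mhi_netdev->skbagg_head);

        free_netdev(ndev);
        dev_set_drvdata(&mhi_dev->dev, NULL);
}

static void sketch_remove(struct mhi_device *mhi_dev)
{
        struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

        sketch_dellink(mhi_dev, mhi_netdev->ndev);
}
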