Lines matching refs:netdev in net/core/netdev-genl.c (each entry: source line number, matched text, enclosing function)
34 netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_dev_fill() argument
41 netdev_assert_locked(netdev); /* note: rtnl_lock may not be held! */ in netdev_nl_dev_fill()
48 if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \ in netdev_nl_dev_fill()
53 if (netdev->xsk_tx_metadata_ops) { in netdev_nl_dev_fill()
54 if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp) in netdev_nl_dev_fill()
56 if (netdev->xsk_tx_metadata_ops->tmo_request_checksum) in netdev_nl_dev_fill()
58 if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time) in netdev_nl_dev_fill()
62 if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || in netdev_nl_dev_fill()
64 netdev->xdp_features, NETDEV_A_DEV_PAD) || in netdev_nl_dev_fill()
71 if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) { in netdev_nl_dev_fill()
73 netdev->xdp_zc_max_segs)) in netdev_nl_dev_fill()
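Note: netdev_nl_dev_fill() turns the presence of driver ops into feature bitmasks and emits them as attributes of a single DEV message. Below is a minimal sketch of that fill pattern, condensed from the file above; sketch_dev_fill is a hypothetical name, most attributes and the XDP macro expansion are trimmed, and the includes here are assumed by the later sketches as well.

#include <linux/netdevice.h>
#include <net/genetlink.h>
#include <net/netdev_lock.h>
#include <uapi/linux/netdev.h>

/* Hypothetical condensation of netdev_nl_dev_fill(): map driver ops
 * to feature bits, then emit them as netlink attributes. */
static int sketch_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
			   const struct genl_info *info)
{
	u64 xsk_features = 0;
	void *hdr;

	/* caller holds the per-device instance lock, not rtnl_lock */
	netdev_assert_locked(netdev);

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	/* presence of an op implies the feature is usable */
	if (netdev->xsk_tx_metadata_ops &&
	    netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
		xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD)) {
		genlmsg_cancel(rsp, hdr);
		return -EMSGSIZE;
	}

	genlmsg_end(rsp, hdr);
	return 0;
}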
87 netdev_genl_dev_notify(struct net_device *netdev, int cmd) in netdev_genl_dev_notify() argument
92 if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev), in netdev_genl_dev_notify()
102 if (netdev_nl_dev_fill(netdev, ntf, &info)) { in netdev_genl_dev_notify()
107 genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf, in netdev_genl_dev_notify()
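Note: the notify path checks for multicast listeners first so it never allocates a message nobody will read. A sketch of that pattern, assuming the generated family and group identifiers from netdev-genl-gen.h (netdev_nl_family, NETDEV_NLGRP_MGMT); sketch_dev_notify is a hypothetical name.

/* Hypothetical mirror of netdev_genl_dev_notify(): bail early when
 * the multicast group is empty, otherwise build and send. */
static void sketch_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;		/* don't allocate for an empty group */

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}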
113 struct net_device *netdev; in netdev_nl_dev_get_doit() local
127 netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); in netdev_nl_dev_get_doit()
128 if (!netdev) { in netdev_nl_dev_get_doit()
133 err = netdev_nl_dev_fill(netdev, rsp, info); in netdev_nl_dev_get_doit()
134 netdev_unlock(netdev); in netdev_nl_dev_get_doit()
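Note: the .doit handler resolves the ifindex under the per-device instance lock and drops it as soon as the reply is filled. A loose, hypothetical condensation (sketch_dev_get_doit) of netdev_nl_dev_get_doit(), with attribute policy details trimmed:

static int sketch_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	/* returns the device with its instance lock held, or NULL */
	netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
	if (!netdev) {
		err = -ENODEV;
		goto err_free_msg;
	}

	err = netdev_nl_dev_fill(netdev, rsp, info);
	netdev_unlock(netdev);
	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}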
152 for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { in netdev_nl_dev_get_dumpit()
153 err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb)); in netdev_nl_dev_get_dumpit()
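Note: dumps are resumable because ctx->ifindex lives in the netlink callback's scratch space, so a dump that overflowed one skb restarts at the next device. A sketch assuming the file-local netdev_nl_dump_ctx / netdev_dump_ctx helpers:

static int sketch_dev_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	/* iterates devices from ctx->ifindex on, instance lock held inside */
	for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;	/* full buffer; the core calls back for more */
	}

	return err;
}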
261 netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_napi_dump_one() argument
269 if (!netdev->up) in netdev_nl_napi_dump_one()
273 list_for_each_entry(napi, &netdev->napi_list, dev_list) { in netdev_nl_napi_dump_one()
297 struct net_device *netdev; in netdev_nl_napi_get_dumpit() local
305 netdev = netdev_get_by_index_lock(net, ifindex); in netdev_nl_napi_get_dumpit()
306 if (netdev) { in netdev_nl_napi_get_dumpit()
307 err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); in netdev_nl_napi_get_dumpit()
308 netdev_unlock(netdev); in netdev_nl_napi_get_dumpit()
313 for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { in netdev_nl_napi_get_dumpit()
314 err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); in netdev_nl_napi_get_dumpit()
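Note: within one device, NAPI dump continuation works the same way: ctx->napi_id records the last emitted entry so a partial dump can pick up mid-list. A simplified sketch; sketch_napi_dump_one is hypothetical, and netdev_nl_napi_fill_one is the file's per-NAPI fill helper (not shown in this listing).

static int sketch_napi_dump_one(struct net_device *netdev,
				struct sk_buff *rsp,
				const struct genl_info *info,
				struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err;

	if (!netdev->up)	/* only running devices have live NAPIs */
		return 0;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		/* list is sorted by id; skip entries a previous pass emitted */
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;	/* resume point */
	}
	return 0;
}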
390 netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, in netdev_nl_queue_fill_one() argument
404 nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex)) in netdev_nl_queue_fill_one()
409 rxq = __netif_get_rx_queue(netdev, q_idx); in netdev_nl_queue_fill_one()
425 txq = netdev_get_tx_queue(netdev, q_idx); in netdev_nl_queue_fill_one()
445 static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id, in netdev_nl_queue_validate() argument
450 if (q_id >= netdev->real_num_rx_queues) in netdev_nl_queue_validate()
454 if (q_id >= netdev->real_num_tx_queues) in netdev_nl_queue_validate()
461 netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, in netdev_nl_queue_fill() argument
466 if (!netdev->up) in netdev_nl_queue_fill()
469 err = netdev_nl_queue_validate(netdev, q_idx, q_type); in netdev_nl_queue_fill()
473 return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info); in netdev_nl_queue_fill()
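Note: queue validation is a pure bounds check against the queue counts currently in use, which is why the caller must hold the instance lock and why netdev_nl_queue_fill() bails early on devices that are down. A sketch mirroring netdev_nl_queue_validate():

static int sketch_queue_validate(struct net_device *netdev, u32 q_id,
				 u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}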
479 struct net_device *netdev; in netdev_nl_queue_get_doit() local
496 netdev = netdev_get_by_index_lock_ops_compat(genl_info_net(info), in netdev_nl_queue_get_doit()
498 if (netdev) { in netdev_nl_queue_get_doit()
499 err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); in netdev_nl_queue_get_doit()
500 netdev_unlock_ops_compat(netdev); in netdev_nl_queue_get_doit()
516 netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_queue_dump_one() argument
522 if (!netdev->up) in netdev_nl_queue_dump_one()
525 for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) { in netdev_nl_queue_dump_one()
526 err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx, in netdev_nl_queue_dump_one()
531 for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) { in netdev_nl_queue_dump_one()
532 err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx, in netdev_nl_queue_dump_one()
546 struct net_device *netdev; in netdev_nl_queue_get_dumpit() local
554 netdev = netdev_get_by_index_lock_ops_compat(net, ifindex); in netdev_nl_queue_get_dumpit()
555 if (netdev) { in netdev_nl_queue_get_dumpit()
556 err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); in netdev_nl_queue_get_dumpit()
557 netdev_unlock_ops_compat(netdev); in netdev_nl_queue_get_dumpit()
562 for_each_netdev_lock_ops_compat_scoped(net, netdev, in netdev_nl_queue_get_dumpit()
564 err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); in netdev_nl_queue_get_dumpit()
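Note: the queue and qstats paths use the *_ops_compat lock variants rather than plain netdev_lock(). A sketch of the assumed semantics (based on include/net/netdev_lock.h): devices that opted into the per-instance ops lock are locked directly, while legacy drivers fall back to rtnl_lock. sketch_lock_ops_compat is a hypothetical stand-in.

static inline void sketch_lock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);	/* driver opted into instance lock */
	else
		rtnl_lock();		/* legacy driver: global rtnl */
}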
640 netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_stats_queue() argument
643 const struct netdev_stat_ops *ops = netdev->stat_ops; in netdev_nl_stats_queue()
651 if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) || in netdev_nl_stats_queue()
659 ops->get_queue_stats_rx(netdev, i, &rx); in netdev_nl_stats_queue()
667 ops->get_queue_stats_tx(netdev, i, &tx); in netdev_nl_stats_queue()
687 netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_stats_by_queue() argument
691 const struct netdev_stat_ops *ops = netdev->stat_ops; in netdev_nl_stats_by_queue()
694 if (!(netdev->flags & IFF_UP)) in netdev_nl_stats_by_queue()
698 while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) { in netdev_nl_stats_by_queue()
699 err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX, in netdev_nl_stats_by_queue()
706 while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) { in netdev_nl_stats_by_queue()
707 err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX, in netdev_nl_stats_by_queue()
735 void netdev_stat_queue_sum(struct net_device *netdev, in netdev_stat_queue_sum() argument
746 ops = netdev->stat_ops; in netdev_stat_queue_sum()
751 ops->get_queue_stats_rx(netdev, i, &rx); in netdev_stat_queue_sum()
757 ops->get_queue_stats_tx(netdev, i, &tx); in netdev_stat_queue_sum()
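Note: per-queue stats aggregation relies on a sentinel: each stats struct is pre-filled with 0xff, so fields a driver does not report stay at ~0 and are skipped when summing. This follows the netdev_stat_queue_sum() pattern over struct netdev_queue_stats_rx from include/net/netdev_queues.h; sketch_stats_add is a simplified stand-in for the file's netdev_nl_stats_add(), and only the RX half is shown.

static void sketch_stats_add(u64 *sum, const u64 *add, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (add[i] != ~0ULL)	/* ~0 means "not reported" */
			sum[i] += add[i];
}

static void sketch_queue_sum(struct net_device *netdev, int rx_start,
			     int rx_end, struct netdev_queue_stats_rx *rx_sum)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	int i;

	for (i = rx_start; i < rx_end; i++) {
		memset(&rx, 0xff, sizeof(rx));	/* sentinel: unreported */
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		sketch_stats_add((u64 *)rx_sum, (u64 *)&rx,
				 sizeof(rx) / sizeof(u64));
	}
}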
764 netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_stats_by_netdev() argument
772 if (!netdev->stat_ops->get_base_stats) in netdev_nl_stats_by_netdev()
778 netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum); in netdev_nl_stats_by_netdev()
788 if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) in netdev_nl_stats_by_netdev()
791 netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum, in netdev_nl_stats_by_netdev()
792 0, netdev->real_num_tx_queues, &tx_sum); in netdev_nl_stats_by_netdev()
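Note: device-scope qstats seed the sums with the driver's base stats (history for queues that no longer exist) before adding every live queue. A condensed, hypothetical sketch of netdev_nl_stats_by_netdev(); the actual attribute emission of the summed counters is omitted for brevity.

static int sketch_stats_by_netdev(struct net_device *netdev,
				  struct sk_buff *rsp,
				  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum;
	struct netdev_queue_stats_tx tx_sum;

	/* without base stats the totals could silently miss history */
	if (!netdev->stat_ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));	/* same ~0 sentinel */
	memset(&tx_sum, 0xff, sizeof(tx_sum));
	netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
			      0, netdev->real_num_tx_queues, &tx_sum);

	/* emitting rx_sum/tx_sum as attributes is omitted here */
	return nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex);
}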
807 netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope, in netdev_nl_qstats_get_dump_one() argument
811 if (!netdev->stat_ops) in netdev_nl_qstats_get_dump_one()
816 return netdev_nl_stats_by_netdev(netdev, skb, info); in netdev_nl_qstats_get_dump_one()
818 return netdev_nl_stats_by_queue(netdev, skb, info, ctx); in netdev_nl_qstats_get_dump_one()
830 struct net_device *netdev; in netdev_nl_qstats_get_dumpit() local
844 netdev = netdev_get_by_index_lock_ops_compat(net, ifindex); in netdev_nl_qstats_get_dumpit()
845 if (!netdev) { in netdev_nl_qstats_get_dumpit()
850 if (netdev->stat_ops) { in netdev_nl_qstats_get_dumpit()
851 err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, in netdev_nl_qstats_get_dumpit()
858 netdev_unlock_ops_compat(netdev); in netdev_nl_qstats_get_dumpit()
862 for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) { in netdev_nl_qstats_get_dumpit()
863 err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, in netdev_nl_qstats_get_dumpit()
878 struct net_device *netdev; in netdev_nl_bind_rx_doit() local
909 netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); in netdev_nl_bind_rx_doit()
910 if (!netdev) { in netdev_nl_bind_rx_doit()
914 if (!netif_device_present(netdev)) in netdev_nl_bind_rx_doit()
916 else if (!netdev_need_ops_lock(netdev)) in netdev_nl_bind_rx_doit()
924 binding = net_devmem_bind_dmabuf(netdev, DMA_FROM_DEVICE, dmabuf_fd, in netdev_nl_bind_rx_doit()
954 err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding, in netdev_nl_bind_rx_doit()
967 netdev_unlock(netdev); in netdev_nl_bind_rx_doit()
976 netdev_unlock(netdev); in netdev_nl_bind_rx_doit()
988 struct net_device *netdev; in netdev_nl_bind_tx_doit() local
1017 netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); in netdev_nl_bind_tx_doit()
1018 if (!netdev) { in netdev_nl_bind_tx_doit()
1023 if (!netif_device_present(netdev)) { in netdev_nl_bind_tx_doit()
1028 if (!netdev->netmem_tx) { in netdev_nl_bind_tx_doit()
1035 binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd, priv, in netdev_nl_bind_tx_doit()
1045 netdev_unlock(netdev); in netdev_nl_bind_tx_doit()
1051 netdev_unlock(netdev); in netdev_nl_bind_tx_doit()
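Note: before binding a dmabuf, the bind handlers gate on a chain of preconditions: the device must be found and locked, still present, and (for TX) must have opted into netmem. A hypothetical condensation (sketch_bind_tx_checks) of the checks in netdev_nl_bind_tx_doit(); the actual net_devmem_bind_dmabuf() call and attribute parsing are trimmed.

static int sketch_bind_tx_checks(struct net *net, u32 ifindex,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev;
	int err = 0;

	netdev = netdev_get_by_index_lock(net, ifindex);
	if (!netdev)
		return -ENODEV;

	if (!netif_device_present(netdev)) {
		err = -ENODEV;		/* device went away under us */
	} else if (!netdev->netmem_tx) {
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack,
			       "Driver does not support netmem TX");
	}

	/* on success the real code calls net_devmem_bind_dmabuf() here */
	netdev_unlock(netdev);
	return err;
}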
1095 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); in netdev_genl_netdevice_event() local
1099 netdev_lock_ops_to_full(netdev); in netdev_genl_netdevice_event()
1100 netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF); in netdev_genl_netdevice_event()
1101 netdev_unlock_full_to_ops(netdev); in netdev_genl_netdevice_event()
1104 netdev_lock(netdev); in netdev_genl_netdevice_event()
1105 netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF); in netdev_genl_netdevice_event()
1106 netdev_unlock(netdev); in netdev_genl_netdevice_event()
1109 netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF); in netdev_genl_netdevice_event()
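Note: the notifier glues the two halves together, translating netdevice events into netdev-genl notifications. The event-to-command mapping below follows netdev_genl_netdevice_event(); the locking differs per event because REGISTER runs with the ops lock already held and only needs an upgrade to the full instance lock. sketch_netdevice_event is a hypothetical name.

static int sketch_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		/* upgrade the held ops lock to the full instance lock */
		netdev_lock_ops_to_full(netdev);
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		netdev_unlock_full_to_ops(netdev);
		break;
	case NETDEV_UNREGISTER:
		netdev_lock(netdev);
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		netdev_unlock(netdev);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}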