Lines matching refs:tun (identifier cross-reference for the tun driver, drivers/net/tun.c)
133 struct tun_struct __rcu *tun; member
159 struct tun_struct *tun; member
224 static void tun_flow_init(struct tun_struct *tun);
225 static void tun_flow_uninit(struct tun_struct *tun);
267 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, in tun_napi_init() argument
273 netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll); in tun_napi_init()
317 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, in tun_flow_create() argument
324 netif_info(tun, tx_queued, tun->dev, in tun_flow_create()
331 e->tun = tun; in tun_flow_create()
333 ++tun->flow_count; in tun_flow_create()
338 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) in tun_flow_delete() argument
340 netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n", in tun_flow_delete()
344 --tun->flow_count; in tun_flow_delete()
347 static void tun_flow_flush(struct tun_struct *tun) in tun_flow_flush() argument
351 spin_lock_bh(&tun->lock); in tun_flow_flush()
356 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) in tun_flow_flush()
357 tun_flow_delete(tun, e); in tun_flow_flush()
359 spin_unlock_bh(&tun->lock); in tun_flow_flush()
362 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) in tun_flow_delete_by_queue() argument
366 spin_lock_bh(&tun->lock); in tun_flow_delete_by_queue()
371 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { in tun_flow_delete_by_queue()
373 tun_flow_delete(tun, e); in tun_flow_delete_by_queue()
376 spin_unlock_bh(&tun->lock); in tun_flow_delete_by_queue()
381 struct tun_struct *tun = timer_container_of(tun, t, flow_gc_timer); in tun_flow_cleanup() local
382 unsigned long delay = tun->ageing_time; in tun_flow_cleanup()
387 spin_lock(&tun->lock); in tun_flow_cleanup()
392 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { in tun_flow_cleanup()
397 tun_flow_delete(tun, e); in tun_flow_cleanup()
407 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); in tun_flow_cleanup()
408 spin_unlock(&tun->lock); in tun_flow_cleanup()
411 static void tun_flow_update(struct tun_struct *tun, u32 rxhash, in tun_flow_update() argument
416 unsigned long delay = tun->ageing_time; in tun_flow_update()
419 head = &tun->flows[tun_hashfn(rxhash)]; in tun_flow_update()
432 spin_lock_bh(&tun->lock); in tun_flow_update()
434 tun->flow_count < MAX_TAP_FLOWS) in tun_flow_update()
435 tun_flow_create(tun, head, rxhash, queue_index); in tun_flow_update()
437 if (!timer_pending(&tun->flow_gc_timer)) in tun_flow_update()
438 mod_timer(&tun->flow_gc_timer, in tun_flow_update()
440 spin_unlock_bh(&tun->lock); in tun_flow_update()
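
Taken together, the hits above are the receive-flow table: tun_flow_update() records which queue last saw a given rxhash, tun_flow_create() adds entries under tun->lock up to MAX_TAP_FLOWS, and the flow_gc_timer callback tun_flow_cleanup() ages idle entries out after tun->ageing_time. A minimal userspace analogue of that ageing-table pattern is sketched below; the names (flow_tab, FLOW_BUCKETS, AGEING_SECS, MAX_FLOWS) are hypothetical, and the kernel's locking and round_jiffies_up()-aligned timer are omitted.

/* Illustrative userspace analogue of the tun flow table: an rxhash -> queue
 * map with timestamp-based ageing. Hypothetical names throughout. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define FLOW_BUCKETS 16
#define MAX_FLOWS    64
#define AGEING_SECS  3

struct flow {
    unsigned int rxhash;
    unsigned int queue;
    time_t last_used;
    struct flow *next;
};

static struct flow *flow_tab[FLOW_BUCKETS];
static int flow_count;

/* Like tun_flow_update(): refresh an existing entry or create one,
 * respecting the MAX_FLOWS cap (cf. MAX_TAP_FLOWS). */
static void flow_update(unsigned int rxhash, unsigned int queue)
{
    unsigned int b = rxhash & (FLOW_BUCKETS - 1);
    struct flow *e;

    for (e = flow_tab[b]; e; e = e->next) {
        if (e->rxhash == rxhash) {
            e->queue = queue;
            e->last_used = time(NULL);
            return;
        }
    }
    if (flow_count >= MAX_FLOWS || !(e = malloc(sizeof(*e))))
        return;
    e->rxhash = rxhash;
    e->queue = queue;
    e->last_used = time(NULL);
    e->next = flow_tab[b];
    flow_tab[b] = e;
    flow_count++;
}

/* Like tun_flow_cleanup(): drop entries idle for longer than AGEING_SECS. */
static void flow_gc(void)
{
    unsigned int i;

    for (i = 0; i < FLOW_BUCKETS; i++) {
        struct flow **pp = &flow_tab[i];

        while (*pp) {
            struct flow *e = *pp;

            if (time(NULL) - e->last_used > AGEING_SECS) {
                *pp = e->next;
                free(e);
                flow_count--;
            } else {
                pp = &e->next;
            }
        }
    }
}

int main(void)
{
    flow_update(0xdeadbeef, 2);
    flow_gc();
    printf("flows tracked: %d\n", flow_count);
    return 0;
}

The kernel version does the same refresh-or-create dance but defers garbage collection to the timer, so the datapath only takes tun->lock briefly and only when it actually needs to create an entry or arm the GC.
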
461 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) in tun_automq_select_queue() argument
466 numqueues = READ_ONCE(tun->numqueues); in tun_automq_select_queue()
469 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); in tun_automq_select_queue()
480 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) in tun_ebpf_select_queue() argument
486 numqueues = READ_ONCE(tun->numqueues); in tun_ebpf_select_queue()
490 prog = rcu_dereference(tun->steering_prog); in tun_ebpf_select_queue()
500 struct tun_struct *tun = netdev_priv(dev); in tun_select_queue() local
504 if (rcu_dereference(tun->steering_prog)) in tun_select_queue()
505 ret = tun_ebpf_select_queue(tun, skb); in tun_select_queue()
507 ret = tun_automq_select_queue(tun, skb); in tun_select_queue()
513 static inline bool tun_not_capable(struct tun_struct *tun) in tun_not_capable() argument
516 struct net *net = dev_net(tun->dev); in tun_not_capable()
518 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || in tun_not_capable()
519 (gid_valid(tun->group) && !in_egroup_p(tun->group))) && in tun_not_capable()
523 static void tun_set_real_num_queues(struct tun_struct *tun) in tun_set_real_num_queues() argument
525 netif_set_real_num_tx_queues(tun->dev, tun->numqueues); in tun_set_real_num_queues()
526 netif_set_real_num_rx_queues(tun->dev, tun->numqueues); in tun_set_real_num_queues()
529 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) in tun_disable_queue() argument
531 tfile->detached = tun; in tun_disable_queue()
532 list_add_tail(&tfile->next, &tun->disabled); in tun_disable_queue()
533 ++tun->numdisabled; in tun_disable_queue()
538 struct tun_struct *tun = tfile->detached; in tun_enable_queue() local
542 --tun->numdisabled; in tun_enable_queue()
543 return tun; in tun_enable_queue()
574 struct tun_struct *tun; in __tun_detach() local
576 tun = rtnl_dereference(tfile->tun); in __tun_detach()
578 if (tun && clean) { in __tun_detach()
584 if (tun && !tfile->detached) { in __tun_detach()
586 BUG_ON(index >= tun->numqueues); in __tun_detach()
588 rcu_assign_pointer(tun->tfiles[index], in __tun_detach()
589 tun->tfiles[tun->numqueues - 1]); in __tun_detach()
590 ntfile = rtnl_dereference(tun->tfiles[index]); in __tun_detach()
593 rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], in __tun_detach()
596 --tun->numqueues; in __tun_detach()
598 RCU_INIT_POINTER(tfile->tun, NULL); in __tun_detach()
601 tun_disable_queue(tun, tfile); in __tun_detach()
606 tun_flow_delete_by_queue(tun, tun->numqueues + 1); in __tun_detach()
609 tun_set_real_num_queues(tun); in __tun_detach()
611 tun = tun_enable_queue(tfile); in __tun_detach()
616 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { in __tun_detach()
617 netif_carrier_off(tun->dev); in __tun_detach()
619 if (!(tun->flags & IFF_PERSIST) && in __tun_detach()
620 tun->dev->reg_state == NETREG_REGISTERED) in __tun_detach()
621 unregister_netdevice(tun->dev); in __tun_detach()
623 if (tun) in __tun_detach()
631 struct tun_struct *tun; in tun_detach() local
635 tun = rtnl_dereference(tfile->tun); in tun_detach()
636 dev = tun ? tun->dev : NULL; in tun_detach()
648 struct tun_struct *tun = netdev_priv(dev); in tun_detach_all() local
650 int i, n = tun->numqueues; in tun_detach_all()
653 tfile = rtnl_dereference(tun->tfiles[i]); in tun_detach_all()
658 RCU_INIT_POINTER(tfile->tun, NULL); in tun_detach_all()
659 --tun->numqueues; in tun_detach_all()
661 list_for_each_entry(tfile, &tun->disabled, next) { in tun_detach_all()
664 RCU_INIT_POINTER(tfile->tun, NULL); in tun_detach_all()
666 BUG_ON(tun->numqueues != 0); in tun_detach_all()
670 tfile = rtnl_dereference(tun->tfiles[i]); in tun_detach_all()
677 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { in tun_detach_all()
684 BUG_ON(tun->numdisabled != 0); in tun_detach_all()
686 if (tun->flags & IFF_PERSIST) in tun_detach_all()
690 static int tun_attach(struct tun_struct *tun, struct file *file, in tun_attach() argument
695 struct net_device *dev = tun->dev; in tun_attach()
698 err = security_tun_dev_attach(tfile->socket.sk, tun->security); in tun_attach()
703 if (rtnl_dereference(tfile->tun) && !tfile->detached) in tun_attach()
707 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) in tun_attach()
712 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) in tun_attach()
718 if (!skip_filter && (tun->filter_attached == true)) { in tun_attach()
720 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); in tun_attach()
733 tfile->queue_index = tun->numqueues; in tun_attach()
745 tun->dev, tfile->queue_index, 0); in tun_attach()
762 tun_napi_init(tun, tfile, napi, napi_frags); in tun_attach()
765 if (rtnl_dereference(tun->xdp_prog)) in tun_attach()
777 rcu_assign_pointer(tfile->tun, tun); in tun_attach()
778 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); in tun_attach()
779 tun->numqueues++; in tun_attach()
780 tun_set_real_num_queues(tun); in tun_attach()
787 struct tun_struct *tun; in tun_get() local
790 tun = rcu_dereference(tfile->tun); in tun_get()
791 if (tun) in tun_get()
792 dev_hold(tun->dev); in tun_get()
795 return tun; in tun_get()
798 static void tun_put(struct tun_struct *tun) in tun_put() argument
800 dev_put(tun->dev); in tun_put()
913 struct tun_struct *tun = netdev_priv(dev); in tun_net_init() local
914 struct ifreq *ifr = tun->ifr; in tun_net_init()
917 spin_lock_init(&tun->lock); in tun_net_init()
919 err = security_tun_dev_alloc_security(&tun->security); in tun_net_init()
923 tun_flow_init(tun); in tun_net_init()
936 tun->flags = (tun->flags & ~TUN_FEATURES) | in tun_net_init()
939 INIT_LIST_HEAD(&tun->disabled); in tun_net_init()
940 err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, in tun_net_init()
943 tun_flow_uninit(tun); in tun_net_init()
944 security_tun_dev_free_security(tun->security); in tun_net_init()
972 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) in tun_automq_xmit() argument
975 if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { in tun_automq_xmit()
983 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); in tun_automq_xmit()
990 static unsigned int run_ebpf_filter(struct tun_struct *tun, in run_ebpf_filter() argument
994 struct tun_prog *prog = rcu_dereference(tun->filter_prog); in run_ebpf_filter()
1006 struct tun_struct *tun = netdev_priv(dev); in tun_net_xmit() local
1013 tfile = rcu_dereference(tun->tfiles[txq]); in tun_net_xmit()
1021 if (!rcu_dereference(tun->steering_prog)) in tun_net_xmit()
1022 tun_automq_xmit(tun, skb); in tun_net_xmit()
1024 netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len); in tun_net_xmit()
1029 if (!check_filter(&tun->txflt, skb)) { in tun_net_xmit()
1038 len = run_ebpf_filter(tun, skb, len); in tun_net_xmit()
1100 struct tun_struct *tun = netdev_priv(dev); in tun_net_fix_features() local
1102 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); in tun_net_fix_features()
1107 struct tun_struct *tun = netdev_priv(dev); in tun_set_headroom() local
1112 tun->align = new_hr; in tun_set_headroom()
1118 struct tun_struct *tun = netdev_priv(dev); in tun_net_get_stats64() local
1123 (unsigned long)atomic_long_read(&tun->rx_frame_errors); in tun_net_get_stats64()
1129 struct tun_struct *tun = netdev_priv(dev); in tun_xdp_set() local
1134 old_prog = rtnl_dereference(tun->xdp_prog); in tun_xdp_set()
1135 rcu_assign_pointer(tun->xdp_prog, prog); in tun_xdp_set()
1139 for (i = 0; i < tun->numqueues; i++) { in tun_xdp_set()
1140 tfile = rtnl_dereference(tun->tfiles[i]); in tun_xdp_set()
1146 list_for_each_entry(tfile, &tun->disabled, next) { in tun_xdp_set()
1169 struct tun_struct *tun = netdev_priv(dev); in tun_net_change_carrier() local
1171 if (!tun->numqueues) in tun_net_change_carrier()
1205 struct tun_struct *tun = netdev_priv(dev); in tun_xdp_xmit() local
1217 numqueues = READ_ONCE(tun->numqueues); in tun_xdp_xmit()
1223 tfile = rcu_dereference(tun->tfiles[smp_processor_id() % in tun_xdp_xmit()
1283 static void tun_flow_init(struct tun_struct *tun) in tun_flow_init() argument
1288 INIT_HLIST_HEAD(&tun->flows[i]); in tun_flow_init()
1290 tun->ageing_time = TUN_FLOW_EXPIRE; in tun_flow_init()
1291 timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); in tun_flow_init()
1292 mod_timer(&tun->flow_gc_timer, in tun_flow_init()
1293 round_jiffies_up(jiffies + tun->ageing_time)); in tun_flow_init()
1296 static void tun_flow_uninit(struct tun_struct *tun) in tun_flow_uninit() argument
1298 timer_delete_sync(&tun->flow_gc_timer); in tun_flow_uninit()
1299 tun_flow_flush(tun); in tun_flow_uninit()
1308 struct tun_struct *tun = netdev_priv(dev); in tun_net_initialize() local
1310 switch (tun->flags & TUN_TYPE_MASK) { in tun_net_initialize()
1346 static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) in tun_sock_writeable() argument
1350 return (tun->dev->flags & IFF_UP) && sock_writeable(sk); in tun_sock_writeable()
1359 struct tun_struct *tun = tun_get(tfile); in tun_chr_poll() local
1363 if (!tun) in tun_chr_poll()
1378 if (tun_sock_writeable(tun, tfile) || in tun_chr_poll()
1380 tun_sock_writeable(tun, tfile))) in tun_chr_poll()
1383 if (tun->dev->reg_state != NETREG_REGISTERED) in tun_chr_poll()
1386 tun_put(tun); in tun_chr_poll()
1474 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, in tun_rx_batched() argument
1479 u32 rx_batched = tun->rx_batched; in tun_rx_batched()
1514 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, in tun_can_build_skb() argument
1517 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) in tun_can_build_skb()
1558 static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, in tun_xdp_act() argument
1565 err = xdp_do_redirect(tun->dev, xdp, xdp_prog); in tun_xdp_act()
1567 dev_core_stats_rx_dropped_inc(tun->dev); in tun_xdp_act()
1570 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); in tun_xdp_act()
1573 err = tun_xdp_tx(tun->dev, xdp); in tun_xdp_act()
1575 dev_core_stats_rx_dropped_inc(tun->dev); in tun_xdp_act()
1578 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); in tun_xdp_act()
1583 bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); in tun_xdp_act()
1586 trace_xdp_exception(tun->dev, xdp_prog, act); in tun_xdp_act()
1589 dev_core_stats_rx_dropped_inc(tun->dev); in tun_xdp_act()
1596 static struct sk_buff *tun_build_skb(struct tun_struct *tun, in tun_build_skb() argument
1613 xdp_prog = rcu_dereference(tun->xdp_prog); in tun_build_skb()
1645 xdp_prog = rcu_dereference(tun->xdp_prog); in tun_build_skb()
1658 err = tun_xdp_act(tun, xdp_prog, &xdp, act); in tun_build_skb()
1693 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, in tun_get_user() argument
1700 size_t len = total_len, align = tun->align, linear; in tun_get_user()
1722 if (!(tun->flags & IFF_NO_PI)) { in tun_get_user()
1731 if (tun->flags & IFF_VNET_HDR) { in tun_get_user()
1732 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); in tun_get_user()
1735 hdr_len = __tun_vnet_hdr_get(vnet_hdr_sz, tun->flags, in tun_get_user()
1743 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { in tun_get_user()
1765 if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { in tun_get_user()
1770 skb = tun_build_skb(tun, tfile, from, gso, len, &skb_xdp); in tun_get_user()
1814 if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, &hdr)) { in tun_get_user()
1815 atomic_long_inc(&tun->rx_frame_errors); in tun_get_user()
1820 switch (tun->flags & TUN_TYPE_MASK) { in tun_get_user()
1822 if (tun->flags & IFF_NO_PI) { in tun_get_user()
1840 skb->dev = tun->dev; in tun_get_user()
1848 skb->protocol = eth_type_trans(skb, tun->dev); in tun_get_user()
1870 xdp_prog = rcu_dereference(tun->xdp_prog); in tun_get_user()
1887 if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && in tun_get_user()
1892 if (unlikely(!(tun->dev->flags & IFF_UP))) { in tun_get_user()
1904 headlen = eth_get_headlen(tun->dev, skb->data, in tun_get_user()
1910 dev_core_stats_rx_dropped_inc(tun->dev); in tun_get_user()
1950 tun_rx_batched(tun, tfile, skb, more); in tun_get_user()
1957 dev_sw_netstats_rx_add(tun->dev, len); in tun_get_user()
1961 tun_flow_update(tun, rxhash, tfile); in tun_get_user()
1967 dev_core_stats_rx_dropped_inc(tun->dev); in tun_get_user()
1986 struct tun_struct *tun = tun_get(tfile); in tun_chr_write_iter() local
1990 if (!tun) in tun_chr_write_iter()
1996 result = tun_get_user(tun, tfile, NULL, from, noblock, false); in tun_chr_write_iter()
1998 tun_put(tun); in tun_chr_write_iter()
2002 static ssize_t tun_put_user_xdp(struct tun_struct *tun, in tun_put_user_xdp() argument
2011 if (tun->flags & IFF_VNET_HDR) { in tun_put_user_xdp()
2014 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); in tun_put_user_xdp()
2023 dev_sw_netstats_tx_add(tun->dev, 1, ret); in tun_put_user_xdp()
2030 static ssize_t tun_put_user(struct tun_struct *tun, in tun_put_user() argument
2045 if (tun->flags & IFF_VNET_HDR) in tun_put_user()
2046 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); in tun_put_user()
2050 if (!(tun->flags & IFF_NO_PI)) { in tun_put_user()
2068 ret = tun_vnet_hdr_tnl_from_skb(tun->flags, tun->dev, skb, in tun_put_user()
2078 ret = __tun_vnet_hdr_put(vnet_hdr_sz, tun->dev->features, in tun_put_user()
2107 dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); in tun_put_user()
2154 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, in tun_do_read() argument
2176 ret = tun_put_user_xdp(tun, tfile, xdpf, to); in tun_do_read()
2181 ret = tun_put_user(tun, tfile, skb, to); in tun_do_read()
2195 struct tun_struct *tun = tun_get(tfile); in tun_chr_read_iter() local
2199 if (!tun) in tun_chr_read_iter()
2205 ret = tun_do_read(tun, tfile, to, noblock, NULL); in tun_chr_read_iter()
2209 tun_put(tun); in tun_chr_read_iter()
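
tun_chr_write_iter() and tun_chr_read_iter() are the file_operations behind plain write()/read() on the character device: each write injects one packet into the stack via tun_get_user(), each read drains one via tun_do_read(), and tun_chr_poll() above wires the fd into poll()/select(). A minimal sketch of that userspace side, assuming a tap device named tap0 already exists and is up:

/* Sketch: receive frames from an existing tap device with poll() + read().
 * Assumes "tap0" exists (e.g. created beforehand with ip tuntap). */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

int main(void)
{
    struct ifreq ifr = { 0 };
    char buf[2048];
    int fd = open("/dev/net/tun", O_RDWR);

    if (fd < 0)
        return 1;

    /* IFF_NO_PI: frames are raw. Without it, each read()/write() carries a
     * 4-byte struct tun_pi prefix (see the IFF_NO_PI branches in
     * tun_get_user()/tun_put_user() above). */
    strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
    if (ioctl(fd, TUNSETIFF, &ifr) < 0)
        return 1;

    for (;;) {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        if (poll(&pfd, 1, -1) < 0)
            break;
        ssize_t n = read(fd, buf, sizeof(buf)); /* one frame per read */
        if (n < 0)
            break;
        printf("got %zd-byte frame\n", n);
    }
    close(fd);
    return 0;
}
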
2221 static int __tun_set_ebpf(struct tun_struct *tun, in __tun_set_ebpf() argument
2234 spin_lock_bh(&tun->lock); in __tun_set_ebpf()
2236 lockdep_is_held(&tun->lock)); in __tun_set_ebpf()
2238 spin_unlock_bh(&tun->lock); in __tun_set_ebpf()
2248 struct tun_struct *tun = netdev_priv(dev); in tun_free_netdev() local
2250 BUG_ON(!(list_empty(&tun->disabled))); in tun_free_netdev()
2252 tun_flow_uninit(tun); in tun_free_netdev()
2253 security_tun_dev_free_security(tun->security); in tun_free_netdev()
2254 __tun_set_ebpf(tun, &tun->steering_prog, NULL); in tun_free_netdev()
2255 __tun_set_ebpf(tun, &tun->filter_prog, NULL); in tun_free_netdev()
2260 struct tun_struct *tun = netdev_priv(dev); in tun_setup() local
2262 tun->owner = INVALID_UID; in tun_setup()
2263 tun->group = INVALID_GID; in tun_setup()
2264 tun_default_link_ksettings(dev, &tun->link_ksettings); in tun_setup()
2303 struct tun_struct *tun = netdev_priv(dev); in tun_fill_info() local
2305 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) in tun_fill_info()
2307 if (uid_valid(tun->owner) && in tun_fill_info()
2309 from_kuid_munged(current_user_ns(), tun->owner))) in tun_fill_info()
2311 if (gid_valid(tun->group) && in tun_fill_info()
2313 from_kgid_munged(current_user_ns(), tun->group))) in tun_fill_info()
2315 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) in tun_fill_info()
2317 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) in tun_fill_info()
2319 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) in tun_fill_info()
2322 !!(tun->flags & IFF_MULTI_QUEUE))) in tun_fill_info()
2324 if (tun->flags & IFF_MULTI_QUEUE) { in tun_fill_info()
2325 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) in tun_fill_info()
2328 tun->numdisabled)) in tun_fill_info()
2373 static int tun_xdp_one(struct tun_struct *tun, in tun_xdp_one() argument
2395 xdp_prog = rcu_dereference(tun->xdp_prog); in tun_xdp_one()
2405 ret = tun_xdp_act(tun, xdp_prog, xdp, act); in tun_xdp_one()
2450 features = tun_vnet_hdr_guest_features(READ_ONCE(tun->vnet_hdr_sz)); in tun_xdp_one()
2452 if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, tnl_hdr)) { in tun_xdp_one()
2453 atomic_long_inc(&tun->rx_frame_errors); in tun_xdp_one()
2459 skb->protocol = eth_type_trans(skb, tun->dev); in tun_xdp_one()
2472 if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && in tun_xdp_one()
2497 dev_sw_netstats_rx_add(tun->dev, datasize); in tun_xdp_one()
2500 tun_flow_update(tun, rxhash, tfile); in tun_xdp_one()
2510 struct tun_struct *tun = tun_get(tfile); in tun_sendmsg() local
2514 if (!tun) in tun_sendmsg()
2532 ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); in tun_sendmsg()
2553 ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, in tun_sendmsg()
2557 tun_put(tun); in tun_sendmsg()
2565 struct tun_struct *tun = tun_get(tfile); in tun_recvmsg() local
2569 if (!tun) { in tun_recvmsg()
2583 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); in tun_recvmsg()
2589 tun_put(tun); in tun_recvmsg()
2593 tun_put(tun); in tun_recvmsg()
2616 struct tun_struct *tun; in tun_peek_len() local
2619 tun = tun_get(tfile); in tun_peek_len()
2620 if (!tun) in tun_peek_len()
2624 tun_put(tun); in tun_peek_len()
2642 static int tun_flags(struct tun_struct *tun) in tun_flags() argument
2644 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); in tun_flags()
2650 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); in tun_flags_show() local
2651 return sysfs_emit(buf, "0x%x\n", tun_flags(tun)); in tun_flags_show()
2657 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); in owner_show() local
2658 return uid_valid(tun->owner)? in owner_show()
2660 from_kuid_munged(current_user_ns(), tun->owner)) : in owner_show()
2667 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); in group_show() local
2668 return gid_valid(tun->group) ? in group_show()
2670 from_kgid_munged(current_user_ns(), tun->group)) : in group_show()
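
tun_flags_show(), owner_show() and group_show() export the same state through sysfs, so it can be inspected without holding a tun fd. A short sketch; the attribute names are the ones defined above, while the device name tap0 is an assumption:

/* Sketch: read the tun sysfs attributes of an existing device. */
#include <stdio.h>

static void show(const char *attr)
{
    char path[128], line[64];
    FILE *f;

    snprintf(path, sizeof(path), "/sys/class/net/tap0/%s", attr);
    f = fopen(path, "r");
    if (f && fgets(line, sizeof(line), f))
        printf("%s: %s", attr, line);
    if (f)
        fclose(f);
}

int main(void)
{
    show("tun_flags"); /* tun_flags_show(): hex flag word */
    show("owner");     /* owner_show(): uid, or -1 if unset */
    show("group");     /* group_show(): gid, or -1 if unset */
    return 0;
}
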
2691 struct tun_struct *tun; in tun_set_iff() local
2713 tun = netdev_priv(dev); in tun_set_iff()
2715 tun = netdev_priv(dev); in tun_set_iff()
2720 !!(tun->flags & IFF_MULTI_QUEUE)) in tun_set_iff()
2723 if (tun_not_capable(tun)) in tun_set_iff()
2725 err = security_tun_dev_open(tun->security); in tun_set_iff()
2729 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, in tun_set_iff()
2735 if (tun->flags & IFF_MULTI_QUEUE && in tun_set_iff()
2736 (tun->numqueues + tun->numdisabled > 1)) { in tun_set_iff()
2744 tun->flags = (tun->flags & ~TUN_FEATURES) | in tun_set_iff()
2787 tun = netdev_priv(dev); in tun_set_iff()
2788 tun->dev = dev; in tun_set_iff()
2789 tun->flags = flags; in tun_set_iff()
2790 tun->txflt.count = 0; in tun_set_iff()
2791 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); in tun_set_iff()
2793 tun->align = NET_SKB_PAD; in tun_set_iff()
2794 tun->filter_attached = false; in tun_set_iff()
2795 tun->sndbuf = tfile->socket.sk->sk_sndbuf; in tun_set_iff()
2796 tun->rx_batched = 0; in tun_set_iff()
2797 RCU_INIT_POINTER(tun->steering_prog, NULL); in tun_set_iff()
2799 tun->ifr = ifr; in tun_set_iff()
2800 tun->file = file; in tun_set_iff()
2804 err = register_netdevice(tun->dev); in tun_set_iff()
2812 rcu_assign_pointer(tfile->tun, tun); in tun_set_iff()
2816 netif_carrier_off(tun->dev); in tun_set_iff()
2818 netif_carrier_on(tun->dev); in tun_set_iff()
2823 if (netif_running(tun->dev)) in tun_set_iff()
2824 netif_tx_wake_all_queues(tun->dev); in tun_set_iff()
2826 strcpy(ifr->ifr_name, tun->dev->name); in tun_set_iff()
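
tun_set_iff() is the TUNSETIFF backend: it either looks up an existing device by name and re-attaches the caller (the tun_not_capable() and IFF_MULTI_QUEUE checks above) or allocates and registers a fresh netdev, then copies the final name back into the ifreq. The canonical userspace sequence, including TUNSETPERSIST and TUNSETOWNER which drive the IFF_PERSIST and tun->owner handling listed here (needs CAP_NET_ADMIN):

/* Sketch: create a persistent tap device owned by the calling uid. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

int main(void)
{
    struct ifreq ifr = { 0 };
    int fd = open("/dev/net/tun", O_RDWR);

    if (fd < 0)
        return 1;

    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
    strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ - 1); /* kernel picks tap0, tap1, ... */
    if (ioctl(fd, TUNSETIFF, &ifr) < 0)
        return 1;
    printf("created %s\n", ifr.ifr_name); /* tun_set_iff() copies the name back */

    if (ioctl(fd, TUNSETPERSIST, 1) < 0 ||   /* sets IFF_PERSIST */
        ioctl(fd, TUNSETOWNER, getuid()) < 0)
        return 1;

    close(fd); /* device survives because of IFF_PERSIST */
    return 0;
}
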
2830 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) in tun_get_iff() argument
2832 strcpy(ifr->ifr_name, tun->dev->name); in tun_get_iff()
2834 ifr->ifr_flags = tun_flags(tun); in tun_get_iff()
2842 static int set_offload(struct tun_struct *tun, unsigned long arg) in set_offload() argument
2888 tun->set_features = features; in set_offload()
2889 tun->dev->wanted_features &= ~TUN_USER_FEATURES; in set_offload()
2890 tun->dev->wanted_features |= features; in set_offload()
2891 netdev_update_features(tun->dev); in set_offload()
2896 static void tun_detach_filter(struct tun_struct *tun, int n) in tun_detach_filter() argument
2902 tfile = rtnl_dereference(tun->tfiles[i]); in tun_detach_filter()
2908 tun->filter_attached = false; in tun_detach_filter()
2911 static int tun_attach_filter(struct tun_struct *tun) in tun_attach_filter() argument
2916 for (i = 0; i < tun->numqueues; i++) { in tun_attach_filter()
2917 tfile = rtnl_dereference(tun->tfiles[i]); in tun_attach_filter()
2919 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); in tun_attach_filter()
2922 tun_detach_filter(tun, i); in tun_attach_filter()
2927 tun->filter_attached = true; in tun_attach_filter()
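
tun_attach_filter() fans one classic-BPF program (tun->fprog) out to every queue's socket via sk_attach_filter(), unwinding with tun_detach_filter() on partial failure; filter_attached is what lets tun_attach() replay the filter onto late-joining queues. Attaching from userspace, with a trivial accept-all program standing in for a real filter (one generated by tcpdump -dd, say); the fd is assumed to have already passed TUNSETIFF as IFF_TAP, since the ioctl handler rejects IFF_TUN:

/* Sketch: attach a classic BPF filter to a tap fd via TUNATTACHFILTER. */
#include <linux/filter.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>

static int attach_accept_all(int tapfd)
{
    struct sock_filter code[] = {
        BPF_STMT(BPF_RET | BPF_K, 0xffffffff), /* accept, full packet length */
    };
    struct sock_fprog fprog = {
        .len = 1,
        .filter = code,
    };

    /* TUNDETACHFILTER (no argument payload used) removes it again. */
    return ioctl(tapfd, TUNATTACHFILTER, &fprog);
}
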
2931 static void tun_set_sndbuf(struct tun_struct *tun) in tun_set_sndbuf() argument
2936 for (i = 0; i < tun->numqueues; i++) { in tun_set_sndbuf()
2937 tfile = rtnl_dereference(tun->tfiles[i]); in tun_set_sndbuf()
2938 tfile->socket.sk->sk_sndbuf = tun->sndbuf; in tun_set_sndbuf()
2945 struct tun_struct *tun; in tun_set_queue() local
2951 tun = tfile->detached; in tun_set_queue()
2952 if (!tun) { in tun_set_queue()
2956 ret = security_tun_dev_attach_queue(tun->security); in tun_set_queue()
2959 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, in tun_set_queue()
2960 tun->flags & IFF_NAPI_FRAGS, true); in tun_set_queue()
2962 tun = rtnl_dereference(tfile->tun); in tun_set_queue()
2963 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) in tun_set_queue()
2971 netdev_state_change(tun->dev); in tun_set_queue()
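
tun_set_queue() implements TUNSETQUEUE on multiqueue devices: IFF_DETACH_QUEUE parks a still-open fd on tun->disabled through the tun_disable_queue()/__tun_detach() path, and IFF_ATTACH_QUEUE revives it through tun_enable_queue() and tun_attach(). A sketch; the device name mqtap0 is an assumption:

/* Sketch: open two queues on one multiqueue tap, park and revive one. */
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

static int open_queue(const char *name)
{
    struct ifreq ifr = { 0 };
    int fd = open("/dev/net/tun", O_RDWR);

    if (fd < 0)
        return -1;
    /* Same name + IFF_MULTI_QUEUE attaches an extra queue (tun_attach()). */
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
    strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
    if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}

int main(void)
{
    struct ifreq ifr = { 0 };
    int q0 = open_queue("mqtap0");
    int q1 = open_queue("mqtap0");

    if (q0 < 0 || q1 < 0)
        return 1;

    ifr.ifr_flags = IFF_DETACH_QUEUE; /* tun_disable_queue(): fd stays open */
    ioctl(q1, TUNSETQUEUE, &ifr);

    ifr.ifr_flags = IFF_ATTACH_QUEUE; /* tun_enable_queue() + tun_attach() */
    ioctl(q1, TUNSETQUEUE, &ifr);

    close(q1); /* __tun_detach(); the remaining queue keeps the device alive */
    close(q0);
    return 0;
}
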
2978 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, in tun_set_ebpf() argument
2995 return __tun_set_ebpf(tun, prog_p, prog); in tun_set_ebpf()
3042 struct tun_struct *tun; in __tun_chr_ioctl() local
3077 tun = tun_get(tfile); in __tun_chr_ioctl()
3080 if (tun) in __tun_chr_ioctl()
3096 if (tun) in __tun_chr_ioctl()
3111 if (!tun) in __tun_chr_ioctl()
3114 netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); in __tun_chr_ioctl()
3116 net = dev_net(tun->dev); in __tun_chr_ioctl()
3120 tun_get_iff(tun, &ifr); in __tun_chr_ioctl()
3135 netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", in __tun_chr_ioctl()
3143 if (arg && !(tun->flags & IFF_PERSIST)) { in __tun_chr_ioctl()
3144 tun->flags |= IFF_PERSIST; in __tun_chr_ioctl()
3148 if (!arg && (tun->flags & IFF_PERSIST)) { in __tun_chr_ioctl()
3149 tun->flags &= ~IFF_PERSIST; in __tun_chr_ioctl()
3154 netif_info(tun, drv, tun->dev, "persist %s\n", in __tun_chr_ioctl()
3165 tun->owner = owner; in __tun_chr_ioctl()
3167 netif_info(tun, drv, tun->dev, "owner set to %u\n", in __tun_chr_ioctl()
3168 from_kuid(&init_user_ns, tun->owner)); in __tun_chr_ioctl()
3178 tun->group = group; in __tun_chr_ioctl()
3180 netif_info(tun, drv, tun->dev, "group set to %u\n", in __tun_chr_ioctl()
3181 from_kgid(&init_user_ns, tun->group)); in __tun_chr_ioctl()
3186 if (tun->dev->flags & IFF_UP) { in __tun_chr_ioctl()
3187 netif_info(tun, drv, tun->dev, in __tun_chr_ioctl()
3192 tun->dev); in __tun_chr_ioctl()
3195 netif_info(tun, drv, tun->dev, in __tun_chr_ioctl()
3199 tun->dev->type = (int) arg; in __tun_chr_ioctl()
3200 tun->dev->addr_len = tun_get_addr_len(tun->dev->type); in __tun_chr_ioctl()
3201 netif_info(tun, drv, tun->dev, "linktype set to %d\n", in __tun_chr_ioctl()
3202 tun->dev->type); in __tun_chr_ioctl()
3204 tun->dev); in __tun_chr_ioctl()
3209 tun->msg_enable = (u32)arg; in __tun_chr_ioctl()
3213 ret = set_offload(tun, arg); in __tun_chr_ioctl()
3219 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) in __tun_chr_ioctl()
3221 ret = update_filter(&tun->txflt, (void __user *)arg); in __tun_chr_ioctl()
3226 netif_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); in __tun_chr_ioctl()
3233 if (tun->dev->addr_len > sizeof(ifr.ifr_hwaddr)) { in __tun_chr_ioctl()
3237 ret = dev_set_mac_address_user(tun->dev, in __tun_chr_ioctl()
3258 tun->sndbuf = sndbuf; in __tun_chr_ioctl()
3259 tun_set_sndbuf(tun); in __tun_chr_ioctl()
3265 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) in __tun_chr_ioctl()
3268 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) in __tun_chr_ioctl()
3271 ret = tun_attach_filter(tun); in __tun_chr_ioctl()
3277 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) in __tun_chr_ioctl()
3280 tun_detach_filter(tun, tun->numqueues); in __tun_chr_ioctl()
3285 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) in __tun_chr_ioctl()
3288 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) in __tun_chr_ioctl()
3294 ret = tun_set_ebpf(tun, &tun->steering_prog, argp); in __tun_chr_ioctl()
3298 ret = tun_set_ebpf(tun, &tun->filter_prog, argp); in __tun_chr_ioctl()
3306 ret = tun_net_change_carrier(tun->dev, (bool)carrier); in __tun_chr_ioctl()
3317 ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp); in __tun_chr_ioctl()
3322 netdev_state_change(tun->dev); in __tun_chr_ioctl()
3326 if (tun) in __tun_chr_ioctl()
3327 tun_put(tun); in __tun_chr_ioctl()
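
The IFF_VNET_HDR branches in tun_get_user()/tun_put_user() prepend a virtio-net header of tun->vnet_hdr_sz bytes to every frame, and the tun_vnet_ioctl() call at the end of the switch above services TUNGETVNETHDRSZ/TUNSETVNETHDRSZ among others. A sketch of negotiating it the way virtio backends typically do; the device name vtap0 is an assumption:

/* Sketch: enable the virtio-net header and grow it to the mergeable-rxbuf size. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/virtio_net.h>

int main(void)
{
    struct ifreq ifr = { 0 };
    int hdrsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    int fd = open("/dev/net/tun", O_RDWR);

    if (fd < 0)
        return 1;

    ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
    strncpy(ifr.ifr_name, "vtap0", IFNAMSIZ - 1);
    if (ioctl(fd, TUNSETIFF, &ifr) < 0)
        return 1;

    /* Every read()/write() now begins with hdrsz bytes of header. */
    if (ioctl(fd, TUNSETVNETHDRSZ, &hdrsz) < 0)
        return 1;

    if (ioctl(fd, TUNGETVNETHDRSZ, &hdrsz) == 0)
        printf("vnet hdr size: %d\n", hdrsz);
    close(fd);
    return 0;
}
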
3405 RCU_INIT_POINTER(tfile->tun, NULL); in tun_chr_open()
3442 struct tun_struct *tun; in tun_chr_show_fdinfo() local
3448 tun = tun_get(tfile); in tun_chr_show_fdinfo()
3449 if (tun) in tun_chr_show_fdinfo()
3450 tun_get_iff(tun, &ifr); in tun_chr_show_fdinfo()
3453 if (tun) in tun_chr_show_fdinfo()
3454 tun_put(tun); in tun_chr_show_fdinfo()
3501 struct tun_struct *tun = netdev_priv(dev); in tun_get_link_ksettings() local
3503 memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); in tun_get_link_ksettings()
3510 struct tun_struct *tun = netdev_priv(dev); in tun_set_link_ksettings() local
3512 memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); in tun_set_link_ksettings()
3518 struct tun_struct *tun = netdev_priv(dev); in tun_get_drvinfo() local
3523 switch (tun->flags & TUN_TYPE_MASK) { in tun_get_drvinfo()
3535 struct tun_struct *tun = netdev_priv(dev); in tun_get_msglevel() local
3537 return tun->msg_enable; in tun_get_msglevel()
3542 struct tun_struct *tun = netdev_priv(dev); in tun_set_msglevel() local
3544 tun->msg_enable = value; in tun_set_msglevel()
3552 struct tun_struct *tun = netdev_priv(dev); in tun_get_coalesce() local
3554 ec->rx_max_coalesced_frames = tun->rx_batched; in tun_get_coalesce()
3564 struct tun_struct *tun = netdev_priv(dev); in tun_set_coalesce() local
3567 tun->rx_batched = NAPI_POLL_WEIGHT; in tun_set_coalesce()
3569 tun->rx_batched = ec->rx_max_coalesced_frames; in tun_set_coalesce()
3577 struct tun_struct *tun = netdev_priv(dev); in tun_get_channels() local
3579 channels->combined_count = tun->numqueues; in tun_get_channels()
3580 channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1; in tun_get_channels()
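
tun_get_coalesce()/tun_set_coalesce() expose tun->rx_batched, the batching threshold consumed by tun_rx_batched() on the receive path, as the standard ethtool rx-frames coalescing parameter, clamping writes to NAPI_POLL_WEIGHT. A sketch of the C equivalent of "ethtool -C tap0 rx-frames 32", using the generic SIOCETHTOOL interface:

/* Sketch: set tun's rx_batched via the ethtool coalesce ioctl. */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_rx_batched(const char *dev, unsigned int frames)
{
    struct ethtool_coalesce ec = {
        .cmd = ETHTOOL_SCOALESCE,
        .rx_max_coalesced_frames = frames, /* tun clamps to NAPI_POLL_WEIGHT */
    };
    struct ifreq ifr = { 0 };
    int ret;
    int sk = socket(AF_INET, SOCK_DGRAM, 0);

    if (sk < 0)
        return -1;
    strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
    ifr.ifr_data = (void *)&ec;
    ret = ioctl(sk, SIOCETHTOOL, &ifr);
    close(sk);
    return ret;
}
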
3597 static int tun_queue_resize(struct tun_struct *tun) in tun_queue_resize() argument
3599 struct net_device *dev = tun->dev; in tun_queue_resize()
3602 int n = tun->numqueues + tun->numdisabled; in tun_queue_resize()
3609 for (i = 0; i < tun->numqueues; i++) { in tun_queue_resize()
3610 tfile = rtnl_dereference(tun->tfiles[i]); in tun_queue_resize()
3613 list_for_each_entry(tfile, &tun->disabled, next) in tun_queue_resize()
3628 struct tun_struct *tun = netdev_priv(dev); in tun_device_event() local
3636 if (tun_queue_resize(tun)) in tun_device_event()
3640 for (i = 0; i < tun->numqueues; i++) { in tun_device_event()
3643 tfile = rtnl_dereference(tun->tfiles[i]); in tun_device_event()