/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
        const struct bpf_prog_offload_ops *ops;
        struct list_head netdevs;
        void *priv;
};

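/* Per-netdev offload state, keyed by the netdev pointer in the offdevs
 * hashtable. An entry exists for every netdev of a registered offload
 * device, and also for netdevs which only have device-bound (but not
 * offloaded) programs, in which case offdev is NULL.
 */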
struct bpf_offload_netdev {
        struct rhash_head l;
        struct net_device *netdev;
        struct bpf_offload_dev *offdev; /* NULL when bound-only */
        struct list_head progs;
        struct list_head maps;
        struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
        .nelem_hint = 4,
        .key_len = sizeof(struct net_device *),
        .key_offset = offsetof(struct bpf_offload_netdev, netdev),
        .head_offset = offsetof(struct bpf_offload_netdev, l),
        .automatic_shrinking = true,
};

static struct rhashtable offdevs;

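/* A netdev can host offloaded or device-bound objects only if it
 * implements the ndo_bpf callback.
 */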
static int bpf_dev_offload_check(struct net_device *netdev)
{
        if (!netdev)
                return -EINVAL;
        if (!netdev->netdev_ops->ndo_bpf)
                return -EOPNOTSUPP;
        return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
        lockdep_assert_held(&bpf_devs_lock);

        return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
                                             struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev;
        int err;

        ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
        if (!ondev)
                return -ENOMEM;

        ondev->netdev = netdev;
        ondev->offdev = offdev;
        INIT_LIST_HEAD(&ondev->progs);
        INIT_LIST_HEAD(&ondev->maps);

        err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
        if (err) {
                netdev_warn(netdev, "failed to register for BPF offload\n");
                goto err_free;
        }

        if (offdev)
                list_add(&ondev->offdev_netdevs, &offdev->netdevs);
        return 0;

err_free:
        kfree(ondev);
        return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
        struct bpf_prog_offload *offload = prog->aux->offload;

        if (offload->dev_state)
                offload->offdev->ops->destroy(prog);

        list_del_init(&offload->offloads);
        kfree(offload);
        prog->aux->offload = NULL;
}

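/* Forward a map command to the map's netdev via ndo_bpf; RTNL must be
 * held by the caller.
 */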
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
                               enum bpf_netdev_command cmd)
{
        struct netdev_bpf data = {};
        struct net_device *netdev;

        ASSERT_RTNL();

        data.command = cmd;
        data.offmap = offmap;
        /* Caller must make sure netdev is valid */
        netdev = offmap->netdev;

        return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
        WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
        /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
        bpf_map_free_id(&offmap->map);
        list_del_init(&offmap->offloads);
        offmap->netdev = NULL;
}

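/* Tear down a netdev's offload state. Programs and maps are migrated
 * to another netdev of the same offload device when one is available;
 * otherwise they are destroyed.
 */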
static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                                                struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev, *altdev = NULL;
        struct bpf_offloaded_map *offmap, *mtmp;
        struct bpf_prog_offload *offload, *ptmp;

        ASSERT_RTNL();

        ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
        if (WARN_ON(!ondev))
                return;

        WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

        /* Try to move the objects to another netdev of the device */
        if (offdev) {
                list_del(&ondev->offdev_netdevs);
                altdev = list_first_entry_or_null(&offdev->netdevs,
                                                  struct bpf_offload_netdev,
                                                  offdev_netdevs);
        }

        if (altdev) {
                list_for_each_entry(offload, &ondev->progs, offloads)
                        offload->netdev = altdev->netdev;
                list_splice_init(&ondev->progs, &altdev->progs);

                list_for_each_entry(offmap, &ondev->maps, offloads)
                        offmap->netdev = altdev->netdev;
                list_splice_init(&ondev->maps, &altdev->maps);
        } else {
                list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
                        __bpf_prog_offload_destroy(offload->prog);
                list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
                        __bpf_map_offload_destroy(offmap);
        }

        WARN_ON(!list_empty(&ondev->progs));
        WARN_ON(!list_empty(&ondev->maps));
        kfree(ondev);
}

static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev;
        struct bpf_prog_offload *offload;
        int err;

        offload = kzalloc(sizeof(*offload), GFP_USER);
        if (!offload)
                return -ENOMEM;

        offload->prog = prog;
        offload->netdev = netdev;

        ondev = bpf_offload_find_netdev(offload->netdev);
        if (!ondev) {
                if (bpf_prog_is_offloaded(prog->aux)) {
                        err = -EINVAL;
                        goto err_free;
                }

                /* When only binding to the device, explicitly
                 * create an entry in the hashtable.
                 */
                err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
                if (err)
                        goto err_free;
                ondev = bpf_offload_find_netdev(offload->netdev);
        }
        offload->offdev = ondev->offdev;
        prog->aux->offload = offload;
        list_add_tail(&offload->offloads, &ondev->progs);

        return 0;
err_free:
        kfree(offload);
        return err;
}

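/* Bind a program to the netdev identified by attr->prog_ifindex.
 * Without BPF_F_XDP_DEV_BOUND_ONLY full offload is requested; with it
 * the program is merely bound to the device, which is only valid for
 * XDP programs.
 */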
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
        struct net_device *netdev;
        int err;

        if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_type != BPF_PROG_TYPE_XDP)
                return -EINVAL;

        if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
                return -EINVAL;

        if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
                return -EINVAL;

        netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
        if (!netdev)
                return -EINVAL;

        err = bpf_dev_offload_check(netdev);
        if (err)
                goto out;

        prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

        down_write(&bpf_devs_lock);
        err = __bpf_prog_dev_bound_init(prog, netdev);
        up_write(&bpf_devs_lock);

out:
        dev_put(netdev);
        return err;
}

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
        int err;

        if (!bpf_prog_is_dev_bound(old_prog->aux))
                return 0;

        if (bpf_prog_is_offloaded(old_prog->aux))
                return -EINVAL;

        new_prog->aux->dev_bound = old_prog->aux->dev_bound;
        new_prog->aux->offload_requested = old_prog->aux->offload_requested;

        down_write(&bpf_devs_lock);
        if (!old_prog->aux->offload) {
                err = -EINVAL;
                goto out;
        }

        err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
        up_write(&bpf_devs_lock);
        return err;
}

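/* The verifier hooks below forward to the offload device's ops under
 * bpf_devs_lock, so the offload state cannot be torn down while a
 * callback runs; they fail with -ENODEV once the program has been
 * orphaned from its device.
 */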
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = prog->aux->offload;
        if (offload) {
                ret = offload->offdev->ops->prepare(prog);
                offload->dev_state = !ret;
        }
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
                                 int insn_idx, int prev_insn_idx)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = env->prog->aux->offload;
        if (offload)
                ret = offload->offdev->ops->insn_hook(env, insn_idx,
                                                      prev_insn_idx);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = env->prog->aux->offload;
        if (offload) {
                if (offload->offdev->ops->finalize)
                        ret = offload->offdev->ops->finalize(env);
                else
                        ret = 0;
        }
        up_read(&bpf_devs_lock);

        return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
                              struct bpf_insn *insn)
{
        const struct bpf_prog_offload_ops *ops;
        struct bpf_prog_offload *offload;
        int ret = -EOPNOTSUPP;

        down_read(&bpf_devs_lock);
        offload = env->prog->aux->offload;
        if (offload) {
                ops = offload->offdev->ops;
                if (!offload->opt_failed && ops->replace_insn)
                        ret = ops->replace_insn(env, off, insn);
                offload->opt_failed |= ret;
        }
        up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
        struct bpf_prog_offload *offload;
        int ret = -EOPNOTSUPP;

        down_read(&bpf_devs_lock);
        offload = env->prog->aux->offload;
        if (offload) {
                if (!offload->opt_failed && offload->offdev->ops->remove_insns)
                        ret = offload->offdev->ops->remove_insns(env, off, cnt);
                offload->opt_failed |= ret;
        }
        up_read(&bpf_devs_lock);
}

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
        struct bpf_offload_netdev *ondev;
        struct net_device *netdev;

        rtnl_lock();
        down_write(&bpf_devs_lock);
        if (prog->aux->offload) {
                list_del_init(&prog->aux->offload->offloads);

                netdev = prog->aux->offload->netdev;
                __bpf_prog_offload_destroy(prog);

                ondev = bpf_offload_find_netdev(netdev);
                if (!ondev->offdev && list_empty(&ondev->progs))
                        __bpf_offload_dev_netdev_unregister(NULL, netdev);
        }
        up_write(&bpf_devs_lock);
        rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = prog->aux->offload;
        if (offload)
                ret = offload->offdev->ops->translate(prog);
        up_read(&bpf_devs_lock);

        return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
                                          const struct bpf_insn *insn)
{
        WARN(1, "attempt to execute device eBPF program on the host!");
        return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
        prog->bpf_func = bpf_prog_warn_on_exec;

        return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
        struct bpf_prog *prog;
        struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
        struct ns_get_path_bpf_prog_args *args = private_data;
        struct bpf_prog_aux *aux = args->prog->aux;
        struct ns_common *ns;
        struct net *net;

        rtnl_lock();
        down_read(&bpf_devs_lock);

        if (aux->offload) {
                args->info->ifindex = aux->offload->netdev->ifindex;
                net = dev_net(aux->offload->netdev);
                get_net(net);
                ns = &net->ns;
        } else {
                args->info->ifindex = 0;
                ns = NULL;
        }

        up_read(&bpf_devs_lock);
        rtnl_unlock();

        return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
                               struct bpf_prog *prog)
{
        struct ns_get_path_bpf_prog_args args = {
                .prog = prog,
                .info = info,
        };
        struct bpf_prog_aux *aux = prog->aux;
        struct inode *ns_inode;
        struct path ns_path;
        char __user *uinsns;
        int res;
        u32 ulen;

        res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
        if (res) {
                if (!info->ifindex)
                        return -ENODEV;
                return res;
        }

        down_read(&bpf_devs_lock);

        if (!aux->offload) {
                up_read(&bpf_devs_lock);
                return -ENODEV;
        }

        ulen = info->jited_prog_len;
        info->jited_prog_len = aux->offload->jited_len;
        if (info->jited_prog_len && ulen) {
                uinsns = u64_to_user_ptr(info->jited_prog_insns);
                ulen = min_t(u32, info->jited_prog_len, ulen);
                if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
                        up_read(&bpf_devs_lock);
                        return -EFAULT;
                }
        }

        up_read(&bpf_devs_lock);

        ns_inode = ns_path.dentry->d_inode;
        info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
        info->netns_ino = ns_inode->i_ino;
        path_put(&ns_path);

        return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

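/* Allocate a map on the device. Only array and hash maps may be
 * offloaded; the actual allocation is performed by the netdev via the
 * BPF_OFFLOAD_MAP_ALLOC command.
 */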
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
        struct net *net = current->nsproxy->net_ns;
        struct bpf_offload_netdev *ondev;
        struct bpf_offloaded_map *offmap;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
        if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
            attr->map_type != BPF_MAP_TYPE_HASH)
                return ERR_PTR(-EINVAL);

        offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
        if (!offmap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&offmap->map, attr);

        rtnl_lock();
        down_write(&bpf_devs_lock);
        offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
        err = bpf_dev_offload_check(offmap->netdev);
        if (err)
                goto err_unlock;

        ondev = bpf_offload_find_netdev(offmap->netdev);
        if (!ondev) {
                err = -EINVAL;
                goto err_unlock;
        }

        err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
        if (err)
                goto err_unlock;

        list_add_tail(&offmap->offloads, &ondev->maps);
        up_write(&bpf_devs_lock);
        rtnl_unlock();

        return &offmap->map;

err_unlock:
        up_write(&bpf_devs_lock);
        rtnl_unlock();
        bpf_map_area_free(offmap);
        return ERR_PTR(err);
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);

        rtnl_lock();
        down_write(&bpf_devs_lock);
        if (offmap->netdev)
                __bpf_map_offload_destroy(offmap);
        up_write(&bpf_devs_lock);
        rtnl_unlock();

        bpf_map_area_free(offmap);
}

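/* The element accessors below forward to the device's map ops and fail
 * with -ENODEV once the map has been severed from its netdev.
 */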
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
                                void *key, void *value, u64 flags)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        if (unlikely(flags > BPF_EXIST))
                return -EINVAL;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_update_elem(offmap, key, value,
                                                       flags);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_delete_elem(offmap, key);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
        up_read(&bpf_devs_lock);

        return ret;
}

struct ns_get_path_bpf_map_args {
        struct bpf_offloaded_map *offmap;
        struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
        struct ns_get_path_bpf_map_args *args = private_data;
        struct ns_common *ns;
        struct net *net;

        rtnl_lock();
        down_read(&bpf_devs_lock);

        if (args->offmap->netdev) {
                args->info->ifindex = args->offmap->netdev->ifindex;
                net = dev_net(args->offmap->netdev);
                get_net(net);
                ns = &net->ns;
        } else {
                args->info->ifindex = 0;
                ns = NULL;
        }

        up_read(&bpf_devs_lock);
        rtnl_unlock();

        return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
        struct ns_get_path_bpf_map_args args = {
                .offmap = map_to_offmap(map),
                .info = info,
        };
        struct inode *ns_inode;
        struct path ns_path;
        int res;

        res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
        if (res) {
                if (!info->ifindex)
                        return -ENODEV;
                return res;
        }

        ns_inode = ns_path.dentry->d_inode;
        info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
        info->netns_ino = ns_inode->i_ino;
        path_put(&ns_path);

        return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
                                    struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev1, *ondev2;
        struct bpf_prog_offload *offload;

        if (!bpf_prog_is_dev_bound(prog->aux))
                return false;

        offload = prog->aux->offload;
        if (!offload)
                return false;
        if (offload->netdev == netdev)
                return true;

        ondev1 = bpf_offload_find_netdev(offload->netdev);
        ondev2 = bpf_offload_find_netdev(netdev);

        return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
        bool ret;

        down_read(&bpf_devs_lock);
        ret = __bpf_offload_dev_match(prog, netdev);
        up_read(&bpf_devs_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

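/* Two device-bound programs match only if they are bound to the same
 * netdev and agree on whether they are offloaded.
 */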
bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
        bool ret;

        if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
                return false;

        down_read(&bpf_devs_lock);
        ret = lhs->aux->offload && rhs->aux->offload &&
              lhs->aux->offload->netdev &&
              lhs->aux->offload->netdev == rhs->aux->offload->netdev;
        up_read(&bpf_devs_lock);

        return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
        struct bpf_offloaded_map *offmap;
        bool ret;

        if (!bpf_map_is_offloaded(map))
                return bpf_map_offload_neutral(map);
        offmap = map_to_offmap(map);

        down_read(&bpf_devs_lock);
        ret = __bpf_offload_dev_match(prog, offmap->netdev);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
                                    struct net_device *netdev)
{
        int err;

        down_write(&bpf_devs_lock);
        err = __bpf_offload_dev_netdev_register(offdev, netdev);
        up_write(&bpf_devs_lock);
        return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                                       struct net_device *netdev)
{
        down_write(&bpf_devs_lock);
        __bpf_offload_dev_netdev_unregister(offdev, netdev);
        up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

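/* Create an offload device for a driver. A minimal usage sketch
 * (names are illustrative, not taken from any particular driver):
 *
 *      offdev = bpf_offload_dev_create(&my_bpf_dev_ops, priv);
 *      if (IS_ERR(offdev))
 *              return PTR_ERR(offdev);
 *      err = bpf_offload_dev_netdev_register(offdev, netdev);
 *
 * and the reverse on teardown, with RTNL held for the unregister step:
 *
 *      bpf_offload_dev_netdev_unregister(offdev, netdev);
 *      bpf_offload_dev_destroy(offdev);
 */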
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
        struct bpf_offload_dev *offdev;

        offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
        if (!offdev)
                return ERR_PTR(-ENOMEM);

        offdev->ops = ops;
        offdev->priv = priv;
        INIT_LIST_HEAD(&offdev->netdevs);

        return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
        WARN_ON(!list_empty(&offdev->netdevs));
        kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
        return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

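/* Called from the netdev unregister path to clean up the hashtable
 * entry of a bound-only netdev (one with no offload device).
 */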
void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
        struct bpf_offload_netdev *ondev;

        ASSERT_RTNL();

        down_write(&bpf_devs_lock);
        ondev = bpf_offload_find_netdev(dev);
        if (ondev && !ondev->offdev)
                __bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
        up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
                              struct bpf_prog_aux *prog_aux)
{
        if (!bpf_prog_is_dev_bound(prog_aux)) {
                bpf_log(log, "metadata kfuncs require device-bound program\n");
                return -EINVAL;
        }

        if (bpf_prog_is_offloaded(prog_aux)) {
                bpf_log(log, "metadata kfuncs can't be offloaded\n");
                return -EINVAL;
        }

        return 0;
}

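/* Resolve an XDP metadata kfunc to the bound netdev's implementation;
 * returns NULL if the netdev provides no xdp_metadata_ops or the
 * program is no longer bound to a device.
 */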
void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
        const struct xdp_metadata_ops *ops;
        void *p = NULL;

        /* bpf_devs_lock is not held across resolving multiple kfuncs,
         * so we can race with unregister_netdevice(). We rely on the
         * bpf_dev_bound_match() check at attach time to render such a
         * program unusable.
         */
        down_read(&bpf_devs_lock);
        if (!prog->aux->offload)
                goto out;

        ops = prog->aux->offload->netdev->xdp_metadata_ops;
        if (!ops)
                goto out;

        if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
                p = ops->xmo_rx_timestamp;
        else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
                p = ops->xmo_rx_hash;
out:
        up_read(&bpf_devs_lock);

        return p;
}

static int __init bpf_offload_init(void)
{
        return rhashtable_init(&offdevs, &offdevs_params);
}

late_initcall(bpf_offload_init);