Lines matching refs:reuse in net/core/sock_reuseport.c
22 struct sock_reuseport *reuse, bool bind_inany);
26 struct sock_reuseport *reuse; in reuseport_has_conns_set() local
32 reuse = rcu_dereference_protected(sk->sk_reuseport_cb, in reuseport_has_conns_set()
34 if (likely(reuse)) in reuseport_has_conns_set()
35 reuse->has_conns = 1; in reuseport_has_conns_set()
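Lines 26-35 above are reuseport_has_conns_set(): the group pointer is read with rcu_dereference_protected() because the caller holds reuseport_lock, and has_conns is a one-way latch, set once any socket in the group becomes connected and never cleared for the life of the group. A minimal userspace sketch of that latch pattern, with hypothetical toy_* names and a pthread mutex standing in for reuseport_lock:

#include <pthread.h>
#include <stdbool.h>

/* Toy stand-in for struct sock_reuseport; only the latch is modeled. */
struct toy_latch_group {
        bool has_conns;                 /* set once, never cleared */
};

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors reuseport_has_conns_set(): latch under the group lock so the
 * store cannot race with concurrent group add/detach operations. */
static void toy_has_conns_set(struct toy_latch_group *grp)
{
        pthread_mutex_lock(&toy_lock);
        if (grp)                        /* the kernel checks the RCU pointer likewise */
                grp->has_conns = true;
        pthread_mutex_unlock(&toy_lock);
}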
40 static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse) in __reuseport_get_incoming_cpu() argument
43 WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1); in __reuseport_get_incoming_cpu()
46 static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse) in __reuseport_put_incoming_cpu() argument
49 WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1); in __reuseport_put_incoming_cpu()
52 static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) in reuseport_get_incoming_cpu() argument
55 __reuseport_get_incoming_cpu(reuse); in reuseport_get_incoming_cpu()
58 static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) in reuseport_put_incoming_cpu() argument
61 __reuseport_put_incoming_cpu(reuse); in reuseport_put_incoming_cpu()
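Lines 40-61 are the SO_INCOMING_CPU bookkeeping: reuse->incoming_cpu counts how many group members have a CPU preference (sk->sk_incoming_cpu >= 0), and the WRITE_ONCE() stores pair with the READ_ONCE() in reuseport_select_sock_by_hash() (line 540 below) so the selection fast path can skip the CPU-affinity scan entirely when the counter is zero. A toy model of the guarded counter (hypothetical names; plain stores instead of WRITE_ONCE()):

/* Only sockets that actually hold a CPU preference bump the group
 * counter; -1 means "no preference", as with sk_incoming_cpu. */
struct toy_cpu_sock  { int incoming_cpu; };
struct toy_cpu_group { int incoming_cpu; /* members with a preference */ };

static void toy_get_incoming_cpu(struct toy_cpu_sock *sk,
                                 struct toy_cpu_group *grp)
{
        if (sk->incoming_cpu >= 0)
                grp->incoming_cpu++;
}

static void toy_put_incoming_cpu(struct toy_cpu_sock *sk,
                                 struct toy_cpu_group *grp)
{
        if (sk->incoming_cpu >= 0)
                grp->incoming_cpu--;
}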
66 struct sock_reuseport *reuse; in reuseport_update_incoming_cpu() local
88 reuse = rcu_dereference_protected(sk->sk_reuseport_cb, in reuseport_update_incoming_cpu()
92 if (!reuse) in reuseport_update_incoming_cpu()
96 __reuseport_get_incoming_cpu(reuse); in reuseport_update_incoming_cpu()
98 __reuseport_put_incoming_cpu(reuse); in reuseport_update_incoming_cpu()
105 const struct sock_reuseport *reuse, in reuseport_sock_index() argument
112 right = reuse->num_socks; in reuseport_sock_index()
114 left = reuse->max_socks - reuse->num_closed_socks; in reuseport_sock_index()
115 right = reuse->max_socks; in reuseport_sock_index()
119 if (reuse->socks[left] == sk) in reuseport_sock_index()
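Lines 105-119 are reuseport_sock_index(), which encodes the socks[] layout: live sockets are packed into [0, num_socks) at the front of the array, closed sockets into [max_socks - num_closed_socks, max_socks) at the back, and the closed flag picks which region to scan linearly. A runnable sketch of the two-region search (toy_* names are hypothetical, and the kernel sizes the array dynamically rather than with a fixed TOY_MAX_SOCKS):

#include <stdbool.h>

#define TOY_MAX_SOCKS 8

struct toy_group {
        int  num_socks;                 /* live sockets, packed at the front */
        int  num_closed_socks;          /* closed sockets, packed at the back */
        void *socks[TOY_MAX_SOCKS];
};

/* Mirrors reuseport_sock_index(): scan only the region the caller asks
 * for; -1 means the socket is not in that region. */
static int toy_sock_index(void *sk, const struct toy_group *grp, bool closed)
{
        int left, right;

        if (!closed) {
                left = 0;
                right = grp->num_socks;
        } else {
                left = TOY_MAX_SOCKS - grp->num_closed_socks;
                right = TOY_MAX_SOCKS;
        }

        for (; left < right; left++)
                if (grp->socks[left] == sk)
                        return left;
        return -1;
}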
125 struct sock_reuseport *reuse) in __reuseport_add_sock() argument
127 reuse->socks[reuse->num_socks] = sk; in __reuseport_add_sock()
130 reuse->num_socks++; in __reuseport_add_sock()
131 reuseport_get_incoming_cpu(sk, reuse); in __reuseport_add_sock()
135 struct sock_reuseport *reuse) in __reuseport_detach_sock() argument
137 int i = reuseport_sock_index(sk, reuse, false); in __reuseport_detach_sock()
142 reuse->socks[i] = reuse->socks[reuse->num_socks - 1]; in __reuseport_detach_sock()
143 reuse->num_socks--; in __reuseport_detach_sock()
144 reuseport_put_incoming_cpu(sk, reuse); in __reuseport_detach_sock()
150 struct sock_reuseport *reuse) in __reuseport_add_closed_sock() argument
152 reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk; in __reuseport_add_closed_sock()
154 WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1); in __reuseport_add_closed_sock()
155 reuseport_get_incoming_cpu(sk, reuse); in __reuseport_add_closed_sock()
159 struct sock_reuseport *reuse) in __reuseport_detach_closed_sock() argument
161 int i = reuseport_sock_index(sk, reuse, true); in __reuseport_detach_closed_sock()
166 reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks]; in __reuseport_detach_closed_sock()
168 WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1); in __reuseport_detach_closed_sock()
169 reuseport_put_incoming_cpu(sk, reuse); in __reuseport_detach_closed_sock()
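Lines 125-169 are the four helpers that maintain that layout: live sockets are appended at socks[num_socks] and removed by swapping in the last live entry (O(1), order not preserved), while closed sockets grow inward from socks[max_socks - 1] and are removed by swapping in the innermost closed entry. num_closed_socks is updated through WRITE_ONCE() because some paths read it outside reuseport_lock. Continuing the toy_group sketch from the previous snippet (the incoming_cpu updates are left out here):

/* Append a live socket at the end of the front region. */
static void toy_add_sock(void *sk, struct toy_group *grp)
{
        grp->socks[grp->num_socks] = sk;
        grp->num_socks++;
}

/* Swap-with-last removal from the front region. */
static bool toy_detach_sock(void *sk, struct toy_group *grp)
{
        int i = toy_sock_index(sk, grp, false);

        if (i == -1)
                return false;
        grp->socks[i] = grp->socks[grp->num_socks - 1];
        grp->num_socks--;
        return true;
}

/* Closed sockets fill the array from the last slot toward the middle. */
static void toy_add_closed_sock(void *sk, struct toy_group *grp)
{
        grp->socks[TOY_MAX_SOCKS - grp->num_closed_socks - 1] = sk;
        grp->num_closed_socks++;
}

/* Swap in the innermost closed entry, then shrink the back region. */
static bool toy_detach_closed_sock(void *sk, struct toy_group *grp)
{
        int i = toy_sock_index(sk, grp, true);

        if (i == -1)
                return false;
        grp->socks[i] = grp->socks[TOY_MAX_SOCKS - grp->num_closed_socks];
        grp->num_closed_socks--;
        return true;
}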
178 struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC); in __reuseport_alloc() local
180 if (!reuse) in __reuseport_alloc()
183 reuse->max_socks = max_socks; in __reuseport_alloc()
185 RCU_INIT_POINTER(reuse->prog, NULL); in __reuseport_alloc()
186 return reuse; in __reuseport_alloc()
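Lines 178-186 are __reuseport_alloc(): struct sock_reuseport ends in a flexible array of socket pointers, so a group holding up to max_socks members is a single zeroed allocation of the header plus max_socks pointers. The same flexible-array idiom in userspace, with a hypothetical toy_flex_group and calloc() standing in for kzalloc(..., GFP_ATOMIC):

#include <stdlib.h>

/* Group header ending in a flexible array member, like the kernel's
 * struct sock_reuseport and its socks[]. */
struct toy_flex_group {
        unsigned int max_socks;
        void *socks[];                  /* flexible array member */
};

static struct toy_flex_group *toy_alloc(unsigned int max_socks)
{
        /* one zeroed allocation covers the header and the pointer array */
        struct toy_flex_group *grp =
                calloc(1, sizeof(*grp) + max_socks * sizeof(void *));

        if (!grp)
                return NULL;
        grp->max_socks = max_socks;
        return grp;
}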
191 struct sock_reuseport *reuse; in reuseport_alloc() local
202 reuse = rcu_dereference_protected(sk->sk_reuseport_cb, in reuseport_alloc()
204 if (reuse) { in reuseport_alloc()
205 if (reuse->num_closed_socks) { in reuseport_alloc()
207 ret = reuseport_resurrect(sk, reuse, NULL, bind_inany); in reuseport_alloc()
216 reuse->bind_inany = bind_inany; in reuseport_alloc()
220 reuse = __reuseport_alloc(INIT_SOCKS); in reuseport_alloc()
221 if (!reuse) { in reuseport_alloc()
228 kfree(reuse); in reuseport_alloc()
233 reuse->reuseport_id = id; in reuseport_alloc()
234 reuse->bind_inany = bind_inany; in reuseport_alloc()
235 reuse->socks[0] = sk; in reuseport_alloc()
236 reuse->num_socks = 1; in reuseport_alloc()
237 reuseport_get_incoming_cpu(sk, reuse); in reuseport_alloc()
238 rcu_assign_pointer(sk->sk_reuseport_cb, reuse); in reuseport_alloc()
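Lines 191-238 are reuseport_alloc(), which runs the first time a SO_REUSEPORT socket is hashed: if the socket already has a group consisting only of closed sockets it is resurrected (line 207); otherwise an INIT_SOCKS-sized group is allocated, a reuseport_id is drawn from reuseport_ida (freed again on the error path, lines 228-233), bind_inany is recorded, the socket becomes socks[0], and the group is published with rcu_assign_pointer(). All of this sits behind plain setsockopt()/bind() calls; for example, calling the sketch below twice with the same port creates a two-member group (error handling trimmed, names hypothetical):

#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>

/* Create a UDP socket bound to *port* with SO_REUSEPORT. The first
 * bind() triggers the kernel's reuseport_alloc(); later ones join the
 * existing group via reuseport_add_sock(). */
static int make_reuseport_udp(unsigned short port)
{
        struct sockaddr_in addr;
        int one = 1;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;
        /* must be set on every member before bind() */
        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);   /* the bind_inany case */
        addr.sin_port = htons(port);
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return -1;
        return fd;
}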
247 static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) in reuseport_grow() argument
252 more_socks_size = reuse->max_socks * 2U; in reuseport_grow()
254 if (reuse->num_closed_socks) { in reuseport_grow()
261 sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks]; in reuseport_grow()
263 __reuseport_detach_closed_sock(sk, reuse); in reuseport_grow()
265 return reuse; in reuseport_grow()
275 more_reuse->num_socks = reuse->num_socks; in reuseport_grow()
276 more_reuse->num_closed_socks = reuse->num_closed_socks; in reuseport_grow()
277 more_reuse->prog = reuse->prog; in reuseport_grow()
278 more_reuse->reuseport_id = reuse->reuseport_id; in reuseport_grow()
279 more_reuse->bind_inany = reuse->bind_inany; in reuseport_grow()
280 more_reuse->has_conns = reuse->has_conns; in reuseport_grow()
281 more_reuse->incoming_cpu = reuse->incoming_cpu; in reuseport_grow()
283 memcpy(more_reuse->socks, reuse->socks, in reuseport_grow()
284 reuse->num_socks * sizeof(struct sock *)); in reuseport_grow()
287 reuse->socks + (reuse->max_socks - reuse->num_closed_socks), in reuseport_grow()
288 reuse->num_closed_socks * sizeof(struct sock *)); in reuseport_grow()
289 more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts); in reuseport_grow()
291 for (i = 0; i < reuse->max_socks; ++i) in reuseport_grow()
292 rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb, in reuseport_grow()
299 kfree_rcu(reuse, rcu); in reuseport_grow()
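Lines 247-299 are reuseport_grow(). The counters are u16s, so if doubling max_socks would overflow U16_MAX the function instead makes room by evicting one closed socket, the entry at max_socks - num_closed_socks (lines 261-265; the kernel's comment notes its children have already been migrated). Otherwise it allocates a group twice the size, copies the live region to the front and the closed region to the back of the new array (lines 283-288), repoints every member's sk_reuseport_cb with rcu_assign_pointer() (lines 291-292), and releases the old group with kfree_rcu() so concurrent RCU readers can finish with the stale copy. A toy sketch of the doubling copy, this time with a dynamically sized group; the RCU republication and deferred free are elided:

#include <stdlib.h>
#include <string.h>

struct toy_dyn_group {
        unsigned int max_socks, num_socks, num_closed_socks;
        void *socks[];
};

static struct toy_dyn_group *toy_grow(struct toy_dyn_group *old)
{
        unsigned int more = old->max_socks * 2U;
        struct toy_dyn_group *grp =
                calloc(1, sizeof(*grp) + more * sizeof(void *));

        if (!grp)
                return NULL;
        grp->max_socks = more;
        grp->num_socks = old->num_socks;
        grp->num_closed_socks = old->num_closed_socks;

        /* live sockets stay packed at the front ... */
        memcpy(grp->socks, old->socks, old->num_socks * sizeof(void *));
        /* ... closed sockets stay packed at the (now farther) back */
        memcpy(grp->socks + grp->max_socks - grp->num_closed_socks,
               old->socks + old->max_socks - old->num_closed_socks,
               old->num_closed_socks * sizeof(void *));

        free(old);      /* the kernel defers this with kfree_rcu() */
        return grp;
}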
305 struct sock_reuseport *reuse; in reuseport_free_rcu() local
307 reuse = container_of(head, struct sock_reuseport, rcu); in reuseport_free_rcu()
308 sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1)); in reuseport_free_rcu()
309 ida_free(&reuseport_ida, reuse->reuseport_id); in reuseport_free_rcu()
310 kfree(reuse); in reuseport_free_rcu()
323 struct sock_reuseport *old_reuse, *reuse; in reuseport_add_sock() local
333 reuse = rcu_dereference_protected(sk2->sk_reuseport_cb, in reuseport_add_sock()
339 int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany); in reuseport_add_sock()
350 if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) { in reuseport_add_sock()
351 reuse = reuseport_grow(reuse); in reuseport_add_sock()
352 if (!reuse) { in reuseport_add_sock()
358 __reuseport_add_sock(sk, reuse); in reuseport_add_sock()
359 rcu_assign_pointer(sk->sk_reuseport_cb, reuse); in reuseport_add_sock()
370 struct sock_reuseport *reuse, bool bind_inany) in reuseport_resurrect() argument
372 if (old_reuse == reuse) { in reuseport_resurrect()
381 if (!reuse) { in reuseport_resurrect()
390 reuse = __reuseport_alloc(INIT_SOCKS); in reuseport_resurrect()
391 if (!reuse) in reuseport_resurrect()
396 kfree(reuse); in reuseport_resurrect()
400 reuse->reuseport_id = id; in reuseport_resurrect()
401 reuse->bind_inany = bind_inany; in reuseport_resurrect()
411 if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) { in reuseport_resurrect()
412 reuse = reuseport_grow(reuse); in reuseport_resurrect()
413 if (!reuse) in reuseport_resurrect()
419 __reuseport_add_sock(sk, reuse); in reuseport_resurrect()
420 rcu_assign_pointer(sk->sk_reuseport_cb, reuse); in reuseport_resurrect()
430 struct sock_reuseport *reuse; in reuseport_detach_sock() local
433 reuse = rcu_dereference_protected(sk->sk_reuseport_cb, in reuseport_detach_sock()
437 if (!reuse) in reuseport_detach_sock()
452 if (!__reuseport_detach_closed_sock(sk, reuse)) in reuseport_detach_sock()
453 __reuseport_detach_sock(sk, reuse); in reuseport_detach_sock()
455 if (reuse->num_socks + reuse->num_closed_socks == 0) in reuseport_detach_sock()
456 call_rcu(&reuse->rcu, reuseport_free_rcu); in reuseport_detach_sock()
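Lines 430-456 are the core of reuseport_detach_sock(): under reuseport_lock a socket is removed from whichever region holds it, closed region first, live region as the fallback (a socket is in exactly one), and once both counts reach zero the group is released via call_rcu(); reuseport_free_rcu() (lines 305-310) then drops the BPF program reference and returns the reuseport_id to reuseport_ida. The same try-closed-then-live order in the toy model (assuming grp was heap-allocated):

/* Mirrors the detach order in reuseport_detach_sock(); builds on the
 * toy helpers above, which return true on success. */
static void toy_detach(void *sk, struct toy_group *grp)
{
        if (!toy_detach_closed_sock(sk, grp))
                toy_detach_sock(sk, grp);

        if (grp->num_socks + grp->num_closed_socks == 0)
                free(grp);   /* kernel: call_rcu(&reuse->rcu, reuseport_free_rcu) */
}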
466 struct sock_reuseport *reuse; in reuseport_stop_listen_sock() local
471 reuse = rcu_dereference_protected(sk->sk_reuseport_cb, in reuseport_stop_listen_sock()
473 prog = rcu_dereference_protected(reuse->prog, in reuseport_stop_listen_sock()
483 __reuseport_detach_sock(sk, reuse); in reuseport_stop_listen_sock()
484 __reuseport_add_closed_sock(sk, reuse); in reuseport_stop_listen_sock()
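Lines 466-484 are from reuseport_stop_listen_sock(): when a TCP listener that is migration-capable (the tcp_migrate_req sysctl is on, or the attached program is a BPF_SK_REUSEPORT_SELECT_OR_MIGRATE one) stops listening, it is moved from the live region into the closed region rather than detached outright, so its children can still be migrated to the remaining members. In the toy model the move is just the two helpers back to back:

/* Park a closing listener in the closed region instead of dropping it,
 * mirroring reuseport_stop_listen_sock()'s migration-capable path. */
static void toy_stop_listen(void *sk, struct toy_group *grp)
{
        toy_detach_sock(sk, grp);
        toy_add_closed_sock(sk, grp);
}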
498 static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks, in run_bpf_filter() argument
525 return reuse->socks[index]; in run_bpf_filter()
528 static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse, in reuseport_select_sock_by_hash() argument
536 struct sock *sk = reuse->socks[i]; in reuseport_select_sock_by_hash()
540 if (!READ_ONCE(reuse->incoming_cpu)) in reuseport_select_sock_by_hash()
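Lines 528-540 are from reuseport_select_sock_by_hash(): the packet hash picks a starting index among the live sockets and the scan walks the array circularly, preferring a socket whose sk_incoming_cpu matches the CPU handling the packet; the READ_ONCE(reuse->incoming_cpu) check, paired with the WRITE_ONCE() counters above, short-circuits the affinity scan when no member set SO_INCOMING_CPU. A toy version of the loop (the kernel also skips TCP_ESTABLISHED entries, i.e. connected sockets, and uses reciprocal_scale() rather than a plain modulo; cur_cpu stands in for raw_smp_processor_id()):

#include <stdint.h>

struct toy_pick_sock  { int incoming_cpu; /* -1: no preference */ };

struct toy_pick_group {
        int incoming_cpu;               /* members with a CPU preference */
        struct toy_pick_sock *socks[8];
};

/* Caller guarantees num_socks > 0, as the kernel does. */
static struct toy_pick_sock *
toy_select_by_hash(struct toy_pick_group *grp, uint32_t hash,
                   unsigned int num_socks, int cur_cpu)
{
        struct toy_pick_sock *first = NULL;
        unsigned int i, j;

        i = j = hash % num_socks;
        do {
                struct toy_pick_sock *sk = grp->socks[i];

                /* fast path: nobody asked for CPU affinity */
                if (!grp->incoming_cpu)
                        return sk;
                /* prefer the member pinned to this CPU */
                if (sk->incoming_cpu == cur_cpu)
                        return sk;
                if (!first)
                        first = sk;
                if (++i >= num_socks)
                        i = 0;
        } while (i != j);

        return first;
}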
574 struct sock_reuseport *reuse; in reuseport_select_sock() local
580 reuse = rcu_dereference(sk->sk_reuseport_cb); in reuseport_select_sock()
583 if (!reuse) in reuseport_select_sock()
586 prog = rcu_dereference(reuse->prog); in reuseport_select_sock()
587 socks = READ_ONCE(reuse->num_socks); in reuseport_select_sock()
596 sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash); in reuseport_select_sock()
598 sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len); in reuseport_select_sock()
603 sk2 = reuseport_select_sock_by_hash(reuse, hash, socks); in reuseport_select_sock()
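Lines 574-603 show the dispatch order in reuseport_select_sock(), all under RCU: an attached BPF program gets first pick, either a BPF_PROG_TYPE_SK_REUSEPORT program via bpf_run_sk_reuseport() (line 596) or a packet filter via run_bpf_filter() (lines 498-525), which treats the filter's return value as an index and yields nothing when it is out of range; the hash scan above is only the fallback (line 603). reuseport_migrate_sock() (lines 625-667 below) follows the same order when picking a new listener for a child whose own listener has closed. A condensed, hypothetical rendering of that dispatch, reusing the toy_pick_* types; the function pointer stands in for both BPF entry points:

typedef struct toy_pick_sock *(*toy_bpf_pick_t)(struct toy_pick_group *grp,
                                                uint32_t hash);

static struct toy_pick_sock *
toy_select(struct toy_pick_group *grp, unsigned int num_socks,
           toy_bpf_pick_t prog, uint32_t hash, int cur_cpu)
{
        struct toy_pick_sock *sk2 = NULL;

        if (prog)                       /* BPF decides first ... */
                sk2 = prog(grp, hash);
        if (!sk2)                       /* ... hash is only the fallback */
                sk2 = toy_select_by_hash(grp, hash, num_socks, cur_cpu);
        return sk2;
}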
625 struct sock_reuseport *reuse; in reuseport_migrate_sock() local
634 reuse = rcu_dereference(sk->sk_reuseport_cb); in reuseport_migrate_sock()
635 if (!reuse) in reuseport_migrate_sock()
638 socks = READ_ONCE(reuse->num_socks); in reuseport_migrate_sock()
646 prog = rcu_dereference(reuse->prog); in reuseport_migrate_sock()
660 nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash); in reuseport_migrate_sock()
667 nsk = reuseport_select_sock_by_hash(reuse, hash, socks); in reuseport_migrate_sock()
686 struct sock_reuseport *reuse; in reuseport_attach_prog() local
704 reuse = rcu_dereference_protected(sk->sk_reuseport_cb, in reuseport_attach_prog()
706 old_prog = rcu_dereference_protected(reuse->prog, in reuseport_attach_prog()
708 rcu_assign_pointer(reuse->prog, prog); in reuseport_attach_prog()
718 struct sock_reuseport *reuse; in reuseport_detach_prog() local
723 reuse = rcu_dereference_protected(sk->sk_reuseport_cb, in reuseport_detach_prog()
729 if (!reuse) { in reuseport_detach_prog()
734 if (sk_unhashed(sk) && reuse->num_closed_socks) { in reuseport_detach_prog()
739 old_prog = rcu_replace_pointer(reuse->prog, old_prog, in reuseport_detach_prog()
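Lines 686-739 are reuseport_attach_prog() and reuseport_detach_prog(): the program pointer is swapped under reuseport_lock with rcu_assign_pointer() on attach and rcu_replace_pointer() (swapping in NULL) on detach, and detach fails with -ENOENT when the socket has no group, when it is an unhashed socket kept only in the closed region (lines 729-734), or when no program was attached. Userspace drives both paths with real socket options; a sketch, assuming prog_fd is a BPF_PROG_TYPE_SK_REUSEPORT program already loaded via bpf():

#include <sys/socket.h>

/* Attach an eBPF reuseport selector to one member of the group;
 * lands in reuseport_attach_prog(). Classic filters go through
 * SO_ATTACH_REUSEPORT_CBPF instead. */
static int attach_reuseport_prog(int sock_fd, int prog_fd)
{
        return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
                          &prog_fd, sizeof(prog_fd));
}

/* Detach whatever program the group holds; lands in
 * reuseport_detach_prog(). The option value is unused, but setsockopt()
 * still wants an int-sized buffer. */
static int detach_reuseport_prog(int sock_fd)
{
        int dummy = 0;

        return setsockopt(sock_fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
                          &dummy, sizeof(dummy));
}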