Lines matching refs:reuse

22 			       struct sock_reuseport *reuse, bool bind_inany);
26 struct sock_reuseport *reuse;
32 reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
34 if (likely(reuse))
35 reuse->has_conns = 1;
40 static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)
43 WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);
46 static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)
49 WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1);
52 static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
55 __reuseport_get_incoming_cpu(reuse);
58 static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
61 __reuseport_put_incoming_cpu(reuse);
66 struct sock_reuseport *reuse;
88 reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
92 if (!reuse)
96 __reuseport_get_incoming_cpu(reuse);
98 __reuseport_put_incoming_cpu(reuse);
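
The __reuseport_get_incoming_cpu()/__reuseport_put_incoming_cpu() helpers above maintain reuse->incoming_cpu, a per-group count of member sockets that have expressed a CPU preference; reuseport_select_sock_by_hash() further down only runs its CPU-affinity pass when that count is non-zero. A minimal userspace sketch of setting that preference with SO_INCOMING_CPU on an already-bound SO_REUSEPORT socket (the numeric fallback mirrors asm-generic/socket.h and is only an assumption about the build environment):

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_INCOMING_CPU
#define SO_INCOMING_CPU 49	/* value from asm-generic/socket.h */
#endif

/* Ask the kernel to prefer this group member for packets that arrive on
 * the given CPU.  Members with this set are counted in reuse->incoming_cpu. */
static int pin_member_to_cpu(int fd, int cpu)
{
	if (setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu)) < 0) {
		perror("setsockopt(SO_INCOMING_CPU)");
		return -1;
	}
	return 0;
}
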
105 const struct sock_reuseport *reuse,
112 right = reuse->num_socks;
114 left = reuse->max_socks - reuse->num_closed_socks;
115 right = reuse->max_socks;
119 if (reuse->socks[left] == sk)
125 struct sock_reuseport *reuse)
127 reuse->socks[reuse->num_socks] = sk;
130 reuse->num_socks++;
131 reuseport_get_incoming_cpu(sk, reuse);
135 struct sock_reuseport *reuse)
137 int i = reuseport_sock_index(sk, reuse, false);
142 reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
143 reuse->num_socks--;
144 reuseport_put_incoming_cpu(sk, reuse);
150 struct sock_reuseport *reuse)
152 reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
154 WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
155 reuseport_get_incoming_cpu(sk, reuse);
159 struct sock_reuseport *reuse)
161 int i = reuseport_sock_index(sk, reuse, true);
166 reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
168 WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
169 reuseport_put_incoming_cpu(sk, reuse);
178 struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);
180 if (!reuse)
183 reuse->max_socks = max_socks;
185 RCU_INIT_POINTER(reuse->prog, NULL);
186 return reuse;
191 struct sock_reuseport *reuse;
202 reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
204 if (reuse) {
205 if (reuse->num_closed_socks) {
207 ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
211 /* Only set reuse->bind_inany if the bind_inany is true.
212 * Otherwise, it will overwrite the reuse->bind_inany
216 reuse->bind_inany = bind_inany;
220 reuse = __reuseport_alloc(INIT_SOCKS);
221 if (!reuse) {
228 kfree(reuse);
233 reuse->reuseport_id = id;
234 reuse->bind_inany = bind_inany;
235 reuse->socks[0] = sk;
236 reuse->num_socks = 1;
237 reuseport_get_incoming_cpu(sk, reuse);
238 rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
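
The fragments above are the group-creation path: __reuseport_alloc() sizes the socks[] array, and the first socket that binds with SO_REUSEPORT becomes socks[0]; reuseport_add_sock() further down attaches later members, growing the array via reuseport_grow() when it fills. A minimal, self-contained userspace sketch of forming such a group (IPv4 UDP, illustrative port 7777):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_REUSEPORT
#define SO_REUSEPORT 15	/* value from asm-generic/socket.h */
#endif

static int open_member(uint16_t port)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* SO_REUSEPORT must be enabled before bind(): the first bind sets up
	 * the group, later binds on the same port join it. */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0 ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = open_member(7777);
	int b = open_member(7777);

	if (a < 0 || b < 0) {
		perror("open_member");
		return 1;
	}
	/* Datagrams to port 7777 are now spread across both sockets by the
	 * selection logic listed further below. */
	close(a);
	close(b);
	return 0;
}
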
247 static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
252 more_socks_size = reuse->max_socks * 2U;
254 if (reuse->num_closed_socks) {
261 sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
263 __reuseport_detach_closed_sock(sk, reuse);
265 return reuse;
275 more_reuse->num_socks = reuse->num_socks;
276 more_reuse->num_closed_socks = reuse->num_closed_socks;
277 more_reuse->prog = reuse->prog;
278 more_reuse->reuseport_id = reuse->reuseport_id;
279 more_reuse->bind_inany = reuse->bind_inany;
280 more_reuse->has_conns = reuse->has_conns;
281 more_reuse->incoming_cpu = reuse->incoming_cpu;
283 memcpy(more_reuse->socks, reuse->socks,
284 reuse->num_socks * sizeof(struct sock *));
287 reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
288 reuse->num_closed_socks * sizeof(struct sock *));
289 more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);
291 for (i = 0; i < reuse->max_socks; ++i)
292 rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
296 * that reuse and more_reuse can temporarily share a reference
299 kfree_rcu(reuse, rcu);
305 struct sock_reuseport *reuse;
307 reuse = container_of(head, struct sock_reuseport, rcu);
308 sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
309 ida_free(&reuseport_ida, reuse->reuseport_id);
310 kfree(reuse);
323 struct sock_reuseport *old_reuse, *reuse;
333 reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
339 int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);
350 if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
351 reuse = reuseport_grow(reuse);
352 if (!reuse) {
358 __reuseport_add_sock(sk, reuse);
359 rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
370 struct sock_reuseport *reuse, bool bind_inany)
372 if (old_reuse == reuse) {
381 if (!reuse) {
390 reuse = __reuseport_alloc(INIT_SOCKS);
391 if (!reuse)
396 kfree(reuse);
400 reuse->reuseport_id = id;
401 reuse->bind_inany = bind_inany;
411 if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
412 reuse = reuseport_grow(reuse);
413 if (!reuse)
419 __reuseport_add_sock(sk, reuse);
420 rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
430 struct sock_reuseport *reuse;
433 reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
437 if (!reuse)
452 if (!__reuseport_detach_closed_sock(sk, reuse))
453 __reuseport_detach_sock(sk, reuse);
455 if (reuse->num_socks + reuse->num_closed_socks == 0)
456 call_rcu(&reuse->rcu, reuseport_free_rcu);
466 struct sock_reuseport *reuse;
471 reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
473 prog = rcu_dereference_protected(reuse->prog,
483 __reuseport_detach_sock(sk, reuse);
484 __reuseport_add_closed_sock(sk, reuse);
498 static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
525 return reuse->socks[index];
528 static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
536 struct sock *sk = reuse->socks[i];
540 if (!READ_ONCE(reuse->incoming_cpu))
574 struct sock_reuseport *reuse;
580 reuse = rcu_dereference(sk->sk_reuseport_cb);
583 if (!reuse)
586 prog = rcu_dereference(reuse->prog);
587 socks = READ_ONCE(reuse->num_socks);
596 sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
598 sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
603 sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
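
The selection fragments above prefer an attached BPF program: run_bpf_filter() treats a classic BPF program's return value as an index into reuse->socks[], and a missing or out-of-range result falls back to reuseport_select_sock_by_hash(). A minimal sketch of attaching such a classic program with SO_ATTACH_REUSEPORT_CBPF, assuming the group was built with one socket per CPU so the CPU number is usually a valid index (anything out of range simply falls back to the hash pick):

#include <linux/filter.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_ATTACH_REUSEPORT_CBPF
#define SO_ATTACH_REUSEPORT_CBPF 51	/* value from asm-generic/socket.h */
#endif

static int attach_cpu_steering(int fd)
{
	struct sock_filter code[] = {
		/* A = number of the CPU handling the packet */
		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
		/* return A: used as an index into the group's socks[] */
		{ BPF_RET | BPF_A, 0, 0, 0 },
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
		       &prog, sizeof(prog)) < 0) {
		perror("setsockopt(SO_ATTACH_REUSEPORT_CBPF)");
		return -1;
	}
	return 0;
}
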
625 struct sock_reuseport *reuse;
634 reuse = rcu_dereference(sk->sk_reuseport_cb);
635 if (!reuse)
638 socks = READ_ONCE(reuse->num_socks);
646 prog = rcu_dereference(reuse->prog);
660 nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);
667 nsk = reuseport_select_sock_by_hash(reuse, hash, socks);
686 struct sock_reuseport *reuse;
704 reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
706 old_prog = rcu_dereference_protected(reuse->prog,
708 rcu_assign_pointer(reuse->prog, prog);
718 struct sock_reuseport *reuse;
723 reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
726 /* reuse must be checked after acquiring the reuseport_lock
729 if (!reuse) {
734 if (sk_unhashed(sk) && reuse->num_closed_socks) {
739 old_prog = rcu_replace_pointer(reuse->prog, old_prog,
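
The final fragments are from reuseport_attach_prog()/reuseport_detach_prog(), the kernel side of the SO_ATTACH_REUSEPORT_CBPF, SO_ATTACH_REUSEPORT_EBPF and SO_DETACH_REUSEPORT_BPF socket options. A minimal sketch of a matching BPF_PROG_TYPE_SK_REUSEPORT program; the map name, bucket count and keying on md->hash are illustrative, and loading the object plus populating the map (e.g. with libbpf) is not shown:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define NR_BUCKETS 16

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, NR_BUCKETS);
	__type(key, __u32);
	__type(value, __u64);
} reuseport_map SEC(".maps");

SEC("sk_reuseport")
int select_by_bucket(struct sk_reuseport_md *md)
{
	__u32 key = md->hash % NR_BUCKETS;

	/* On success the packet is delivered to the socket stored at
	 * reuseport_map[key]; if nothing was selected, SK_PASS lets the
	 * kernel fall back to its own hash-based choice. */
	bpf_sk_select_reuseport(md, &reuseport_map, &key, 0);

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";
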