/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1)

enum __sk_action {
        __SK_DROP = 0,
        __SK_PASS,
        __SK_REDIRECT,
        __SK_NONE,
};

struct sk_msg_sg {
        u32 start;
        u32 curr;
        u32 end;
        u32 size;
        u32 copybreak;
        DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
        /* The extra two elements:
         * 1) used for chaining the front and back sections when the list
         *    becomes partitioned (e.g. end < start). The crypto APIs require
         *    the chaining;
         * 2) to chain trailer SG entries after the message.
         */
        struct scatterlist data[MAX_MSG_FRAGS + 2];
};
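
/* Illustrative sketch, not part of the original header: the scatterlist in
 * struct sk_msg_sg above is managed as a ring of NR_MSG_FRAG_IDS slot
 * indices, so start/end may wrap and the populated region can be split
 * (end < start). Assuming a populated msg, a hypothetical walk over its
 * entries with the accessors defined further below could look like:
 *
 *      u32 i = msg->sg.start;
 *
 *      while (i != msg->sg.end) {
 *              struct scatterlist *sge = sk_msg_elem(msg, i);
 *
 *              pr_debug("frag length=%u offset=%u\n",
 *                       sge->length, sge->offset);
 *              sk_msg_iter_var_next(i);
 *      }
 */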

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
        struct sk_msg_sg sg;
        void *data;
        void *data_end;
        u32 apply_bytes;
        u32 cork_bytes;
        u32 flags;
        struct sk_buff *skb;
        struct sock *sk_redir;
        struct sock *sk;
        struct list_head list;
};

struct sk_psock_progs {
        struct bpf_prog *msg_parser;
        struct bpf_prog *stream_parser;
        struct bpf_prog *stream_verdict;
        struct bpf_prog *skb_verdict;
};

enum sk_psock_state_bits {
        SK_PSOCK_TX_ENABLED,
        SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
        struct list_head list;
        struct bpf_map *map;
        void *link_raw;
};

struct sk_psock_work_state {
        u32 len;
        u32 off;
};

struct sk_psock {
        struct sock *sk;
        struct sock *sk_redir;
        u32 apply_bytes;
        u32 cork_bytes;
        u32 eval;
        bool redir_ingress; /* undefined if sk_redir is null */
        struct sk_msg *cork;
        struct sk_psock_progs progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
        struct strparser strp;
#endif
        struct sk_buff_head ingress_skb;
        struct list_head ingress_msg;
        spinlock_t ingress_lock;
        unsigned long state;
        struct list_head link;
        spinlock_t link_lock;
        refcount_t refcnt;
        void (*saved_unhash)(struct sock *sk);
        void (*saved_destroy)(struct sock *sk);
        void (*saved_close)(struct sock *sk, long timeout);
        void (*saved_write_space)(struct sock *sk);
        void (*saved_data_ready)(struct sock *sk);
        /* psock_update_sk_prot may be called with restore=false many times
         * so the handler must be safe for this case. It will be called
         * exactly once with restore=true when the psock is being destroyed
         * and psock refcnt is zero, but before an RCU grace period.
         */
        int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
                                    bool restore);
        struct proto *sk_proto;
        struct mutex work_mutex;
        struct sk_psock_work_state work_state;
        struct delayed_work work;
        struct sock *sk_pair;
        struct rcu_work rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
        WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
        if (psock->apply_bytes) {
                if (psock->apply_bytes < bytes)
                        psock->apply_bytes = 0;
                else
                        psock->apply_bytes -= bytes;
        }
}
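
/* Illustrative sketch, not part of the original header: a hypothetical
 * caller pairing the allocation and free helpers declared above (error
 * handling and locking requirements are glossed over; the value passed as
 * elem_first_coalesce is only an assumption for this example):
 *
 *      struct sk_msg msg;
 *      int err;
 *
 *      sk_msg_init(&msg);
 *      err = sk_msg_alloc(sk, &msg, len, msg.sg.end);
 *      if (err)
 *              return err;
 *      err = sk_msg_memcopy_from_iter(sk, from, &msg, len);
 *      if (err)
 *              sk_msg_free(sk, &msg);
 */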

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
        return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)                       \
        do {                                            \
                if (var == 0)                           \
                        var = NR_MSG_FRAG_IDS - 1;      \
                else                                    \
                        var--;                          \
        } while (0)

#define sk_msg_iter_var_next(var)                       \
        do {                                            \
                var++;                                  \
                if (var == NR_MSG_FRAG_IDS)             \
                        var = 0;                        \
        } while (0)

#define sk_msg_iter_prev(msg, which)                    \
        sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)                    \
        sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
        BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
        memset(msg, 0, sizeof(*msg));
        sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
                               int which, u32 size)
{
        dst->sg.data[which] = src->sg.data[which];
        dst->sg.data[which].length = size;
        dst->sg.size += size;
        src->sg.size -= size;
        src->sg.data[which].length -= size;
        src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
        memcpy(dst, src, sizeof(*src));
        sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
        return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
        return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
        return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
        return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
        return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
        return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
        struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

        if (test_bit(msg->sg.start, msg->sg.copy)) {
                msg->data = NULL;
                msg->data_end = NULL;
        } else {
                msg->data = sg_virt(sge);
                msg->data_end = msg->data + sge->length;
        }
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
                                   u32 len, u32 offset)
{
        struct scatterlist *sge;

        get_page(page);
        sge = sk_msg_elem(msg, msg->sg.end);
        sg_set_page(sge, page, len, offset);
        sg_unmark_end(sge);

        __set_bit(msg->sg.end, msg->sg.copy);
        msg->sg.size += len;
        sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
        do {
                if (copy_state)
                        __set_bit(i, msg->sg.copy);
                else
                        __clear_bit(i, msg->sg.copy);
                sk_msg_iter_var_next(i);
                if (i == msg->sg.end)
                        break;
        } while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
        sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
        sk_msg_sg_copy(msg, start, false);
}
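
/* Illustrative note, not part of the original header: sk_msg_page_add()
 * sets the sg.copy bit for the element it appends, and
 * sk_msg_compute_data_pointers() reports NULL data/data_end whenever the
 * element at sg.start has that bit set. A minimal sketch combining them:
 *
 *      if (!sk_msg_full(msg))
 *              sk_msg_page_add(msg, page, len, offset);
 *      sk_msg_compute_data_pointers(msg);
 */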

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
        return __rcu_dereference_sk_user_data_with_flags(sk,
                                                         SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
                                      enum sk_psock_state_bits bit)
{
        set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
                                        enum sk_psock_state_bits bit)
{
        clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
                                       enum sk_psock_state_bits bit)
{
        return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
        sk_drops_add(sk, skb);
        kfree_skb(skb);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
                                      struct sk_msg *msg)
{
        spin_lock_bh(&psock->ingress_lock);
        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                list_add_tail(&msg->list, &psock->ingress_msg);
        else {
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
        spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
        struct sk_msg *msg;

        spin_lock_bh(&psock->ingress_lock);
        msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
        if (msg)
                list_del(&msg->list);
        spin_unlock_bh(&psock->ingress_lock);
        return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
        struct sk_msg *msg;

        spin_lock_bh(&psock->ingress_lock);
        msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
        spin_unlock_bh(&psock->ingress_lock);
        return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
                                               struct sk_msg *msg)
{
        struct sk_msg *ret;

        spin_lock_bh(&psock->ingress_lock);
        if (list_is_last(&msg->list, &psock->ingress_msg))
                ret = NULL;
        else
                ret = list_next_entry(msg, list);
        spin_unlock_bh(&psock->ingress_lock);
        return ret;
}
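
/* Illustrative sketch, not part of the original header: a hypothetical
 * reader walking the queued ingress messages with the helpers above, each
 * of which takes ingress_lock internally:
 *
 *      struct sk_msg *msg = sk_psock_peek_msg(psock);
 *
 *      while (msg) {
 *              ...consume data described by msg...
 *              msg = sk_psock_next_msg(psock, msg);
 *      }
 */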

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
        return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
        if (msg->skb)
                consume_skb(msg->skb);
        kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
        struct sock *sk = psock->sk;

        sk->sk_err = err;
        sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
        return kzalloc(sizeof(struct sk_psock_link),
                       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
        kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
        if (psock->cork) {
                sk_msg_free(psock->sk, psock->cork);
                kfree(psock->cork);
                psock->cork = NULL;
        }
}

static inline void sk_psock_restore_proto(struct sock *sk,
                                          struct sk_psock *psock)
{
        if (psock->psock_update_sk_prot)
                psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (psock && !refcount_inc_not_zero(&psock->refcnt))
                psock = NULL;
        rcu_read_unlock();
        return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
        if (refcount_dec_and_test(&psock->refcnt))
                sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                psock->saved_data_ready(sk);
        else
                sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
                                  struct bpf_prog *prog)
{
        prog = xchg(pprog, prog);
        if (prog)
                bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
                                     struct bpf_prog *prog,
                                     struct bpf_prog *old)
{
        if (cmpxchg(pprog, old, prog) != old)
                return -ENOENT;

        if (old)
                bpf_prog_put(old);

        return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
        psock_set_prog(&progs->msg_parser, NULL);
        psock_set_prog(&progs->stream_parser, NULL);
        psock_set_prog(&progs->stream_verdict, NULL);
        psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);
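
/* Illustrative sketch, not part of the original header: the usual
 * lookup/refcount pattern built from the helpers above. sk_psock_get()
 * returns NULL when no psock is attached or its refcount already dropped
 * to zero, and sk_psock_put() releases the reference:
 *
 *      struct sk_psock *psock = sk_psock_get(sk);
 *
 *      if (psock) {
 *              ...use psock...
 *              sk_psock_put(sk, psock);
 *      }
 */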

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
        if (!psock)
                return false;
        return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER (1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
        unsigned long sk_redir = skb->_sk_redir;

        return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
        skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
        unsigned long sk_redir = skb->_sk_redir;

        return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
        skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
                                     bool ingress)
{
        skb->_sk_redir = (unsigned long)sk_redir;
        if (ingress)
                skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
        unsigned long sk_redir = skb->_sk_redir;

        return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
        skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */