Lines matching defs:sk. The hits below appear to come from the Linux kernel's AF_RXRPC socket front end (net/rxrpc/af_rxrpc.c); the leading number on each line is its position in that file.

51 static inline int rxrpc_writable(struct sock *sk)
53 return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
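
The test at lines 51-53 defines writability for an AF_RXRPC socket: the write memory currently charged to the socket (sk_wmem_alloc) must still be below the send buffer limit. Reconstructed in full from the two matched lines, with my own comments added:

    /* Writable while queued transmit memory stays under the
     * socket's send buffer limit (sk_sndbuf). */
    static inline int rxrpc_writable(struct sock *sk)
    {
            return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
    }
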
59 static void rxrpc_write_space(struct sock *sk)
61 _enter("%p", sk);
63 if (rxrpc_writable(sk)) {
64 struct socket_wq *wq = rcu_dereference(sk->sk_wq);
68 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
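
rxrpc_write_space() at lines 59-68 is installed as sk->sk_write_space (see line 839 below) and runs when transmit memory is returned to the socket. The lines elided between 64 and 68 presumably wake any synchronous sleepers before the async notification; a sketch of the whole body under that assumption:

    static void rxrpc_write_space(struct sock *sk)
    {
            _enter("%p", sk);
            rcu_read_lock();
            if (rxrpc_writable(sk)) {
                    struct socket_wq *wq = rcu_dereference(sk->sk_wq);

                    /* Wake poll()/sendmsg() sleepers, then poke
                     * SIGIO/async waiters. */
                    if (skwq_has_sleeper(wq))
                            wake_up_interruptible(&wq->wait);
                    sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
            }
            rcu_read_unlock();
    }
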
134 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
145 lock_sock(&rx->sk);
147 switch (rx->sk.sk_state) {
150 local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
164 rx->sk.sk_state = RXRPC_SERVER_BOUND;
167 rx->sk.sk_state = RXRPC_CLIENT_BOUND;
183 rx->sk.sk_state = RXRPC_SERVER_BOUND2;
191 release_sock(&rx->sk);
201 release_sock(&rx->sk);
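
Lines 134-201 are the bind() path. Under the socket lock, the local transport endpoint for the requested address is looked up or created (line 150), and sk_state moves from RXRPC_UNBOUND to RXRPC_SERVER_BOUND when a service ID was supplied or RXRPC_CLIENT_BOUND otherwise; a second bind with a different service ID yields RXRPC_SERVER_BOUND2 (line 183). A minimal userspace counterpart, based on Documentation/networking/rxrpc.rst (the service ID 100 and port 7000 are arbitrary):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <linux/rxrpc.h>

    int main(void)
    {
            struct sockaddr_rxrpc srx = {
                    .srx_family     = AF_RXRPC,
                    .srx_service    = 100,          /* arbitrary service ID */
                    .transport_type = SOCK_DGRAM,   /* UDP transport */
                    .transport_len  = sizeof(srx.transport.sin),
            };
            int fd;

            srx.transport.sin.sin_family = AF_INET;
            srx.transport.sin.sin_port = htons(7000);
            srx.transport.sin.sin_addr.s_addr = htonl(INADDR_ANY);

            fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
            if (fd < 0 || bind(fd, (struct sockaddr *)&srx, sizeof(srx)) < 0) {
                    perror("rxrpc bind");
                    return 1;
            }
            /* In kernel terms the socket is now RXRPC_SERVER_BOUND. */
            return 0;
    }
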
212 struct sock *sk = sock->sk;
213 struct rxrpc_sock *rx = rxrpc_sk(sk);
219 lock_sock(&rx->sk);
221 switch (rx->sk.sk_state) {
234 old = sk->sk_max_ack_backlog;
235 sk->sk_max_ack_backlog = backlog;
238 rx->sk.sk_state = RXRPC_SERVER_LISTENING;
240 sk->sk_max_ack_backlog = old;
244 rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
245 sk->sk_max_ack_backlog = 0;
256 release_sock(&rx->sk);
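
Lines 212-256 are listen(). The old backlog is saved (line 234) so it can be restored if preallocating the service backlog fails, and a backlog of 0 on an already listening socket disables listening (lines 244-245). A simplified sketch of the control flow around the matched lines (limit checks elided; rxrpc_service_prealloc() is the preallocation step as I read the file):

    switch (rx->sk.sk_state) {
    case RXRPC_SERVER_BOUND:
    case RXRPC_SERVER_BOUND2:
            old = sk->sk_max_ack_backlog;
            sk->sk_max_ack_backlog = backlog;
            ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
            if (ret == 0)
                    rx->sk.sk_state = RXRPC_SERVER_LISTENING;
            else
                    sk->sk_max_ack_backlog = old;   /* roll back on failure */
            break;
    case RXRPC_SERVER_LISTENING:
            if (backlog == 0) {
                    /* listen(fd, 0) switches the listener off */
                    rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
                    sk->sk_max_ack_backlog = 0;
                    break;
            }
            fallthrough;
    default:
            ret = -EBUSY;
            break;
    }
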
273 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
344 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
351 lock_sock(&rx->sk);
388 static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
407 rxrpc_release_call(rxrpc_sk(sock->sk), call);
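
Lines 388-407 belong to the in-kernel call API. rxrpc_dummy_notify_rx() is an empty notification handler; my reading is that it is swapped into a call being torn down so that late notifications can no longer call back into a kernel user (for example a module) that is going away:

    /* Does nothing on purpose: a safe landing pad for late events. */
    static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
                                      unsigned long user_call_ID)
    {
    }

    /* ...later, when the kernel user ends the call: */
    rxrpc_release_call(rxrpc_sk(sock->sk), call);
    call->notify_rx = rxrpc_dummy_notify_rx;  /* defang further callbacks */
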
479 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
520 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
531 lock_sock(&rx->sk);
537 switch (rx->sk.sk_state) {
539 rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
554 release_sock(&rx->sk);
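
Lines 520-554 are the connect() path: an RXRPC_UNBOUND socket becomes RXRPC_CLIENT_UNBOUND (line 539) and the supplied address is recorded as the default target for subsequent calls. From userspace the peer is named with the same sockaddr_rxrpc shape used for bind(); a sketch (address, port, and service values are placeholders):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/rxrpc.h>

    /* Point an AF_RXRPC client socket at a remote service. */
    static int rxrpc_connect_to(int fd, const char *ip, unsigned short port,
                                unsigned short service)
    {
            struct sockaddr_rxrpc srx = {
                    .srx_family     = AF_RXRPC,
                    .srx_service    = service,
                    .transport_type = SOCK_DGRAM,
                    .transport_len  = sizeof(srx.transport.sin),
            };

            srx.transport.sin.sin_family = AF_INET;
            srx.transport.sin.sin_port = htons(port);
            inet_pton(AF_INET, ip, &srx.transport.sin.sin_addr);

            return connect(fd, (struct sockaddr *)&srx, sizeof(srx));
    }
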
570 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
573 _enter(",{%d},,%zu", rx->sk.sk_state, len);
586 lock_sock(&rx->sk);
588 switch (rx->sk.sk_state) {
608 local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
615 rx->sk.sk_state = RXRPC_CLIENT_BOUND;
636 release_sock(&rx->sk);
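
Lines 570-636 are rxrpc_sendmsg(). Note the implicit bind: a still-unbound client socket acquires its local endpoint here (line 608) and is promoted to RXRPC_CLIENT_BOUND (line 615) before the message is queued. From userspace, each call multiplexed over the socket is tagged with an RXRPC_USER_CALL_ID control message; a sketch following the rxrpc documentation (the call ID is caller-chosen, and the SOL_RXRPC fallback define assumes your libc headers lack it):

    #include <string.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <sys/socket.h>
    #include <linux/rxrpc.h>

    #ifndef SOL_RXRPC
    #define SOL_RXRPC 272   /* from include/linux/socket.h */
    #endif

    /* Send one buffer as a complete request; recvmsg() will carry
     * the same call_id back with the reply. */
    static ssize_t rxrpc_send_request(int fd, unsigned long call_id,
                                      const void *buf, size_t len)
    {
            union {
                    char buf[CMSG_SPACE(sizeof(unsigned long))];
                    struct cmsghdr align;   /* force cmsg alignment */
            } control;
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov        = &iov,
                    .msg_iovlen     = 1,
                    .msg_control    = control.buf,
                    .msg_controllen = sizeof(control.buf),
            };
            struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

            cmsg->cmsg_level = SOL_RXRPC;
            cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
            cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
            memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

            /* No MSG_MORE flag: this sendmsg() completes the request. */
            return sendmsg(fd, &msg, 0);
    }
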
642 int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val)
644 if (sk->sk_state != RXRPC_UNBOUND)
648 lock_sock(sk);
649 rxrpc_sk(sk)->min_sec_level = val;
650 release_sock(sk);
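
rxrpc_sock_set_min_security_level() (lines 642-650) is the in-kernel setter mirroring the RXRPC_MIN_SECURITY_LEVEL socket option, and like the option it is only accepted while the socket is still RXRPC_UNBOUND (line 644). A userspace equivalent using the constants from <linux/rxrpc.h>:

    #include <sys/socket.h>
    #include <linux/rxrpc.h>

    #ifndef SOL_RXRPC
    #define SOL_RXRPC 272
    #endif

    /* Must run before bind()/connect(): the kernel rejects it once
     * sk_state has left RXRPC_UNBOUND. */
    static int require_encryption(int fd)
    {
            unsigned int level = RXRPC_SECURITY_ENCRYPT;

            return setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
                              &level, sizeof(level));
    }
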
661 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
668 lock_sock(&rx->sk);
678 if (rx->sk.sk_state != RXRPC_UNBOUND)
688 if (rx->sk.sk_state != RXRPC_UNBOUND)
698 if (rx->sk.sk_state != RXRPC_UNBOUND)
708 if (rx->sk.sk_state != RXRPC_UNBOUND)
726 if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
750 release_sock(&rx->sk);
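
The setsockopt() handler at lines 661-750 gates almost every option on the socket still being RXRPC_UNBOUND (lines 678-708). The one exception matched here, at line 726, requires RXRPC_SERVER_BOUND2, i.e. a socket already bound to two service IDs; that matches the RXRPC_UPGRADEABLE_SERVICE option, which maps one bound service onto the other. A hedged userspace sketch (service IDs 100 and 101 are placeholders):

    #include <sys/socket.h>
    #include <linux/rxrpc.h>

    #ifndef SOL_RXRPC
    #define SOL_RXRPC 272
    #endif

    /* After binding service 100 and then 101 on the same socket,
     * ask the kernel to offer an upgrade from 100 to 101. */
    static int make_upgradeable(int fd)
    {
            unsigned int services[2] = { 100, 101 };  /* from, to */

            return setsockopt(fd, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
                              services, sizeof(services));
    }
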
788 struct sock *sk = sock->sk;
789 struct rxrpc_sock *rx = rxrpc_sk(sk);
803 if (rxrpc_writable(sk))
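
Lines 788-803 are the poll hook: EPOLLOUT is reported only while rxrpc_writable() holds, pairing with the rxrpc_write_space() wakeup at the top of the file. Given an AF_RXRPC descriptor fd, this is ordinary poll() usage:

    #include <poll.h>

    struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

    /* Returns when a message is queued for reading, or when enough
     * transmit memory has been freed that rxrpc_write_space() fired. */
    poll(&pfd, 1, -1);
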
817 struct sock *sk;
832 sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
833 if (!sk)
836 sock_init_data(sock, sk);
837 sock_set_flag(sk, SOCK_RCU_FREE);
838 sk->sk_state = RXRPC_UNBOUND;
839 sk->sk_write_space = rxrpc_write_space;
840 sk->sk_max_ack_backlog = 0;
841 sk->sk_destruct = rxrpc_sock_destructor;
843 rx = rxrpc_sk(sk);
855 rxnet = rxrpc_net(sock_net(&rx->sk));
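
rxrpc_create() (lines 817-855) allocates the sock, takes the sock_init_data() defaults, and then overrides what rxrpc needs: SOCK_RCU_FREE so the destructor runs only after an RCU grace period, the RXRPC_UNBOUND starting state, and the write-space and destructor hooks matched above. The sk_state values threaded through this whole listing come from one enum; the sketch below reflects my reading of net/rxrpc/ar-internal.h, so treat it as illustrative:

    enum rxrpc_sock_state {
            RXRPC_UNBOUND,                /* fresh from rxrpc_create() */
            RXRPC_CLIENT_UNBOUND,         /* connect()ed, no local endpoint yet */
            RXRPC_CLIENT_BOUND,           /* client local endpoint bound */
            RXRPC_SERVER_BOUND,           /* bound to one service ID */
            RXRPC_SERVER_BOUND2,          /* bound to a second service ID */
            RXRPC_SERVER_LISTENING,       /* listen() has succeeded */
            RXRPC_SERVER_LISTEN_DISABLED, /* listen(fd, 0) turned it off */
            RXRPC_CLOSE,                  /* socket is being shut down */
    };

The ordering matters: rxrpc_shutdown() below tests sk->sk_state < RXRPC_CLOSE, which is only correct because RXRPC_CLOSE is the last enumerator.
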
867 struct sock *sk = sock->sk;
868 struct rxrpc_sock *rx = rxrpc_sk(sk);
871 _enter("%p,%d", sk, flags);
875 if (sk->sk_state == RXRPC_CLOSE)
878 lock_sock(sk);
880 if (sk->sk_state < RXRPC_CLOSE) {
881 sk->sk_state = RXRPC_CLOSE;
882 sk->sk_shutdown = SHUTDOWN_MASK;
889 release_sock(sk);
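
rxrpc_shutdown() (lines 867-889) refuses to act on an already closed socket and otherwise forces a full shutdown: under the socket lock the state becomes RXRPC_CLOSE and both directions are shut off. A sketch of the locked section:

    lock_sock(sk);
    if (sk->sk_state < RXRPC_CLOSE) {
            sk->sk_state = RXRPC_CLOSE;
            sk->sk_shutdown = SHUTDOWN_MASK;   /* no further rx or tx */
    } else {
            ret = -ESHUTDOWN;                  /* already closed */
    }
    release_sock(sk);
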
896 static void rxrpc_sock_destructor(struct sock *sk)
898 _enter("%p", sk);
900 rxrpc_purge_queue(&sk->sk_receive_queue);
902 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
903 WARN_ON(!sk_unhashed(sk));
904 WARN_ON(sk->sk_socket);
906 if (!sock_flag(sk, SOCK_DEAD)) {
907 printk("Attempt to release alive rxrpc socket: %p\n", sk);
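
The destructor (lines 896-907) is mostly sanity checks: queued receive skbs are purged, then it warns if write memory is still charged, if the sock is still hashed, or if it is still attached to a struct socket. The final check at lines 906-907 presumably bails out rather than freeing a live socket:

    if (!sock_flag(sk, SOCK_DEAD)) {
            /* Leak the socket rather than free memory still in use. */
            printk("Attempt to release alive rxrpc socket: %p\n", sk);
            return;
    }
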
915 static int rxrpc_release_sock(struct sock *sk)
917 struct rxrpc_sock *rx = rxrpc_sk(sk);
919 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
922 sock_orphan(sk);
923 sk->sk_shutdown = SHUTDOWN_MASK;
929 switch (sk->sk_state) {
938 sk->sk_state = RXRPC_CLOSE;
950 rxrpc_purge_queue(&sk->sk_receive_queue);
959 sock_put(sk);
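
rxrpc_release_sock() (lines 915-959) is the real teardown: the sock is orphaned (detached from its struct socket), both directions are shut down, per-state resources are dealt with in the switch at line 929, the state becomes RXRPC_CLOSE, the receive queue is purged, and the final sock_put() drops the reference taken at creation. With SOCK_RCU_FREE set, the destructor above then runs after an RCU grace period. Condensed:

    sock_orphan(sk);                    /* detach from struct socket */
    sk->sk_shutdown = SHUTDOWN_MASK;

    switch (sk->sk_state) {
            /* ...per-state cleanup (service sockets, listeners)... */
    }
    sk->sk_state = RXRPC_CLOSE;

    rxrpc_purge_queue(&sk->sk_receive_queue);
    sock_put(sk);                       /* may be the final reference */
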
970 struct sock *sk = sock->sk;
972 _enter("%p{%p}", sock, sk);
974 if (!sk)
977 sock->sk = NULL;
979 return rxrpc_release_sock(sk);