Lines matching references to ep (refs:ep)

41 static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
48 ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
49 if (!ep->digest)
52 ep->asconf_enable = net->sctp.addip_enable;
53 ep->auth_enable = net->sctp.auth_enable;
54 if (ep->auth_enable) {
55 if (sctp_auth_init(ep, gfp))
57 if (ep->asconf_enable) {
58 sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
59 sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
65 ep->base.type = SCTP_EP_TYPE_SOCKET;
68 refcount_set(&ep->base.refcnt, 1);
69 ep->base.dead = false;
72 sctp_inq_init(&ep->base.inqueue);
75 sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
78 sctp_bind_addr_init(&ep->base.bind_addr, 0);
81 INIT_LIST_HEAD(&ep->asocs);
84 ep->sndbuf_policy = net->sctp.sndbuf_policy;
91 ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
94 get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
97 INIT_LIST_HEAD(&ep->endpoint_shared_keys);
102 list_add(&null_key->key_list, &ep->endpoint_shared_keys);
107 ep->prsctp_enable = net->sctp.prsctp_enable;
108 ep->reconf_enable = net->sctp.reconf_enable;
109 ep->ecn_enable = net->sctp.ecn_enable;
112 ep->base.sk = sk;
113 ep->base.net = sock_net(sk);
114 sock_hold(ep->base.sk);
116 return ep;
119 sctp_auth_free(ep);
121 kfree(ep->digest);
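All of the matched lines above fall inside sctp_endpoint_init(), the endpoint constructor (this listing appears to come from net/sctp/endpoint.c in the Linux kernel). It allocates the digest buffer, sets up optional SCTP-AUTH state, initializes the base object (type, refcount, input queue, bind-address list), copies the per-net buffer policies and feature flags, generates the cookie secret, installs the null shared key, and finally pins the owning socket. Only lines 119 and 121 of the error path show up in the matches, so the following is a hedged sketch of how that unwind most plausibly fits together; the goto labels and everything marked as omitted are assumptions, not text from the listing.

static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                                                struct sock *sk, gfp_t gfp)
{
        struct net *net = sock_net(sk);
        struct sctp_shared_key *null_key;

        ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);         /* line 48 */
        if (!ep->digest)
                return NULL;                    /* nothing else to undo yet */

        ep->asconf_enable = net->sctp.addip_enable;             /* line 52 */
        ep->auth_enable = net->sctp.auth_enable;                /* line 53 */
        if (ep->auth_enable && sctp_auth_init(ep, gfp))
                goto nomem;                     /* only the digest to free */

        /* ... base object, input queue, bind address, policies, secret ... */

        null_key = sctp_auth_shkey_create(0, gfp);
        if (!null_key)
                goto nomem_shkey;               /* AUTH state must go too */
        list_add(&null_key->key_list, &ep->endpoint_shared_keys);  /* line 102 */

        ep->base.sk = sk;
        sock_hold(ep->base.sk);         /* endpoint pins its socket, line 114 */
        return ep;

nomem_shkey:
        sctp_auth_free(ep);                     /* line 119 */
nomem:
        kfree(ep->digest);                      /* line 121 */
        return NULL;
}

The point of the two labels is that each one undoes exactly the allocations made before the failing step, in reverse order.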
131 struct sctp_endpoint *ep;
134 ep = kzalloc(sizeof(*ep), gfp);
135 if (!ep)
138 if (!sctp_endpoint_init(ep, sk, gfp))
141 SCTP_DBG_OBJCNT_INC(ep);
142 return ep;
145 kfree(ep);
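Lines 131-145 are sctp_endpoint_new(), the allocate-then-initialize wrapper around the constructor above: zero-allocate the object, hand it to sctp_endpoint_init(), and bump the debug object counter on success. The kfree() at line 145 is the failure branch; the label name below is an assumption.

struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
        struct sctp_endpoint *ep;

        ep = kzalloc(sizeof(*ep), gfp);                 /* line 134 */
        if (!ep)
                return NULL;

        if (!sctp_endpoint_init(ep, sk, gfp))           /* line 138 */
                goto fail_init;                         /* label name assumed */

        SCTP_DBG_OBJCNT_INC(ep);                        /* line 141 */
        return ep;

fail_init:
        kfree(ep);                                      /* line 145 */
        return NULL;
}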
151 void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
154 struct sock *sk = ep->base.sk;
164 list_add_tail(&asoc->asocs, &ep->asocs);
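sctp_endpoint_add_asoc() (lines 151-164) links a fully built association onto the endpoint's asocs list. Only the socket lookup and the list_add_tail() are matched; the early return for temporary associations and the listen-backlog bump in the sketch below are assumptions about the gap between lines 154 and 164.

void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
                            struct sctp_association *asoc)
{
        struct sock *sk = ep->base.sk;                  /* line 154 */

        /* Assumed: temporary associations are about to be freed and must
         * never become findable through the endpoint.
         */
        if (asoc->temp)
                return;

        list_add_tail(&asoc->asocs, &ep->asocs);        /* line 164 */

        /* Assumed: a TCP-style listening socket also counts this
         * association toward its accept backlog.
         */
        if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
                sk_acceptq_added(sk);
}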
174 void sctp_endpoint_free(struct sctp_endpoint *ep)
176 ep->base.dead = true;
178 inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
181 sctp_unhash_endpoint(ep);
183 sctp_endpoint_put(ep);
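sctp_endpoint_free() (lines 174-183) is the public teardown entry point, and the ordering matters: mark the endpoint dead first, move the socket to SCTP_SS_CLOSED, unhash the endpoint so no new lookup can find it, and only then drop the initial reference. From a caller's point of view it pairs with sctp_endpoint_new(); the snippet below is a hypothetical usage sketch, not code from the file.

/* Hypothetical caller, illustrating the new/free pairing. */
static int example_endpoint_lifecycle(struct sock *sk)
{
        struct sctp_endpoint *ep;

        ep = sctp_endpoint_new(sk, GFP_KERNEL);         /* refcount starts at 1 */
        if (!ep)
                return -ENOMEM;

        /* ... endpoint hashed and in use ... */

        sctp_endpoint_free(ep); /* mark dead, unhash, drop the initial ref;
                                 * the memory itself is reclaimed later via RCU
                                 */
        return 0;
}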
189 struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
190 struct sock *sk = ep->base.sk;
192 sctp_sk(sk)->ep = NULL;
195 kfree(ep);
196 SCTP_DBG_OBJCNT_DEC(ep);
199 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
203 if (unlikely(!ep->base.dead)) {
204 WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
209 kfree(ep->digest);
214 sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
215 sctp_auth_free(ep);
218 sctp_inq_free(&ep->base.inqueue);
219 sctp_bind_addr_free(&ep->base.bind_addr);
221 memset(ep->secret_key, 0, sizeof(ep->secret_key));
223 sk = ep->base.sk;
228 call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
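Lines 189-196 and 199-228 are the two halves of the actual destruction. sctp_endpoint_destroy() runs when the last reference is dropped: it warns if the dead flag was never set, releases the digest, the AUTH keys and state, the input queue and the bind-address list, wipes the cookie secret, and then hands the final kfree() to sctp_endpoint_destroy_rcu() via call_rcu(), so lockless readers that found the endpoint under rcu_read_lock() never touch freed memory. A condensed sketch of that ordering follows; the sock_put() shown in the RCU callback (balancing the constructor's sock_hold()) and the port-release step implied by the sk lookup at line 223 (omitted here) are assumptions.

static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
{
        struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
        struct sock *sk = ep->base.sk;                  /* line 190 */

        sctp_sk(sk)->ep = NULL;         /* line 192: socket drops its pointer */
        sock_put(sk);                   /* assumed: balances sock_hold() in init */

        kfree(ep);                      /* line 195: grace period over, safe to free */
        SCTP_DBG_OBJCNT_DEC(ep);        /* line 196 */
}

static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
        if (unlikely(!ep->base.dead)) {                 /* line 203 */
                WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
                return;                 /* refuse to tear down a live endpoint */
        }

        kfree(ep->digest);                              /* line 209 */
        sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
        sctp_auth_free(ep);
        sctp_inq_free(&ep->base.inqueue);
        sctp_bind_addr_free(&ep->base.bind_addr);
        memset(ep->secret_key, 0, sizeof(ep->secret_key));  /* wipe cookie secret */

        /* line 228: the struct outlives this call by one RCU grace period */
        call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
}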
232 int sctp_endpoint_hold(struct sctp_endpoint *ep)
234 return refcount_inc_not_zero(&ep->base.refcnt);
240 void sctp_endpoint_put(struct sctp_endpoint *ep)
242 if (refcount_dec_and_test(&ep->base.refcnt))
243 sctp_endpoint_destroy(ep);
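sctp_endpoint_hold() and sctp_endpoint_put() (lines 232-243) are the reference-counting pair the RCU scheme above depends on. hold uses refcount_inc_not_zero(), so it can fail and report it, which is exactly what a lockless reader needs after finding the endpoint in a hash table: if the count already hit zero, destruction is underway and the reader must back off. put frees through sctp_endpoint_destroy() on the final drop. A hypothetical reader-side caller (the function name is invented for illustration):

/* Hypothetical reader: ep was just found in an RCU-protected lookup. */
static bool example_try_use_endpoint(struct sctp_endpoint *ep)
{
        if (!sctp_endpoint_hold(ep))    /* refcount already zero: being destroyed */
                return false;

        /* ... ep is pinned and safe to use, even after rcu_read_unlock() ... */

        sctp_endpoint_put(ep);          /* the last put calls sctp_endpoint_destroy() */
        return true;
}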
247 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
252 int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if);
255 if (net_eq(ep->base.net, net) &&
257 (htons(ep->base.bind_addr.port) == laddr->v4.sin_port)) {
258 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
259 sctp_sk(ep->base.sk)))
260 retval = ep;
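sctp_endpoint_is_match() (lines 247-260) decides whether this endpoint answers for a given local address: same network namespace, a compatible bound device, the same local port, and an address the endpoint is actually bound to. The matches skip the device comparison between lines 255 and 257 and the tail of the parameter list, so both are filled in below as assumptions (newer kernels route the device check through a VRF-aware helper).

struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
                                             struct net *net,
                                             const union sctp_addr *laddr,
                                             int dif)   /* tail of args assumed */
{
        int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if);  /* line 252 */
        struct sctp_endpoint *retval = NULL;

        if (net_eq(ep->base.net, net) &&                        /* same netns */
            (!bound_dev_if || bound_dev_if == dif) &&           /* assumed device check */
            htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {  /* line 257 */
                if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
                                         sctp_sk(ep->base.sk)))  /* lines 258-259 */
                        retval = ep;
        }

        return retval;
}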
271 const struct sctp_endpoint *ep,
283 if (!ep->base.bind_addr.port)
287 t = sctp_epaddr_lookup_transport(ep, paddr);
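Lines 271-287 belong to sctp_endpoint_lookup_assoc() (or its internal helper), which maps a peer address to one of this endpoint's associations: give up immediately if the endpoint has no bound local port, otherwise look the peer up in the transport hash. Only the port check and the sctp_epaddr_lookup_transport() call are matched; the RCU bracketing and the transport-to-association step below are assumptions, and the function name is changed to mark it as a reconstruction.

static struct sctp_association *example_lookup_assoc(
                                const struct sctp_endpoint *ep,
                                const union sctp_addr *paddr,
                                struct sctp_transport **transport)
{
        struct sctp_association *asoc = NULL;
        struct sctp_transport *t;

        *transport = NULL;
        if (!ep->base.bind_addr.port)   /* line 283: unbound, so no associations */
                return NULL;

        rcu_read_lock();                /* assumed: the hash walk is lockless */
        t = sctp_epaddr_lookup_transport(ep, paddr);    /* line 287 */
        if (t) {
                *transport = t;
                asoc = t->asoc;         /* assumed: association reached via transport */
        }
        rcu_read_unlock();

        return asoc;
}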
301 bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
304 int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if);
306 struct net *net = ep->base.net;
309 bp = &ep->base.bind_addr;
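sctp_endpoint_is_peeled_off() (lines 301-309) asks whether some other socket already owns an association with this peer, i.e. whether that association was peeled off to its own socket. The matches only set up bound_dev_if, the namespace and the bind-address pointer; the list walk below, probing the global association table for each bound local address, is an assumption, and the exact sctp_has_association() argument list varies across kernel versions.

bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
                                 const union sctp_addr *paddr)
{
        int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if);  /* line 304 */
        struct sctp_sockaddr_entry *addr;
        struct net *net = ep->base.net;                   /* line 306 */
        struct sctp_bind_addr *bp = &ep->base.bind_addr;  /* line 309 */

        /* Assumed: for every local address we are bound to, ask the global
         * association table whether a <local, peer> pair already exists on
         * some other socket (argument form is version dependent).
         */
        list_for_each_entry(addr, &bp->address_list, list) {
                if (sctp_has_association(net, &addr->a, paddr, bound_dev_if))
                        return true;
        }

        return false;
}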
327 struct sctp_endpoint *ep =
341 if (ep->base.dead)
345 inqueue = &ep->base.inqueue;
346 sk = ep->base.sk;
382 asoc = sctp_endpoint_lookup_assoc(ep,
399 SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS);
408 ep, asoc, chunk, GFP_ATOMIC);
416 if (!sctp_sk(sk)->ep)
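The last block, lines 327-416, is sctp_endpoint_bh_rcv(), the input-queue handler installed at line 75. It recovers the endpoint from the work item, bails out if the endpoint is already dead, then pops chunks off the input queue, resolves the owning association for each one and feeds it to the SCTP state machine; the check at line 416 stops the loop if the state machine released the endpoint underneath us (for example because the socket was closed). The sketch below is heavily condensed; the AUTH and out-of-the-blue handling, the state computation, and the sctp_do_sm() call itself are omitted or reduced to comments.

static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
        struct sctp_endpoint *ep =
                container_of(work, struct sctp_endpoint, base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
        struct sock *sk;

        if (ep->base.dead)              /* line 341: endpoint is on its way out */
                return;

        inqueue = &ep->base.inqueue;    /* line 345 */
        sk = ep->base.sk;               /* line 346 */

        while ((chunk = sctp_inq_pop(inqueue)) != NULL) {
                /* line 382: which association does this chunk belong to? */
                asoc = sctp_endpoint_lookup_assoc(ep, sctp_source(chunk),
                                                  &transport);

                /* line 399: non-DATA chunks count as inbound control chunks
                 * (the per-association counters are omitted here)
                 */
                if (!asoc || !sctp_chunk_is_data(chunk))
                        SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS);

                /* line 408: run the chunk through the state machine,
                 * sctp_do_sm(..., ep, asoc, chunk, GFP_ATOMIC)
                 */

                /* line 416: the state machine may have freed the endpoint;
                 * stop if our socket no longer points at it.
                 */
                if (!sctp_sk(sk)->ep)
                        break;
        }
}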