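/*
 * IPv4 ESP (Encapsulating Security Payload, RFC 4303) transform for the
 * xfrm framework, including NAT-T UDP encapsulation (RFC 3948).
 * From net/ipv4/ in the linux-2.6.36 tree shipped with asuswrt-rt-n18u.
 */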
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
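/*
 * Resulting buffer layout (a sketch of the allocation below, not a
 * separate structure):
 *
 *   tmp --> [ IV, aligned for the cipher      ]
 *           [ AEAD request + tfm request ctx  ]
 *           [ nfrags * struct scatterlist     ]
 */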
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
{
	unsigned int len;

	len = crypto_aead_ivsize(aead);
	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
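
/*
 * The esp_tmp_*() and esp_*_sg() helpers below recover each region of the
 * scratch buffer by repeating the alignment arithmetic used to size it.
 */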

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

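/*
 * Completion callback for asynchronous encryption: free the scratch
 * buffer and hand the skb back to the xfrm output path.
 */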
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

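/*
 * Output path: grow the trailer for RFC 4303 self-describing padding plus
 * the ICV, build the ESP (and, for NAT-T, UDP) headers in place, then
 * queue the packet for combined IV generation and encryption.
 */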
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct esp_data *esp;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	int i;

	/* skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);

	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
		goto error;
	nfrags = err;

	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp)
		goto error;

	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + 1;

	/* Fill self-describing padding (1, 2, 3, ...) per RFC 4303 */
	tail = skb_tail_pointer(trailer);
	for (i = 0; i < clen - skb->len - 2; i++)
		tail[i] = i + 1;
	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
	tail[clen - skb->len - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

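		/*
		 * RFC 3948 framing: for ESPINUDP the ESP header directly
		 * follows the UDP header; the legacy NON_IKE variant first
		 * inserts two zero 32-bit words (a non-ESP marker).
		 */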
		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}

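/*
 * Post-decryption processing: validate and strip the self-describing
 * padding, detect NAT-T address/port changes on the inbound SA, and
 * return the inner next-header value (or a negative error).
 */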
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/*
	 * RFC 4303: next header 59 (IPPROTO_NONE) marks a dummy packet;
	 * drop it without reporting an error back to the sender.
	 */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

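/* Completion callback for asynchronous decryption. */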
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
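/*
 * Input path: ensure the ESP header and IV are in the linear area, make
 * the payload writable, then queue an authenticated decryption of
 * everything after header + IV; esp_input_done2() finishes the job once
 * the ICV has been verified.
 */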
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	if (elen <= 0)
		goto out;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + 1;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong; check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, sizeof(*esph));

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	err = esp_input_done2(skb, err);

out:
	return err;
}

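/*
 * Report the largest payload that still fits in the given outer MTU once
 * ESP overhead (header + IV, ICV, padding to the cipher block, and the
 * 2-byte pad-length/next-header trailer) is accounted for.
 *
 * Illustrative numbers (assumed parameters, not taken from this file):
 * tunnel-mode AES-CBC with HMAC-SHA1 has header_len = 20 + 8 + 16 = 44
 * and a 12-byte ICV, so mtu = 1500 gives 1500 - 44 - 12 = 1444, rounded
 * down to the 16-byte multiple 1440, minus 2 = 1438 bytes of payload.
 */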
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	u32 rem;

	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}

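/*
 * ICMP error handler: on a "fragmentation needed" error, look up the SA
 * by destination address and SPI so the PMTU event can be noted for it.
 */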
static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}

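/*
 * Set up a true AEAD cipher (e.g. an rfc4106 GCM variant) from the
 * userspace-supplied algorithm name, key, and ICV length.
 */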
static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

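/*
 * Build a composite AEAD from separate encryption + authentication
 * algorithms; e.g. x->aalg "hmac(sha1)" with x->ealg "cbc(aes)" yields
 * the template instance "authenc(hmac(sha1),cbc(aes))". The key blob
 * handed to crypto_aead_setkey() is an rtattr-encoded enckeylen parameter
 * followed by the raw authentication key and then the raw encryption key.
 */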
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;
	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
		     x->aalg ? x->aalg->alg_name : "digest_null",
		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

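/*
 * One-time SA setup: pick the AEAD implementation, then size the
 * per-packet header (ESP + IV, plus outer IP/UDP for tunnel mode or
 * NAT-T) and the worst-case trailer (padding + pad-length/next-header
 * bytes + ICV).
 */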
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;

	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			/* unknown encapsulation type: fail instead of
			 * silently returning success */
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static const struct net_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.err_handler	=	esp4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);