/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

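/*
 * Check whether the queue has enough empty buffers, starting at
 * next_buf_to_fill, to hold all elements of the given context.
 * Returns the number of buffers needed, or -EBUSY if a buffer in the
 * checked range is not empty.
 */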
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}

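/* release all pages and the element list of a context */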
static void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}
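/* contexts are reference counted; the last put frees the context */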
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}

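/* drop all context references held by an output buffer */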
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}

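/*
 * Attach a context to an output buffer; takes an additional reference
 * on the context that is dropped in qeth_eddp_buf_release_contexts().
 */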
static int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}

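/*
 * Copy the elements of a context into the output buffers of the queue,
 * starting at buffer 'index'. A buffer that becomes full is set to
 * QETH_QDIO_BUF_PRIMED so it will be flushed. Returns the number of
 * buffers to flush, or -EBUSY if the first buffer is not empty.
 */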
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen, since we checked
			 * for empty buffers in
			 * qeth_eddp_check_buffers_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}

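/*
 * Write the qeth header, the MAC and VLAN headers (layer 2 only) and
 * the network and transport headers of one segment into the context
 * pages and start a new buffer element for them. The headers are never
 * split across a page boundary.
 */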
static void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add MAC header for layer 2 packets */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}

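/*
 * Copy 'len' bytes of payload from the skb (linear part and page
 * fragments) to 'dst' and add them to the checksum in '*hcsum'.
 */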
static void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			__wsum *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
						 dst, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len - eddp->skb->data_len)
						- eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->
					frags[eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				src = (u8 *)(
					(page_to_pfn(frag->page) << PAGE_SHIFT) +
					frag->page_offset + eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}

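/*
 * Copy the payload of one segment into the context pages, creating a
 * new buffer element whenever a page boundary is crossed, and write
 * the folded checksum into the segment's TCP header.
 */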
static void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  __wsum hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

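/*
 * Start the checksum for an IPv4 segment: sum the pseudo header and
 * the TCP header; the payload is added later in
 * qeth_eddp_create_segment_data_tcp().
 */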
static __wsum
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

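/*
 * Start the checksum for an IPv6 segment: sum the pseudo header
 * fields (RFC 2460); the payload is added later in
 * qeth_eddp_create_segment_data_tcp().
 */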
static __wsum
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	__be32 proto;
	__be32 tcp_len;
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	tcp_len = htonl(eddp->thl + data_len);
	phcsum = csum_partial((u8 *)&tcp_len, sizeof(u32), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

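/*
 * Allocate a qeth_eddp_data struct and store copies of the qeth,
 * network and transport headers of the original skb in it.
 */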
static struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}

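/*
 * Segment the skb payload into gso_size sized chunks; for each chunk
 * adjust the copied qeth, IP and TCP headers (length fields, IP
 * checksum, sequence number, FIN/PSH on the last segment) and fill
 * headers and data into the context.
 */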
static void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	__wsum hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
	}
	tcph = tcp_hdr(eddp->skb);
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl;
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == htons(ETH_P_IP)) {
			eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
						 eddp->thl);
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
						eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = htons(data_len + eddp->thl);
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == htons(ETH_P_IP))
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == htons(ETH_P_IP))
			eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
		eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + data_len);
	}
}

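/*
 * Set up a qeth_eddp_data struct for the skb's headers and fill the
 * context with the resulting segments. Returns 0 on success or
 * -ENOMEM if no memory for the eddp data is available.
 */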
static int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == htons(ETH_P_IP))
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  ip_hdrlen(skb),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));
	else
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  sizeof(struct ipv6hdr),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));

	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb_set_mac_header(skb, sizeof(struct qeth_hdr));
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = skb->protocol;
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* the FIN and PSH flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}

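/*
 * Calculate how many pages and buffer elements the context needs for
 * the skb's segments; the numbers are stored in the context.
 */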
static void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
				     PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}

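/*
 * Allocate a context together with its pages and element list for an
 * skb that carries hdr_len bytes of headers per segment. Returns NULL
 * if an allocation fails or a single segment would need more elements
 * than one buffer provides.
 */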
static struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; it is incremented again while the context is
	 * filled to reflect the number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}

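/*
 * Create and fill a context for a TCP segmentation offload skb.
 * Returns the context with its refcnt set to 1, or NULL on failure.
 */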
static struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == htons(ETH_P_IP))
		ctx = qeth_eddp_create_context_generic(card, skb,
						       (sizeof(struct qeth_hdr) +
						        ip_hdrlen(skb) +
							tcp_hdrlen(skb)));
	else if (skb->protocol == htons(ETH_P_IPV6))
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			tcp_hdrlen(skb));
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}

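/*
 * Entry point for context creation; only TCP is supported, any other
 * protocol yields NULL.
 */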
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr, unsigned char sk_protocol)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}