// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>

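/*
 * Workqueue bottom halves: deliver queued receive frames to the peer
 * channel and, for the D-channel, run the PH state change callback.
 * Both are scheduled via schedule_event() from the receive helpers below.
 */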
static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel	*dch  = container_of(ws, struct dchannel, workq);
	struct sk_buff	*skb;
	int		err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel	*bch  = container_of(ws, struct bchannel, workq);
	struct sk_buff	*skb;
	int		err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

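/**
 * mISDN_initdchannel - initialize a D-channel structure
 * @ch:		channel to set up
 * @maxlen:	maximum frame size accepted by dchannel_senddata()
 * @phf:	optional callback run from the workqueue on FLG_PHCHANGE
 *
 * Marks the channel as HDLC, resets the skb pointers, initializes the
 * send/receive queues and hooks up the D-channel bottom half.
 */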
int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

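/**
 * mISDN_initbchannel - initialize a B-channel structure
 * @ch:		channel to set up
 * @maxlen:	initial maximum receive/transmit frame size
 * @minlen:	initial minimum size for transparent receive buffers
 *
 * Clears all flags, records the initial min/max lengths (which
 * MISDN_CTRL_RX_BUFFER may later override), resets the skb pointers and
 * hooks up the B-channel bottom half.
 */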
int
mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
		   unsigned short minlen)
{
	ch->Flags = 0;
	ch->minlen = minlen;
	ch->next_minlen = minlen;
	ch->init_minlen = minlen;
	ch->maxlen = maxlen;
	ch->next_maxlen = maxlen;
	ch->init_maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

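/**
 * mISDN_freedchannel - release all resources held by a D-channel
 * @ch:		channel to clean up
 *
 * Frees any pending rx/tx skbs, purges the send and receive queues and
 * waits for a scheduled bottom half to finish.
 */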
int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_work(&ch->workq);
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

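/**
 * mISDN_clear_bchannel - reset a B-channel to its initial state
 * @ch:		channel to reset
 *
 * Frees pending skbs, clears the transmit/receive state flags, restores
 * the initial min/max buffer lengths and empties the receive queue.
 */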
void
mISDN_clear_bchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	ch->tx_idx = 0;
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
	test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
	test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
	test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
	ch->dropcnt = 0;
	ch->minlen = ch->init_minlen;
	ch->next_minlen = ch->init_minlen;
	ch->maxlen = ch->init_maxlen;
	ch->next_maxlen = ch->init_maxlen;
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
}
EXPORT_SYMBOL(mISDN_clear_bchannel);

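/**
 * mISDN_freebchannel - release all resources held by a B-channel
 * @ch:		channel to clean up
 *
 * Cancels the bottom half and then resets the channel state via
 * mISDN_clear_bchannel().
 */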
void
mISDN_freebchannel(struct bchannel *ch)
{
	cancel_work_sync(&ch->workq);
	mISDN_clear_bchannel(ch);
}
EXPORT_SYMBOL(mISDN_freebchannel);

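/**
 * mISDN_ctrl_bchannel - handle the generic B-channel control operations
 * @bch:	channel the request is for
 * @cq:		control request; p1/p2 are operation specific parameters
 *
 * Supports MISDN_CTRL_GETOP, MISDN_CTRL_FILL_EMPTY, MISDN_CTRL_RX_OFF and
 * MISDN_CTRL_RX_BUFFER. Hardware drivers typically call this from their
 * own control handler for operations they do not implement themselves,
 * roughly like this (illustrative sketch only; hw_fill_fifo() is a
 * hypothetical driver helper):
 *
 *	case MISDN_CTRL_FILL_EMPTY:
 *		ret = mISDN_ctrl_bchannel(bch, cq);
 *		if (!ret && cq->p1)
 *			hw_fill_fifo(bch);
 *		break;
 *	default:
 *		ret = mISDN_ctrl_bchannel(bch, cq);
 *		break;
 *
 * Returns 0 on success or -EINVAL for an unknown operation.
 */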
int
mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
	int ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
			 MISDN_CTRL_RX_OFF;
		break;
	case MISDN_CTRL_FILL_EMPTY:
		if (cq->p1) {
			memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
			test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
		} else {
			test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
		}
		break;
	case MISDN_CTRL_RX_OFF:
		/* read back dropped byte count */
		cq->p2 = bch->dropcnt;
		if (cq->p1)
			test_and_set_bit(FLG_RX_OFF, &bch->Flags);
		else
			test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
		bch->dropcnt = 0;
		break;
	case MISDN_CTRL_RX_BUFFER:
		if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
			bch->next_maxlen = cq->p2;
		if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
			bch->next_minlen = cq->p1;
		/* we return the old values */
		cq->p1 = bch->minlen;
		cq->p2 = bch->maxlen;
		break;
	default:
		pr_info("mISDN unhandled control operation %x\n", cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(mISDN_ctrl_bchannel);

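/* extract SAPI and TEI from the two LAPD address octets; the result is
 * packed as sapi | (tei << 8) and used as the mISDN header id
 */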
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int	sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

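/**
 * recv_Dchannel - queue a received D-channel frame for delivery
 * @dch:	channel that owns the frame in dch->rx_skb
 *
 * Frames shorter than the two LAPD address octets are dropped. Otherwise
 * the mISDN header is set to PH_DATA_IND with the SAPI/TEI id and the skb
 * is put on the receive queue; delivery happens in the bottom half.
 */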
void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

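/**
 * recv_Echannel - queue a received E-channel (echo) frame
 * @ech:	channel that owns the frame in ech->rx_skb
 * @dch:	D-channel whose receive queue and worker deliver the frame
 *
 * Like recv_Dchannel(), but the frame is marked PH_DATA_E_IND and queued
 * on the D-channel.
 */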
void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

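/**
 * recv_Bchannel - queue the current receive skb for delivery
 * @bch:	channel that owns the frame in bch->rx_skb
 * @id:		value stored in the mISDN header id field
 * @force:	deliver even if a transparent frame is still below minlen
 *
 * Empty buffers are freed, short transparent frames are kept for more
 * data unless @force is set, and the queue is flushed if 64 or more
 * frames pile up. A hardware ISR would typically use it together with
 * bchannel_get_rxbuf(), roughly like this (illustrative sketch only;
 * hw_read_fifo() and count are hypothetical):
 *
 *	ret = bchannel_get_rxbuf(bch, count);
 *	if (ret >= count) {
 *		hw_read_fifo(skb_put(bch->rx_skb, count), count);
 *		recv_Bchannel(bch, 0, false);
 *	}
 */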
void
recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
{
	struct mISDNhead *hh;

	/* if allocation failed, upper functions may still call us */
	if (unlikely(!bch->rx_skb))
		return;
	if (unlikely(!bch->rx_skb->len)) {
		/* we have no data to deliver - this may happen after recovery
		 * from overflow or a too small allocation.
		 * We need to free the buffer here */
		dev_kfree_skb(bch->rx_skb);
		bch->rx_skb = NULL;
	} else {
		if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
		    (bch->rx_skb->len < bch->minlen) && !force)
			return;
		hh = mISDN_HEAD_P(bch->rx_skb);
		hh->prim = PH_DATA_IND;
		hh->id = id;
		if (bch->rcount >= 64) {
			printk(KERN_WARNING
			       "B%d receive queue overflow - flushing!\n",
			       bch->nr);
			skb_queue_purge(&bch->rqueue);
			/* keep rcount in sync after flushing the queue */
			bch->rcount = 0;
		}
		bch->rcount++;
		skb_queue_tail(&bch->rqueue, bch->rx_skb);
		bch->rx_skb = NULL;
		schedule_event(bch, FLG_RECVQUEUE);
	}
}
EXPORT_SYMBOL(recv_Bchannel);

void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, flushing!\n",
		       bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

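/* queue a PH_DATA_CNF confirm for the just dequeued D-channel frame so
 * the upper layer may hand down the next one
 */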
static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff	*skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

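/**
 * get_next_dframe - fetch the next queued D-channel transmit frame
 * @dch:	channel to take the frame from
 *
 * Dequeues the next skb from the send queue into dch->tx_skb and queues
 * a PH_DATA_CNF for it. Returns 1 if a new frame is ready, 0 if the
 * queue was empty and FLG_TX_BUSY has been cleared.
 */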
int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

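/* queue a PH_DATA_CNF confirm for the current B-channel transmit frame,
 * flushing the receive queue first if it is about to overflow
 */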
static void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff	*skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, flushing!\n",
		       bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}

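/**
 * get_next_bframe - promote a pending next_skb to the transmit slot
 * @bch:	channel to update
 *
 * If FLG_TX_NEXT is set, moves bch->next_skb to bch->tx_skb and confirms
 * it so the upper layer can queue more data. Returns 1 if a new frame is
 * ready, 0 if nothing was pending and FLG_TX_BUSY has been cleared.
 */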
int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			/* confirm immediately to allow next data */
			confirm_Bsend(bch);
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

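/**
 * queue_ch_frame - pass a frame (or an empty indication) to the peer
 * @ch:		channel whose peer should receive the frame
 * @pr:		mISDN primitive to put into the header
 * @id:		header id
 * @skb:	frame to deliver, or NULL to send a data-less message
 *
 * With @skb == NULL an empty message is built via _queue_data(); otherwise
 * the header is rewritten and the skb handed to the peer. The skb is freed
 * if there is no peer or the peer's recv() fails.
 */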
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

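/**
 * dchannel_senddata - hand a D-channel frame to the driver transmit path
 * @ch:		channel to send on
 * @skb:	frame to transmit
 *
 * The caller must hold the hardware lock. Returns 1 if the frame was
 * placed into ch->tx_skb and should be written to the FIFO now, 0 if it
 * was queued behind a transmission already in progress, or -EINVAL for
 * an empty or oversized frame.
 */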
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check frame size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

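/**
 * bchannel_senddata - hand a B-channel frame to the driver transmit path
 * @ch:		channel to send on
 * @skb:	frame to transmit
 *
 * The caller must hold the hardware lock. Returns 1 if the frame was
 * placed into ch->tx_skb (already confirmed) and should be written to
 * the FIFO now, 0 if it was parked as next_skb behind a busy transmitter,
 * -EBUSY if a next_skb is already pending, or -EINVAL for an empty or
 * oversized frame.
 */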
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{
	/* check frame size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		       "%s: next_skb exists ERROR (skb->len=%d next_skb->len=%d)\n",
		       __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		confirm_Bsend(ch);
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);

/* Allocate a new receive skb on demand, sized to the requirements of the
 * current protocol. Returns the available tailroom of the receive skb or
 * a negative error code.
 */
int
bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
{
	int len;

	if (bch->rx_skb) {
		len = skb_tailroom(bch->rx_skb);
		if (len < reqlen) {
			pr_warn("B%d no space for %d (only %d) bytes\n",
				bch->nr, reqlen, len);
			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
				/* deliver what we have now and try a new buffer */
				recv_Bchannel(bch, 0, true);
			} else {
				/* on HDLC we have to drop too big frames */
				return -EMSGSIZE;
			}
		} else {
			return len;
		}
	}
	/* update current min/max length first */
	if (unlikely(bch->maxlen != bch->next_maxlen))
		bch->maxlen = bch->next_maxlen;
	if (unlikely(bch->minlen != bch->next_minlen))
		bch->minlen = bch->next_minlen;
	if (unlikely(reqlen > bch->maxlen))
		return -EMSGSIZE;
	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
		if (reqlen >= bch->minlen) {
			len = reqlen;
		} else {
			len = 2 * bch->minlen;
			if (len > bch->maxlen)
				len = bch->maxlen;
		}
	} else {
		/* with HDLC we do not know the length yet */
		len = bch->maxlen;
	}
	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!bch->rx_skb) {
		pr_warn("B%d receive no memory for %d bytes\n", bch->nr, len);
		len = -ENOMEM;
	}
	return len;
}
EXPORT_SYMBOL(bchannel_get_rxbuf);