// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43legacy wireless driver

  PIO Transmission

  Copyright (c) 2005 Michael Buesch <m@bues.ch>


*/

#include "b43legacy.h"
#include "pio.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>
#include <linux/slab.h>


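/* Tell the hardware that a new frame transfer starts on this queue. */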
static void tx_start(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_INIT);
}

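/* Push a single byte through the low half of the TX data register.
 * Cores that need the PIO workarounds want the data word written
 * before the control word; newer cores expect the opposite order.
 */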
static void tx_octet(struct b43legacy_pioqueue *queue,
		     u8 octet)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
	} else {
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
	}
}

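/* Fetch the next 16-bit little-endian word from the logical byte
 * stream formed by the TX header followed by the packet data.
 * *pos indexes into that combined stream and is advanced by two.
 */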
static u16 tx_get_next_word(const u8 *txhdr,
			    const u8 *packet,
			    size_t txhdr_size,
			    unsigned int *pos)
{
	const u8 *source;
	unsigned int i = *pos;
	u16 ret;

	if (i < txhdr_size)
		source = txhdr;
	else {
		source = packet;
		i -= txhdr_size;
	}
	ret = le16_to_cpu(*((__le16 *)(source + i)));
	*pos += 2;

	return ret;
}

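/* Feed the TX header and packet payload to the device, one 16-bit
 * word at a time. A trailing odd byte is sent via tx_octet().
 */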
static void tx_data(struct b43legacy_pioqueue *queue,
		    u8 *txhdr,
		    const u8 *packet,
		    unsigned int octets)
{
	u16 data;
	unsigned int i = 0;

	if (queue->need_workarounds) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_WRITELO |
			    B43legacy_PIO_TXCTL_WRITEHI);
	while (i < octets - 1) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	if (octets % 2)
		tx_octet(queue, packet[octets -
			 sizeof(struct b43legacy_txhdr_fw3) - 1]);
}

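/* Finish the frame transfer. On workaround cores the last payload
 * byte is written together with setting the COMPLETE bit.
 */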
static void tx_complete(struct b43legacy_pioqueue *queue,
			struct sk_buff *skb)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA,
				    skb->data[skb->len - 1]);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO |
				    B43legacy_PIO_TXCTL_COMPLETE);
	} else
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_COMPLETE);
}

static u16 generate_cookie(struct b43legacy_pioqueue *queue,
			   struct b43legacy_pio_txpacket *packet)
{
	u16 cookie = 0x0000;
	int packetindex;

	/* We use the upper 4 bits for the PIO
	 * controller ID and the lower 12 bits
	 * for the packet index (in the cache).
	 */
	switch (queue->mmio_base) {
	case B43legacy_MMIO_PIO1_BASE:
		break;
	case B43legacy_MMIO_PIO2_BASE:
		cookie = 0x1000;
		break;
	case B43legacy_MMIO_PIO3_BASE:
		cookie = 0x2000;
		break;
	case B43legacy_MMIO_PIO4_BASE:
		cookie = 0x3000;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = pio_txpacket_getindex(packet);
	B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000));
	cookie |= (u16)packetindex;

	return cookie;
}

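/* Inverse of generate_cookie(): map a TX status cookie back to the
 * PIO queue and the packet slot in its cache.
 */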
static
struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev,
					u16 cookie,
					struct b43legacy_pio_txpacket **packet)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue = NULL;
	int packetindex;

	switch (cookie & 0xF000) {
	case 0x0000:
		queue = pio->queue0;
		break;
	case 0x1000:
		queue = pio->queue1;
		break;
	case 0x2000:
		queue = pio->queue2;
		break;
	case 0x3000:
		queue = pio->queue3;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(packetindex >= 0 && packetindex
			  < B43legacy_PIO_MAXTXPACKETS));
	*packet = &(queue->tx_packets_cache[packetindex]);

	return queue;
}

union txhdr_union {
	struct b43legacy_txhdr_fw3 txhdr_fw3;
};

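/* Build the firmware TX header for skb and push header plus frame
 * data through the PIO data registers. Returns 0 on success or a
 * negative error code from header generation.
 */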
static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
				  struct sk_buff *skb,
				  struct b43legacy_pio_txpacket *packet,
				  size_t txhdr_size)
{
	union txhdr_union txhdr_data;
	u8 *txhdr = NULL;
	unsigned int octets;
	int err;

	txhdr = (u8 *)(&txhdr_data.txhdr_fw3);

	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
	err = b43legacy_generate_txhdr(queue->dev,
				 txhdr, skb->data, skb->len,
				 IEEE80211_SKB_CB(skb),
				 generate_cookie(queue, packet));
	if (err)
		return err;

	tx_start(queue);
	octets = skb->len + txhdr_size;
	if (queue->need_workarounds)
		octets--;
	tx_data(queue, txhdr, (u8 *)skb->data, octets);
	tx_complete(queue, skb);

	return 0;
}

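/* Free the skb (if any) and return the packet slot to the queue's
 * free list. irq_context selects the appropriate skb free variant.
 */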
static void free_txpacket(struct b43legacy_pio_txpacket *packet,
			  int irq_context)
{
	struct b43legacy_pioqueue *queue = packet->queue;

	if (packet->skb) {
		if (irq_context)
			dev_kfree_skb_irq(packet->skb);
		else
			dev_kfree_skb(packet->skb);
	}
	list_move(&packet->list, &queue->txfree);
	queue->nr_txfree++;
}

static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
{
	struct b43legacy_pioqueue *queue = packet->queue;
	struct sk_buff *skb = packet->skb;
	u16 octets;
	int err;

	octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
	if (queue->tx_devq_size < octets) {
		b43legacywarn(queue->dev->wl, "PIO queue too small. "
			"Dropping packet.\n");
		/* Drop it silently (return success) */
		free_txpacket(packet, 1);
		return 0;
	}
	B43legacy_WARN_ON(queue->tx_devq_packets >
			  B43legacy_PIO_MAXTXDEVQPACKETS);
	B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size);
	/* Check if there is sufficient free space on the device
	 * TX queue. If not, return and let the TX tasklet
	 * retry later.
	 */
	if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS)
		return -EBUSY;
	if (queue->tx_devq_used + octets > queue->tx_devq_size)
		return -EBUSY;
	/* Now poke the device. */
	err = pio_tx_write_fragment(queue, skb, packet,
			      sizeof(struct b43legacy_txhdr_fw3));
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		free_txpacket(packet, 1);
		return 0;
	}

	/* Account for the packet size.
	 * (We must not overflow the device TX queue)
	 */
	queue->tx_devq_packets++;
	queue->tx_devq_used += octets;

	/* Transmission started, everything ok, move the
	 * packet to the txrunning list.
	 */
	list_move_tail(&packet->list, &queue->txrunning);

	return 0;
}

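/* TX tasklet: drain the software txqueue into the device while it
 * has room, holding wl->irq_lock. Stops early if the queue is
 * frozen or the hardware has suspended transmission.
 */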
static void tx_tasklet(struct tasklet_struct *t)
{
	struct b43legacy_pioqueue *queue = from_tasklet(queue, t, txtask);
	struct b43legacy_wldev *dev = queue->dev;
	unsigned long flags;
	struct b43legacy_pio_txpacket *packet, *tmp_packet;
	int err;
	u16 txctl;

	spin_lock_irqsave(&dev->wl->irq_lock, flags);
	if (queue->tx_frozen)
		goto out_unlock;
	txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL);
	if (txctl & B43legacy_PIO_TXCTL_SUSPEND)
		goto out_unlock;

	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
		/* Try to transmit the packet. This can fail, if
		 * the device queue is full. In case of failure, the
		 * packet is left in the txqueue.
		 * If transmission succeeds, the packet is moved to txrunning.
		 * If it is impossible to transmit the packet, it
		 * is dropped.
		 */
		err = pio_tx_packet(packet);
		if (err)
			break;
	}
out_unlock:
	spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}

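/* Put every packet slot of the cache onto the queue's free list. */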
static void setup_txqueues(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet;
	int i;

	queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS;
	for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) {
		packet = &(queue->tx_packets_cache[i]);

		packet->queue = queue;
		INIT_LIST_HEAD(&packet->list);

		list_add(&packet->list, &queue->txfree);
	}
}

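/* Allocate and initialize one PIO queue for the given MMIO base.
 * Reads the device TX buffer size and reserves some headroom
 * (B43legacy_PIO_TXQADJUST). Returns NULL on failure.
 */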
static
struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev,
						    u16 pio_mmio_base)
{
	struct b43legacy_pioqueue *queue;
	u32 value;
	u16 qsize;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		goto out;

	queue->dev = dev;
	queue->mmio_base = pio_mmio_base;
	queue->need_workarounds = (dev->dev->id.revision < 3);

	INIT_LIST_HEAD(&queue->txfree);
	INIT_LIST_HEAD(&queue->txqueue);
	INIT_LIST_HEAD(&queue->txrunning);
	tasklet_setup(&queue->txtask, tx_tasklet);

	value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
	value &= ~B43legacy_MACCTL_BE;
	b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value);

	qsize = b43legacy_read16(dev, queue->mmio_base
				 + B43legacy_PIO_TXQBUFSIZE);
	if (qsize == 0) {
		b43legacyerr(dev->wl, "This card does not support PIO "
		       "operation mode. Please use DMA mode "
		       "(module parameter pio=0).\n");
		goto err_freequeue;
	}
	if (qsize <= B43legacy_PIO_TXQADJUST) {
		b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n",
		       qsize);
		goto err_freequeue;
	}
	qsize -= B43legacy_PIO_TXQADJUST;
	queue->tx_devq_size = qsize;

	setup_txqueues(queue);

out:
	return queue;

err_freequeue:
	kfree(queue);
	queue = NULL;
	goto out;
}

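/* Kill the TX tasklet and drop all packets that are still queued
 * or in flight on this queue.
 */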
static void cancel_transfers(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet, *tmp_packet;

	tasklet_kill(&queue->txtask);

	list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
		free_txpacket(packet, 0);
	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
		free_txpacket(packet, 0);
}

static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue)
{
	if (!queue)
		return;

	cancel_transfers(queue);
	kfree(queue);
}

void b43legacy_pio_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	if (!b43legacy_using_pio(dev))
		return;
	pio = &dev->pio;

	b43legacy_destroy_pioqueue(pio->queue3);
	pio->queue3 = NULL;
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
}

int b43legacy_pio_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue;
	int err = -ENOMEM;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE);
	if (!queue)
		goto out;
	pio->queue0 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE);
	if (!queue)
		goto err_destroy0;
	pio->queue1 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE);
	if (!queue)
		goto err_destroy1;
	pio->queue2 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE);
	if (!queue)
		goto err_destroy2;
	pio->queue3 = queue;

	if (dev->dev->id.revision < 3)
		dev->irq_mask |= B43legacy_IRQ_PIO_WORKAROUND;

	b43legacydbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy2:
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
err_destroy1:
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
err_destroy0:
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
	goto out;
}

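/* Queue an skb for transmission: take a free packet slot from
 * queue1, attach the skb and kick the TX tasklet.
 */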
int b43legacy_pio_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_pioqueue *queue = dev->pio.queue1;
	struct b43legacy_pio_txpacket *packet;

	B43legacy_WARN_ON(queue->tx_suspended);
	B43legacy_WARN_ON(list_empty(&queue->txfree));

	packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket,
			    list);
	packet->skb = skb;

	list_move_tail(&packet->list, &queue->txqueue);
	queue->nr_txfree--;
	B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);

	tasklet_schedule(&queue->txtask);

	return 0;
}

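/* Handle a TX status report: update the device-queue accounting,
 * fill in the rate-control counters for mac80211 and return the
 * packet slot to the free list.
 */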
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_pioqueue *queue;
	struct b43legacy_pio_txpacket *packet;
	struct ieee80211_tx_info *info;
	int retry_limit;

	queue = parse_cookie(dev, status->cookie, &packet);
	B43legacy_WARN_ON(!queue);

	if (!packet->skb)
		return;

	queue->tx_devq_packets--;
	queue->tx_devq_used -= (packet->skb->len +
				sizeof(struct b43legacy_txhdr_fw3));

	info = IEEE80211_SKB_CB(packet->skb);

	/* Preserve the configured retry limit before clearing the status.
	 * The xmit function has overwritten the rc's value with the actual
	 * retry limit done by the hardware. */
	retry_limit = info->status.rates[0].count;
	ieee80211_tx_info_clear_status(info);

	if (status->acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
		/*
		 * If the short retries (RTS, not data frame) have exceeded
		 * the limit, the hw will not have tried the selected rate,
		 * but will have used the fallback rate instead.
		 * Don't let the rate control count attempts for the selected
		 * rate in this case, otherwise the statistics will be off.
		 */
		info->status.rates[0].count = 0;
		info->status.rates[1].count = status->frame_count;
	} else {
		if (status->frame_count > retry_limit) {
			info->status.rates[0].count = retry_limit;
			info->status.rates[1].count = status->frame_count -
					retry_limit;
		} else {
			info->status.rates[0].count = status->frame_count;
			info->status.rates[1].idx = -1;
		}
	}
	ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
	packet->skb = NULL;

	free_txpacket(packet, 1);
	/* If there are packets on the txqueue, poke the tasklet
	 * to transmit them.
	 */
	if (!list_empty(&queue->txqueue))
		tasklet_schedule(&queue->txtask);
}

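/* Log an RX error, write back the READY bit to the RX control
 * register and, if requested, drain the device RX buffer with
 * dummy reads.
 */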
static void pio_rx_error(struct b43legacy_pioqueue *queue,
			 int clear_buffers,
			 const char *error)
{
	int i;

	b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error);
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_READY);
	if (clear_buffers) {
		B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE);
		for (i = 0; i < 15; i++) {
			/* Dummy read. */
			b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		}
	}
}

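/* Receive path: wait briefly for the device to signal that frame
 * data is ready, read the RX header preamble, then copy the frame
 * into a freshly allocated skb and hand it to the RX handler.
 * The PIO4 queue delivers hardware TX status reports instead of
 * frames.
 */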
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
	__le16 preamble[21] = { 0 };
	struct b43legacy_rxhdr_fw3 *rxhdr;
	u16 tmp;
	u16 len;
	u16 macstat;
	int i;
	int preamble_readwords;
	struct sk_buff *skb;

	tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
	if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE))
		return;
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_DATAAVAILABLE);

	for (i = 0; i < 10; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
		if (tmp & B43legacy_PIO_RXCTL_READY)
			goto data_ready;
		udelay(10);
	}
	b43legacydbg(queue->dev->wl, "PIO RX timed out\n");
	return;
data_ready:

	len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
	if (unlikely(len > 0x700)) {
		pio_rx_error(queue, 0, "len > 0x700");
		return;
	}
	if (unlikely(len == 0 && queue->mmio_base !=
		     B43legacy_MMIO_PIO4_BASE)) {
		pio_rx_error(queue, 0, "len == 0");
		return;
	}
	preamble[0] = cpu_to_le16(len);
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE)
		preamble_readwords = 14 / sizeof(u16);
	else
		preamble_readwords = 18 / sizeof(u16);
	for (i = 0; i < preamble_readwords; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		preamble[i + 1] = cpu_to_le16(tmp);
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble;
	macstat = le16_to_cpu(rxhdr->mac_status);
	if (macstat & B43legacy_RX_MAC_FCSERR) {
		pio_rx_error(queue,
			     (queue->mmio_base == B43legacy_MMIO_PIO1_BASE),
			     "Frame FCS error");
		return;
	}
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw;

		hw = (struct b43legacy_hwtxstatus *)(preamble + 1);
		b43legacy_handle_hwtxstatus(queue->dev, hw);

		return;
	}

	skb = dev_alloc_skb(len);
	if (unlikely(!skb)) {
		pio_rx_error(queue, 1, "OOM");
		return;
	}
	skb_put(skb, len);
	for (i = 0; i < len - 1; i += 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		*((__le16 *)(skb->data + i)) = cpu_to_le16(tmp);
	}
	if (len % 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		skb->data[len - 1] = (tmp & 0x00FF);
	}
	b43legacy_rx(queue->dev, skb, rxhdr);
}

void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
{
	b43legacy_power_saving_ctl_bits(queue->dev, -1, 1);
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    | B43legacy_PIO_TXCTL_SUSPEND);
}

void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    & ~B43legacy_PIO_TXCTL_SUSPEND);
	b43legacy_power_saving_ctl_bits(queue->dev, -1, -1);
	tasklet_schedule(&queue->txtask);
}

void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;
	pio->queue0->tx_frozen = 1;
	pio->queue1->tx_frozen = 1;
	pio->queue2->tx_frozen = 1;
	pio->queue3->tx_frozen = 1;
}

void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;
	pio->queue0->tx_frozen = 0;
	pio->queue1->tx_frozen = 0;
	pio->queue2->tx_frozen = 0;
	pio->queue3->tx_frozen = 0;
	if (!list_empty(&pio->queue0->txqueue))
		tasklet_schedule(&pio->queue0->txtask);
	if (!list_empty(&pio->queue1->txqueue))
		tasklet_schedule(&pio->queue1->txtask);
	if (!list_empty(&pio->queue2->txqueue))
		tasklet_schedule(&pio->queue2->txtask);
	if (!list_empty(&pio->queue3->txqueue))
		tasklet_schedule(&pio->queue3->txtask);
}