/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/ath/if_ath_rx_edma.c 330897 2018-03-14 03:19:51Z eadler $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#include <dev/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_rx_edma.h>

#ifdef	ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define	INCR(_l, _sz)		do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)		do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
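/*
 * A small usage sketch (illustrative, not part of the original source):
 * these ring index helpers assume _sz is a power of two, so the mask
 * wraps the index in both directions.  E.g. with an 8-entry FIFO:
 *
 *	int idx = 7;
 *	INCR(idx, 8);		idx is now 0
 *	DECR(idx, 8);		idx is back to 7
 */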

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static	int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

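/*
 * Stop receive.
 *
 * Disable the PCU receive path, then attempt to stop RX DMA; if the
 * hardware reports a clean stop, sc_rx_stopped is set so the restart
 * path knows the FIFO contents are in a consistent state.  Any
 * partially reassembled frames in m_rxpending are freed here, since
 * they can't be completed once receive is halted.
 */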
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Stop RX DMA; only mark receive as stopped if the
	 * hardware reports the DMA engine actually halted.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */
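	/*
	 * A sketch of what such a check could look like (an editor's
	 * illustration, not in the original source; sc_rx_stopped is
	 * only set once ath_hal_stopdmarecv() succeeds in
	 * ath_edma_stoprecv()):
	 *
	 *	if (sc->sc_rx_stopped == 0)
	 *		device_printf(sc->sc_dev,
	 *		    "%s: RX DMA still running?\n", __func__);
	 */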

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 */
	if (sc->sc_rx_resetted == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip?\n",
		    __func__);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary? We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);

	return (0);
}

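/*
 * Process frames from the given FIFO queue and schedule deferred
 * completion.
 *
 * The chip is kept awake for the register access, the queue is
 * drained into the deferred list, and the RX taskqueue is kicked
 * to do the stack processing.
 */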
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

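/*
 * As above, but drain both the HP and LP FIFO queues before
 * kicking the RX taskqueue.
 */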
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

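/*
 * Flush receive.
 *
 * Drain both FIFO queues into the deferred lists and then process
 * those lists in-line, without scheduling further RX (dosched=0).
 */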
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1! Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

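/*
 * Process frames from the deferred queue.
 *
 * The completed list for the given queue is snapshotted under the
 * RX lock, each frame is passed up via ath_rx_pkt(), and the
 * ath_buf entries are then returned to the free list.  Returns the
 * number of frames successfully handled.
 */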
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

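/*
 * RX taskqueue handler.
 *
 * Drain both FIFO queues and process the deferred lists, unless a
 * reset is in progress.  sc_rxproc_cnt is bumped around the work so
 * other paths can tell RX processing is in flight.
 */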
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = &sc->sc_ic;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

#ifdef	IEEE80211_SUPPORT_SUPERG
	ieee80211_ff_age_all(ic, 100);
#endif
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);		/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate an RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

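/*
 * Free an RX buffer.
 *
 * If the mbuf hasn't already been consumed (eg by ath_rx_pkt())
 * it is unmapped and freed; the ath_buf is then returned to the
 * free list.
 */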
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

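/*
 * Flush the given FIFO queue.
 *
 * Free every buffer still tracked in the software FIFO array, drop
 * any pending reassembly mbuf and reset the head/tail/depth state
 * to empty.
 */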
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

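/*
 * Free the FIFO tracking array for the given queue.  The FIFO is
 * expected to have been flushed first.
 */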
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

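/*
 * Set up RX DMA state.
 *
 * Allocate the RX descriptor DMA memory and ath_buf list, then size
 * and initialise the software FIFO for both queues.
 */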
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

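/*
 * Tear down RX DMA state - flush and free both FIFOs, then free
 * the RX descriptor memory and ath_buf list.
 */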
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

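/*
 * Set up the EDMA RX methods for this driver instance.
 *
 * A rough sketch of how the attach path is expected to select this
 * implementation (the capability check and legacy fallback names
 * here are assumptions for illustration; the real call site lives
 * in if_ath.c):
 *
 *	if (ath_hal_hasedma(sc->sc_ah)) {
 *		sc->sc_isedma = 1;
 *		ath_recv_setup_edma(sc);
 *	} else
 *		ath_recv_setup_legacy(sc);
 */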
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}