/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/ath/if_ath_rx_edma.c 337951 2018-08-17 03:05:09Z kevans $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - e.g., calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, e.g. the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#include <dev/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_rx_edma.h>

#ifdef	ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
/*
 * some general macros
 */
#define	INCR(_l, _sz)		do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)		do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
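
/*
 * Both macros treat (_l) as an index into a power-of-two sized ring;
 * masking with ((_sz) - 1) only behaves as a modulo when _sz is a
 * power of two.  For example, with _sz = 8, INCR takes an index of
 * 7 back around to 0, and DECR takes 0 back to 7.
 */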

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static	int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

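/*
 * Stop RX.
 *
 * Quiesce the PCU and blank the RX filter before halting RX DMA so
 * no new frames land mid-teardown, give the bus FIFOs time to
 * drain, then free any partially reassembled frames left in
 * m_rxpending on each queue.
 */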
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Stop RX DMA; if the engine stopped cleanly, note it so
	 * the restart path knows the FIFO state is consistent.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%d, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 */
	if (sc->sc_rx_resetted == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip?\n",
		    __func__);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary? We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);

	return (0);
}

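/*
 * Kick along one RX queue: wake the hardware, drain completed
 * descriptors from the FIFO into the deferred list, then hand the
 * rest of the work to the RX taskqueue.
 */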
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

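/*
 * As above, but kick both the high- and low-priority RX queues
 * before scheduling the taskqueue.
 */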
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1! Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

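/*
 * Pass the completed frames on the given deferred list up to
 * net80211 via ath_rx_pkt(), then return the ath_buf entries to
 * the free list.  Returns the number of frames successfully
 * received.
 */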
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

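/*
 * The RX taskqueue handler: process both FIFOs and their deferred
 * lists, age any fast-frame aggregates and poke the DFS task if
 * the current channel requires it.
 */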
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = &sc->sc_ic;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

#ifdef	IEEE80211_SUPPORT_SUPERG
	ieee80211_ff_age_all(ic, 100);
#endif
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);		/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}
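
/*
 * The resulting RX buffer layout is:
 *
 *	+--------------------------+--------------------------------+
 *	| RX status descriptor     | received frame                 |
 *	| (sc_rx_statuslen bytes)  | (up to bufsize - statuslen)    |
 *	+--------------------------+--------------------------------+
 *
 * ath_edma_recv_proc_deferred_queue() m_adj()'s the status area off
 * the front of the mbuf before the frame is passed up the stack.
 */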

/*
 * Allocate an RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_EDMA_RX,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

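/*
 * Flush the given FIFO: free every buffer still sitting in it,
 * along with any pending reassembly mbuf, then reset the
 * head/tail/depth state to empty.
 */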
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

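/*
 * Free the FIFO pointer array itself; the entries are expected to
 * have already been flushed back to the rxbuf free list.
 */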
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

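/*
 * Allocate the RX descriptor DMA memory and buffer list, then size
 * and allocate the FIFO array for each queue.
 */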
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

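/*
 * The reverse of the above: flush the deferred lists and both
 * FIFOs, free the FIFO arrays, then release the RX descriptor DMA
 * memory.
 */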
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

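/*
 * Attach-time setup: pick the RX buffer size, fetch the status
 * descriptor length from the HAL, program the hardware with the
 * usable payload size and wire up the EDMA RX methods.
 *
 * For example, with the 4096 byte buffers below and a (hypothetical)
 * 48 byte status descriptor, the hardware would be told to DMA at
 * most 4048 bytes of frame data into each buffer.
 */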
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}
