/*	$OpenBSD: if_rtwn.c,v 1.6 2015/08/28 00:03:53 deraadt Exp $	*/

/*-
 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2016 Andriy Voskoboinyk <avos@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/epoch.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_media.h>

#include <net80211/ieee80211_var.h>

#include <dev/rtwn/if_rtwnreg.h>
#include <dev/rtwn/if_rtwnvar.h>
#include <dev/rtwn/if_rtwn_debug.h>
#include <dev/rtwn/if_rtwn_rx.h>
#include <dev/rtwn/if_rtwn_task.h>
#include <dev/rtwn/if_rtwn_tx.h>

#include <dev/rtwn/pci/rtwn_pci_var.h>
#include <dev/rtwn/pci/rtwn_pci_rx.h>

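/*
 * bus_dmamap_load() callback: record the physical address of the single
 * DMA segment so the caller can program it into a descriptor.
 */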
void
rtwn_pci_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

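/*
 * Initialize an Rx descriptor with the given buffer address and length.
 * The OWN bit is set last, after a write barrier, so the hardware never
 * sees a partially filled descriptor.
 */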
void
rtwn_pci_setup_rx_desc(struct rtwn_pci_softc *pc,
    struct rtwn_rx_stat_pci *desc, bus_addr_t addr, size_t len, int idx)
{

	memset(desc, 0, sizeof(*desc));
	desc->rxdw0 = htole32(SM(RTWN_RXDW0_PKTLEN, len) |
	    ((idx == RTWN_PCI_RX_LIST_COUNT - 1) ? RTWN_RXDW0_EOR : 0));
	desc->rxbufaddr = htole32(addr);
	bus_space_barrier(pc->pc_st, pc->pc_sh, 0, pc->pc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	desc->rxdw0 |= htole32(RTWN_RXDW0_OWN);
}

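/*
 * Process one received data frame: validate the descriptor, hand the
 * filled mbuf up to net80211 and replace it with a freshly allocated one
 * so the descriptor can be returned to the hardware.
 */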
static void
rtwn_pci_rx_frame(struct rtwn_pci_softc *pc)
{
	struct epoch_tracker et;
	struct rtwn_softc *sc = &pc->pc_sc;
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_stat_pci *rx_desc = &ring->desc[ring->cur];
	struct rtwn_rx_data *rx_data = &ring->rx_data[ring->cur];
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	uint32_t rxdw0;
	struct mbuf *m, *m1;
	int infosz, pktlen, shift, error;

	/* Dump Rx descriptor. */
	RTWN_DPRINTF(sc, RTWN_DEBUG_RECV_DESC,
	    "%s: dw: 0 %08X, 1 %08X, 2 %08X, 3 %08X, 4 %08X, tsfl %08X, "
	    "addr: %08X (64: %08X)\n",
	    __func__, le32toh(rx_desc->rxdw0), le32toh(rx_desc->rxdw1),
	    le32toh(rx_desc->rxdw2), le32toh(rx_desc->rxdw3),
	    le32toh(rx_desc->rxdw4), le32toh(rx_desc->tsf_low),
	    le32toh(rx_desc->rxbufaddr), le32toh(rx_desc->rxbufaddr64));

	rxdw0 = le32toh(rx_desc->rxdw0);
	if (__predict_false(rxdw0 & (RTWN_RXDW0_CRCERR | RTWN_RXDW0_ICVERR))) {
		/*
		 * This should not happen since we set up our Rx filter
		 * not to receive these frames.
		 */
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: RX flags error (%s)\n", __func__,
		    rxdw0 & RTWN_RXDW0_CRCERR ? "CRC" : "ICV");
		goto fail;
	}

	pktlen = MS(rxdw0, RTWN_RXDW0_PKTLEN);
	if (__predict_false(pktlen < sizeof(struct ieee80211_frame_ack) ||
	    pktlen > MJUMPAGESIZE)) {
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: frame is too short/long: %d\n", __func__, pktlen);
		goto fail;
	}

	infosz = MS(rxdw0, RTWN_RXDW0_INFOSZ) * 8;
	shift = MS(rxdw0, RTWN_RXDW0_SHIFT);

	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
	if (__predict_false(m1 == NULL)) {
		device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n",
		    __func__);
		goto fail;
	}
	bus_dmamap_sync(ring->data_dmat, rx_data->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->data_dmat, rx_data->map);

	error = bus_dmamap_load(ring->data_dmat, rx_data->map, mtod(m1, void *),
	    MJUMPAGESIZE, rtwn_pci_dma_map_addr, &rx_data->paddr, 0);
	if (error != 0) {
		m_freem(m1);

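		/*
		 * Reload the old mbuf so this ring slot still has a valid
		 * buffer; the received frame is dropped and counted as an
		 * input error.
		 */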
		error = bus_dmamap_load(ring->data_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MJUMPAGESIZE,
		    rtwn_pci_dma_map_addr, &rx_data->paddr, BUS_DMA_NOWAIT);
		if (error != 0)
			panic("%s: could not load old RX mbuf",
			    device_get_name(sc->sc_dev));

		goto fail;
	}

	/* Finalize mbuf. */
	m = rx_data->m;
	rx_data->m = m1;
	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;

	ni = rtwn_rx_common(sc, m, rx_desc);

	RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
	    "%s: Rx frame len %d, infosz %d, shift %d\n",
	    __func__, pktlen, infosz, shift);

	/* Send the frame to the 802.11 layer. */
	RTWN_UNLOCK(sc);

	NET_EPOCH_ENTER(et);
	if (ni != NULL) {
		(void)ieee80211_input_mimo(ni, m);
		/* Node is no longer needed. */
		ieee80211_free_node(ni);
	} else
		(void)ieee80211_input_mimo_all(ic, m);
	NET_EPOCH_EXIT(et);

	RTWN_LOCK(sc);

	return;

fail:
	counter_u64_add(ic->ic_ierrors, 1);
}

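/*
 * Copy the current Rx descriptor into pc_rx_buf; if the payload is small
 * enough to fit (e.g. a Tx report or C2H event) it is appended as well,
 * giving the common code the buffer layout it expects.
 */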
static int
rtwn_pci_rx_buf_copy(struct rtwn_pci_softc *pc)
{
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_stat_pci *rx_desc = &ring->desc[ring->cur];
	struct rtwn_rx_data *rx_data = &ring->rx_data[ring->cur];
	uint32_t rxdw0;
	int desc_size, pktlen;

	/*
	 * NB: tx_report() / c2h_report() expect to see a USB Rx
	 * descriptor - the same layout as for PCIe, but without the
	 * rxbufaddr* fields.
	 */
	desc_size = sizeof(struct rtwn_rx_stat_common);
	KASSERT(sizeof(pc->pc_rx_buf) >= desc_size,
	    ("adjust size for PCIe Rx buffer!"));

	memcpy(pc->pc_rx_buf, rx_desc, desc_size);

	rxdw0 = le32toh(rx_desc->rxdw0);
	pktlen = MS(rxdw0, RTWN_RXDW0_PKTLEN);

	if (pktlen > sizeof(pc->pc_rx_buf) - desc_size) {
		/* Looks like an ordinary Rx frame. */
		return (desc_size);
	}

	bus_dmamap_sync(ring->data_dmat, rx_data->map, BUS_DMASYNC_POSTREAD);
	memcpy(pc->pc_rx_buf + desc_size, mtod(rx_data->m, void *), pktlen);

	return (desc_size + pktlen);
}

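/*
 * Process a hardware Tx status report; these feed the net80211 rate
 * control code, so they are only expected while that rate control
 * algorithm is selected.
 */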
static void
rtwn_pci_tx_report(struct rtwn_pci_softc *pc, int len)
{
	struct rtwn_softc *sc = &pc->pc_sc;

	if (sc->sc_ratectl != RTWN_RATECTL_NET80211) {
		/* shouldn't happen */
		device_printf(sc->sc_dev,
		    "%s called while ratectl = %d!\n",
		    __func__, sc->sc_ratectl);
		return;
	}

	RTWN_NT_LOCK(sc);
	rtwn_handle_tx_report(sc, pc->pc_rx_buf, len);
	RTWN_NT_UNLOCK(sc);

#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * NB: this will be executed only when the 'report' bit is set.
	 */
	if (sc->sc_tx_n_active > 0 && --sc->sc_tx_n_active <= 1)
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif
}

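/*
 * Forward a C2H (chip-to-host) event to the common handler.
 */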
static void
rtwn_pci_c2h_report(struct rtwn_pci_softc *pc, int len)
{
	rtwn_handle_c2h_report(&pc->pc_sc, pc->pc_rx_buf, len);
}

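/*
 * Reclaim completed Tx descriptors for the given queue: unmap and free
 * the transmitted mbufs, complete them to net80211 and restart Tx if the
 * queue was previously marked full.
 */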
static void
rtwn_pci_tx_done(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
	struct rtwn_tx_desc_common *desc;
	struct rtwn_tx_data *data;

	RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: qid %d, last %d, cur %d\n",
	    __func__, qid, ring->last, ring->cur);

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (ring->last != ring->cur) {
		data = &ring->tx_data[ring->last];
		desc = (struct rtwn_tx_desc_common *)
		    ((uint8_t *)ring->desc + sc->txdesc_len * ring->last);

		KASSERT(data->m != NULL, ("no mbuf"));

		if (desc->flags0 & RTWN_FLAGS0_OWN)
			break;

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);

		if (data->ni != NULL) {	/* not a beacon frame */
			ieee80211_tx_complete(data->ni, data->m, 0);

			data->ni = NULL;
			ring->queued--;
			KASSERT(ring->queued >= 0,
			    ("ring->queued (qid %d) underflow!\n", qid));
		} else
			m_freem(data->m);

		data->m = NULL;
		ring->last = (ring->last + 1) % RTWN_PCI_TX_LIST_COUNT;
#ifndef D4054
		if (ring->queued > 0)
			sc->sc_tx_timer = 5;
		else
			sc->sc_tx_timer = 0;
#endif
	}

	if ((sc->qfullmsk & (1 << qid)) != 0 &&
	    ring->queued < (RTWN_PCI_TX_LIST_COUNT - 1)) {
		sc->qfullmsk &= ~(1 << qid);
		rtwn_start(sc);
	}

#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * If the TX active queue drops below a certain
	 * threshold, ensure we age fast-frames out so they're
	 * transmitted.
	 */
	if (sc->sc_ratectl != RTWN_RATECTL_NET80211 && ring->queued <= 1) {
		/*
		 * XXX TODO: just make this a callout timer schedule
		 * so we can flush the FF staging queue if we're
		 * approaching idle.
		 */
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
	}
#endif
}

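/*
 * Drain the Rx ring: process every descriptor the hardware has handed
 * back, then return it to the hardware with the OWN bit set again.
 */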
static void
rtwn_pci_rx_done(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_stat_pci *rx_desc;
	struct rtwn_rx_data *rx_data;
	int len;

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		rx_desc = &ring->desc[ring->cur];
		rx_data = &ring->rx_data[ring->cur];

		if (le32toh(rx_desc->rxdw0) & RTWN_RXDW0_OWN)
			break;

		len = rtwn_pci_rx_buf_copy(pc);

		switch (rtwn_classify_intr(sc, pc->pc_rx_buf, len)) {
		case RTWN_RX_DATA:
			rtwn_pci_rx_frame(pc);
			break;
		case RTWN_RX_TX_REPORT:
			rtwn_pci_tx_report(pc, len);
			break;
		case RTWN_RX_OTHER:
			rtwn_pci_c2h_report(pc, len);
			break;
		default:
			/* NOTREACHED */
			KASSERT(0, ("unknown Rx classification code"));
			break;
		}

		/* Update / reset RX descriptor (and set OWN bit). */
		rtwn_pci_setup_rx_desc(pc, rx_desc, rx_data->paddr,
		    MJUMPAGESIZE, ring->cur);

		if (!(sc->sc_flags & RTWN_RUNNING))
			return;

		/* NB: device can reuse current descriptor. */
		bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
		    BUS_DMASYNC_POSTREAD);

		if (le32toh(rx_desc->rxdw0) & RTWN_RXDW0_OWN)
			ring->cur = (ring->cur + 1) % RTWN_PCI_RX_LIST_COUNT;
	}
}

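/*
 * Interrupt handler: read the interrupt status, service Rx and any Tx
 * queues that reported completions, then re-enable interrupts if the
 * device is still running.
 */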
void
rtwn_pci_intr(void *arg)
{
	struct rtwn_softc *sc = arg;
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	int i, status, tx_rings;

	RTWN_LOCK(sc);
#ifndef __HAIKU__
	status = rtwn_pci_get_intr_status(pc, &tx_rings);
	RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: status %08X, tx_rings %08X\n",
	    __func__, status, tx_rings);
	if (status == 0 && tx_rings == 0)
		goto unlock;
#else
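	/* The interrupt status was latched into the softc earlier; fetch it. */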
	status = atomic_get(&pc->pc_intr_status);
	tx_rings = atomic_get(&pc->pc_intr_tx_rings);
#endif

	if (status & (RTWN_PCI_INTR_RX | RTWN_PCI_INTR_TX_REPORT)) {
		rtwn_pci_rx_done(sc);
		if (!(sc->sc_flags & RTWN_RUNNING))
			goto unlock;
	}

	if (tx_rings != 0)
		for (i = 0; i < RTWN_PCI_NTXQUEUES; i++)
			if (tx_rings & (1 << i))
				rtwn_pci_tx_done(sc, i);

	if (sc->sc_flags & RTWN_RUNNING)
		rtwn_pci_enable_intr(pc);
unlock:
	RTWN_UNLOCK(sc);
}