/*	$OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $	*/

/*-
 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2014 Kevin Lo <kevlo@FreeBSD.org>
 * Copyright (c) 2016 Andriy Voskoboinyk <avos@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/kdb.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_media.h>

#include <net80211/ieee80211_var.h>

#include <dev/rtwn/if_rtwnreg.h>
#include <dev/rtwn/if_rtwnvar.h>
#include <dev/rtwn/if_rtwn_nop.h>
#include <dev/rtwn/if_rtwn_debug.h>

#include <dev/rtwn/pci/rtwn_pci_var.h>

#include <dev/rtwn/pci/rtwn_pci_attach.h>
#include <dev/rtwn/pci/rtwn_pci_reg.h>
#include <dev/rtwn/pci/rtwn_pci_rx.h>
#include <dev/rtwn/pci/rtwn_pci_tx.h>

#include <dev/rtwn/rtl8192c/pci/r92ce_reg.h>

static device_probe_t	rtwn_pci_probe;
static device_attach_t	rtwn_pci_attach;
static device_detach_t	rtwn_pci_detach;
static device_shutdown_t rtwn_pci_shutdown;
static device_suspend_t	rtwn_pci_suspend;
static device_resume_t	rtwn_pci_resume;

static int	rtwn_pci_alloc_rx_list(struct rtwn_softc *);
static void	rtwn_pci_reset_rx_list(struct rtwn_softc *);
static void	rtwn_pci_free_rx_list(struct rtwn_softc *);
static int	rtwn_pci_alloc_tx_list(struct rtwn_softc *, int);
static void	rtwn_pci_reset_tx_ring_stopped(struct rtwn_softc *, int);
static void	rtwn_pci_reset_beacon_ring(struct rtwn_softc *, int);
static void	rtwn_pci_reset_tx_list(struct rtwn_softc *,
		    struct ieee80211vap *, int);
static void	rtwn_pci_free_tx_list(struct rtwn_softc *, int);
static void	rtwn_pci_reset_lists(struct rtwn_softc *,
		    struct ieee80211vap *);
static int	rtwn_pci_fw_write_block(struct rtwn_softc *,
		    const uint8_t *, uint16_t, int);
static uint16_t	rtwn_pci_get_qmap(struct rtwn_softc *);
static void	rtwn_pci_set_desc_addr(struct rtwn_softc *);
static void	rtwn_pci_beacon_update_begin(struct rtwn_softc *,
		    struct ieee80211vap *);
static void	rtwn_pci_beacon_update_end(struct rtwn_softc *,
		    struct ieee80211vap *);
static void	rtwn_pci_attach_methods(struct rtwn_softc *);

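/*
 * Match the PCI vendor/device pair against the table of supported chips.
 */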
static const struct rtwn_pci_ident *
rtwn_pci_probe_sub(device_t dev)
{
	int i, vendor_id, device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	for (i = 0; i < nitems(rtwn_pci_ident_table); i++) {
		if (vendor_id == rtwn_pci_ident_table[i].vendor &&
		    device_id == rtwn_pci_ident_table[i].device)
			return (&rtwn_pci_ident_table[i]);
	}

	return (NULL);
}

static int
rtwn_pci_probe(device_t dev)
{
	const struct rtwn_pci_ident *ident;

	ident = rtwn_pci_probe_sub(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

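/*
 * Allocate the Rx descriptor ring and an mbuf + DMA map for each entry.
 */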
static int
rtwn_pci_alloc_rx_list(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *rx_ring = &pc->rx_ring;
	struct rtwn_rx_data *rx_data;
	bus_size_t size;
	int i, error;

	/* Allocate Rx descriptors. */
	size = sizeof(struct rtwn_rx_stat_pci) * RTWN_PCI_RX_LIST_COUNT;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, &rx_ring->desc_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create rx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(rx_ring->desc_dmat, (void **)&rx_ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &rx_ring->desc_map);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not allocate rx desc\n");
		goto fail;
	}
	error = bus_dmamap_load(rx_ring->desc_dmat, rx_ring->desc_map,
	    rx_ring->desc, size, rtwn_pci_dma_map_addr, &rx_ring->paddr, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not load rx desc DMA map\n");
		goto fail;
	}
	bus_dmamap_sync(rx_ring->desc_dmat, rx_ring->desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &rx_ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create rx buf DMA tag\n");
		goto fail;
	}

	/* Allocate Rx buffers. */
	for (i = 0; i < RTWN_PCI_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];
		error = bus_dmamap_create(rx_ring->data_dmat, 0, &rx_data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create rx buf DMA map\n");
			goto fail;
		}

		rx_data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (rx_data->m == NULL) {
			device_printf(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(rx_ring->data_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MJUMPAGESIZE,
		    rtwn_pci_dma_map_addr, &rx_data->paddr, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not load rx buf DMA map\n");
			goto fail;
		}

		rtwn_pci_setup_rx_desc(pc, &rx_ring->desc[i], rx_data->paddr,
		    MJUMPAGESIZE, i);
	}
	rx_ring->cur = 0;

	return (0);

fail:
	rtwn_pci_free_rx_list(sc);
	return (error);
}

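/*
 * Reinitialize all Rx descriptors; the attached mbufs are reused as is.
 */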
static void
rtwn_pci_reset_rx_list(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *rx_ring = &pc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i;

	for (i = 0; i < RTWN_PCI_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];
		rtwn_pci_setup_rx_desc(pc, &rx_ring->desc[i],
		    rx_data->paddr, MJUMPAGESIZE, i);
	}
	rx_ring->cur = 0;
}

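/*
 * Release Rx descriptors, mbufs, DMA maps and tags.
 */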
static void
rtwn_pci_free_rx_list(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *rx_ring = &pc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i;

	if (rx_ring->desc_dmat != NULL) {
		if (rx_ring->desc != NULL) {
			bus_dmamap_sync(rx_ring->desc_dmat,
			    rx_ring->desc_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(rx_ring->desc_dmat,
			    rx_ring->desc_map);
			bus_dmamem_free(rx_ring->desc_dmat, rx_ring->desc,
			    rx_ring->desc_map);
			rx_ring->desc = NULL;
		}
		bus_dma_tag_destroy(rx_ring->desc_dmat);
		rx_ring->desc_dmat = NULL;
	}

	for (i = 0; i < RTWN_PCI_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_sync(rx_ring->data_dmat,
			    rx_data->map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->data_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		bus_dmamap_destroy(rx_ring->data_dmat, rx_data->map);
		rx_data->map = NULL;
	}
	if (rx_ring->data_dmat != NULL) {
		bus_dma_tag_destroy(rx_ring->data_dmat);
		rx_ring->data_dmat = NULL;
	}
}

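/*
 * Allocate the Tx descriptor ring for queue 'qid' and link its
 * descriptors into a circular list.
 */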
static int
rtwn_pci_alloc_tx_list(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *tx_ring = &pc->tx_ring[qid];
	bus_size_t size;
	int i, error;

	size = sc->txdesc_len * RTWN_PCI_TX_LIST_COUNT;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, &tx_ring->desc_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create tx ring DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(tx_ring->desc_dmat, &tx_ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &tx_ring->desc_map);
	if (error != 0) {
		device_printf(sc->sc_dev, "can't map tx ring DMA memory\n");
		goto fail;
	}
	error = bus_dmamap_load(tx_ring->desc_dmat, tx_ring->desc_map,
	    tx_ring->desc, size, rtwn_pci_dma_map_addr, &tx_ring->paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}
	bus_dmamap_sync(tx_ring->desc_dmat, tx_ring->desc_map,
	    BUS_DMASYNC_PREWRITE);

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &tx_ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create tx buf DMA tag\n");
		goto fail;
	}

	for (i = 0; i < RTWN_PCI_TX_LIST_COUNT; i++) {
		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
		void *tx_desc = (uint8_t *)tx_ring->desc + sc->txdesc_len * i;
		uint32_t next_desc_addr = tx_ring->paddr +
		    sc->txdesc_len * ((i + 1) % RTWN_PCI_TX_LIST_COUNT);

		rtwn_pci_setup_tx_desc(pc, tx_desc, next_desc_addr);

		error = bus_dmamap_create(tx_ring->data_dmat, 0, &tx_data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create tx buf DMA map\n");
			goto fail;
		}
		tx_data->m = NULL;
		tx_data->ni = NULL;
	}
	return (0);

fail:
	rtwn_pci_free_tx_list(sc, qid);
	return (error);
}

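/*
 * Clear all entries on a Tx ring after the device has been stopped.
 */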
static void
rtwn_pci_reset_tx_ring_stopped(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
	int i;

	for (i = 0; i < RTWN_PCI_TX_LIST_COUNT; i++) {
		struct rtwn_tx_data *data = &ring->tx_data[i];
		void *desc = (uint8_t *)ring->desc + sc->txdesc_len * i;

		rtwn_pci_copy_tx_desc(pc, desc, NULL);

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->ni != NULL) {
			ieee80211_free_node(data->ni);
			data->ni = NULL;
		}
	}

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
	    BUS_DMASYNC_POSTWRITE);

	sc->qfullmsk &= ~(1 << qid);
	ring->queued = 0;
	ring->last = ring->cur = 0;
}

/*
 * Clear entry 0 (or 1) in the beacon queue (the others are not used).
 */
static void
rtwn_pci_reset_beacon_ring(struct rtwn_softc *sc, int id)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *ring = &pc->tx_ring[RTWN_PCI_BEACON_QUEUE];
	struct rtwn_tx_data *data = &ring->tx_data[id];
	struct rtwn_tx_desc_common *txd = (struct rtwn_tx_desc_common *)
	    ((uint8_t *)ring->desc + id * sc->txdesc_len);

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTREAD);
	if (txd->flags0 & RTWN_FLAGS0_OWN) {
		/* Clear OWN bit. */
		txd->flags0 &= ~RTWN_FLAGS0_OWN;
		bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
		    BUS_DMASYNC_PREWRITE);

		/* Unload mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
	}
}

/*
 * Drop stale entries from the Tx ring before the vap is deleted.
 * If vap is NULL, just free everything and reset the cur / last pointers.
 */
static void
rtwn_pci_reset_tx_list(struct rtwn_softc *sc, struct ieee80211vap *vap,
    int qid)
{
	int i;

	if (vap == NULL) {
		if (qid != RTWN_PCI_BEACON_QUEUE) {
			/*
			 * Device was stopped; just clear all entries.
			 */
			rtwn_pci_reset_tx_ring_stopped(sc, qid);
		} else {
			for (i = 0; i < RTWN_PORT_COUNT; i++)
				rtwn_pci_reset_beacon_ring(sc, i);
		}
	} else if (qid == RTWN_PCI_BEACON_QUEUE &&
		   (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_IBSS)) {
		struct rtwn_vap *uvp = RTWN_VAP(vap);

		rtwn_pci_reset_beacon_ring(sc, uvp->id);
	} else {
		struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
		struct rtwn_tx_ring *ring = &pc->tx_ring[qid];

		for (i = 0; i < RTWN_PCI_TX_LIST_COUNT; i++) {
			struct rtwn_tx_data *data = &ring->tx_data[i];
			if (data->ni != NULL && data->ni->ni_vap == vap) {
				/*
				 * NB: if some vap is still running,
				 * rtwn_pci_tx_done() will free the mbuf;
				 * otherwise, rtwn_stop() will reset all rings
				 * after device shutdown.
				 */
				ieee80211_free_node(data->ni);
				data->ni = NULL;
			}
		}
	}
}

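/*
 * Release Tx descriptors, pending mbufs, DMA maps and tags for queue 'qid'.
 */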
static void
rtwn_pci_free_tx_list(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *tx_ring = &pc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i;

	if (tx_ring->desc_dmat != NULL) {
		if (tx_ring->desc != NULL) {
			bus_dmamap_sync(tx_ring->desc_dmat,
			    tx_ring->desc_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->desc_dmat,
			    tx_ring->desc_map);
			bus_dmamem_free(tx_ring->desc_dmat, tx_ring->desc,
			    tx_ring->desc_map);
			tx_ring->desc = NULL;
		}
		bus_dma_tag_destroy(tx_ring->desc_dmat);
		tx_ring->desc_dmat = NULL;
	}

	for (i = 0; i < RTWN_PCI_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];

		if (tx_data->m != NULL) {
			bus_dmamap_sync(tx_ring->data_dmat, tx_data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->data_dmat, tx_data->map);
			m_freem(tx_data->m);
			tx_data->m = NULL;
		}
	}
	if (tx_ring->data_dmat != NULL) {
		bus_dma_tag_destroy(tx_ring->data_dmat);
		tx_ring->data_dmat = NULL;
	}

	sc->qfullmsk &= ~(1 << qid);
	tx_ring->queued = 0;
	tx_ring->last = tx_ring->cur = 0;
}

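/*
 * Reset all Tx rings; when no vap is given, reset the Rx ring as well.
 */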
static void
rtwn_pci_reset_lists(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	int i;

	for (i = 0; i < RTWN_PCI_NTXQUEUES; i++)
		rtwn_pci_reset_tx_list(sc, vap, i);

	if (vap == NULL) {
		sc->qfullmsk = 0;
		rtwn_pci_reset_rx_list(sc);
	}
}

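/*
 * Write a firmware block byte-by-byte through memory-mapped registers.
 */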
static int
rtwn_pci_fw_write_block(struct rtwn_softc *sc, const uint8_t *buf,
    uint16_t reg, int mlen)
{
	int i;

	for (i = 0; i < mlen; i++)
		rtwn_pci_write_1(sc, reg++, buf[i]);

	/* NB: cannot fail */
	return (0);
}

static uint16_t
rtwn_pci_get_qmap(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);

	KASSERT(pc->pc_qmap != 0, ("%s: qmap is not set!\n", __func__));

	return (pc->pc_qmap);
}

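/*
 * Program the DMA base addresses of all Tx/Rx rings into the chip.
 */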
static void
rtwn_pci_set_desc_addr(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);

	RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "%s: addresses:\n"
	    "bk: %08jX, be: %08jX, vi: %08jX, vo: %08jX\n"
	    "bcn: %08jX, mgt: %08jX, high: %08jX, rx: %08jX\n",
	    __func__, (uintmax_t)pc->tx_ring[RTWN_PCI_BK_QUEUE].paddr,
	    (uintmax_t)pc->tx_ring[RTWN_PCI_BE_QUEUE].paddr,
	    (uintmax_t)pc->tx_ring[RTWN_PCI_VI_QUEUE].paddr,
	    (uintmax_t)pc->tx_ring[RTWN_PCI_VO_QUEUE].paddr,
	    (uintmax_t)pc->tx_ring[RTWN_PCI_BEACON_QUEUE].paddr,
	    (uintmax_t)pc->tx_ring[RTWN_PCI_MGNT_QUEUE].paddr,
	    (uintmax_t)pc->tx_ring[RTWN_PCI_HIGH_QUEUE].paddr,
	    (uintmax_t)pc->rx_ring.paddr);

	/* Set Tx Configuration Register. */
	rtwn_pci_write_4(sc, R92C_TCR, pc->tcr);

	/* Configure Tx DMA. */
	rtwn_pci_write_4(sc, R92C_BKQ_DESA,
	    pc->tx_ring[RTWN_PCI_BK_QUEUE].paddr);
	rtwn_pci_write_4(sc, R92C_BEQ_DESA,
	    pc->tx_ring[RTWN_PCI_BE_QUEUE].paddr);
	rtwn_pci_write_4(sc, R92C_VIQ_DESA,
	    pc->tx_ring[RTWN_PCI_VI_QUEUE].paddr);
	rtwn_pci_write_4(sc, R92C_VOQ_DESA,
	    pc->tx_ring[RTWN_PCI_VO_QUEUE].paddr);
	rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
	    pc->tx_ring[RTWN_PCI_BEACON_QUEUE].paddr);
	rtwn_pci_write_4(sc, R92C_MGQ_DESA,
	    pc->tx_ring[RTWN_PCI_MGNT_QUEUE].paddr);
	rtwn_pci_write_4(sc, R92C_HQ_DESA,
	    pc->tx_ring[RTWN_PCI_HIGH_QUEUE].paddr);

	/* Configure Rx DMA. */
	rtwn_pci_write_4(sc, R92C_RX_DESA, pc->rx_ring.paddr);
}

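/*
 * Disable beaconing while the beacon frame for this vap is being rebuilt.
 */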
static void
rtwn_pci_beacon_update_begin(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	struct rtwn_vap *rvp = RTWN_VAP(vap);

	RTWN_ASSERT_LOCKED(sc);

	rtwn_beacon_enable(sc, rvp->id, 0);
}

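/*
 * Re-enable beaconing once the updated beacon has been loaded,
 * unless the port is not linked.
 */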
static void
rtwn_pci_beacon_update_end(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	struct rtwn_vap *rvp = RTWN_VAP(vap);

	RTWN_ASSERT_LOCKED(sc);

	if (rvp->curr_mode != R92C_MSR_NOLINK)
		rtwn_beacon_enable(sc, rvp->id, 1);
}

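/*
 * Install the PCI bus glue into the bus-independent rtwn(4) softc.
 */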
static void
rtwn_pci_attach_methods(struct rtwn_softc *sc)
{
	sc->sc_write_1		= rtwn_pci_write_1;
	sc->sc_write_2		= rtwn_pci_write_2;
	sc->sc_write_4		= rtwn_pci_write_4;
	sc->sc_read_1		= rtwn_pci_read_1;
	sc->sc_read_2		= rtwn_pci_read_2;
	sc->sc_read_4		= rtwn_pci_read_4;
	sc->sc_delay		= rtwn_pci_delay;
	sc->sc_tx_start		= rtwn_pci_tx_start;
	sc->sc_reset_lists	= rtwn_pci_reset_lists;
	sc->sc_abort_xfers	= rtwn_nop_softc;
	sc->sc_fw_write_block	= rtwn_pci_fw_write_block;
	sc->sc_get_qmap		= rtwn_pci_get_qmap;
	sc->sc_set_desc_addr	= rtwn_pci_set_desc_addr;
	sc->sc_drop_incorrect_tx = rtwn_nop_softc;
	sc->sc_beacon_update_begin = rtwn_pci_beacon_update_begin;
	sc->sc_beacon_update_end = rtwn_pci_beacon_update_end;
	sc->sc_beacon_unload	= rtwn_pci_reset_beacon_ring;

	sc->bcn_check_interval	= 25000;
}

static int
rtwn_pci_attach(device_t dev)
{
	const struct rtwn_pci_ident *ident;
	struct rtwn_pci_softc *pc = device_get_softc(dev);
	struct rtwn_softc *sc = &pc->pc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t lcsr;
	int cap_off, i, error, rid;

	ident = rtwn_pci_probe_sub(dev);
	if (ident == NULL)
		return (ENXIO);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_cap(dev, PCIY_EXPRESS, &cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return (error);
	}

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(2);
	pc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (pc->mem == NULL) {
		device_printf(dev, "can't map mem space\n");
		return (ENOMEM);
	}
	pc->pc_st = rman_get_bustag(pc->mem);
	pc->pc_sh = rman_get_bushandle(pc->mem);

	/* Install interrupt handler. */
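	/* Prefer MSI (rid 1); fall back to a shared legacy INTx line (rid 0). */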
	rid = 1;
	if (pci_alloc_msi(dev, &rid) == 0)
		rid = 1;
	else
		rid = 0;
	pc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (pc->irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		goto detach;
	}

	/* Disable PCIe Active State Power Management (ASPM). */
	lcsr = pci_read_config(dev, cap_off + PCIER_LINK_CTL, 4);
	lcsr &= ~PCIEM_LINK_CTL_ASPMC;
	pci_write_config(dev, cap_off + PCIER_LINK_CTL, lcsr, 4);

	sc->sc_dev = dev;
	ic->ic_name = device_get_nameunit(dev);

	/* Need to be initialized early. */
	rtwn_sysctlattach(sc);
	mtx_init(&sc->sc_mtx, ic->ic_name, MTX_NETWORK_LOCK, MTX_DEF);

	rtwn_pci_attach_methods(sc);
	rtwn_pci_attach_private(pc, ident->chip);

	/* Allocate Tx/Rx buffers. */
	error = rtwn_pci_alloc_rx_list(sc);
	if (error != 0) {
		device_printf(dev,
		    "could not allocate Rx buffers, error %d\n",
		    error);
		goto detach;
	}
	for (i = 0; i < RTWN_PCI_NTXQUEUES; i++) {
		error = rtwn_pci_alloc_tx_list(sc, i);
		if (error != 0) {
			device_printf(dev,
			    "could not allocate Tx buffers, error %d\n",
			    error);
			goto detach;
		}
	}

	/* Generic attach. */
	error = rtwn_attach(sc);
	if (error != 0)
		goto detach;

	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, pc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rtwn_pci_intr, sc, &pc->pc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt, error %d\n",
		    error);
		goto detach;
	}

	return (0);

detach:
	rtwn_pci_detach(dev);		/* failure */
	return (ENXIO);
}

static int
rtwn_pci_detach(device_t dev)
{
	struct rtwn_pci_softc *pc = device_get_softc(dev);
	struct rtwn_softc *sc = &pc->pc_sc;
	int i;

	/* Generic detach. */
	rtwn_detach(sc);

	/* Uninstall interrupt handler. */
	if (pc->irq != NULL) {
		bus_teardown_intr(dev, pc->irq, pc->pc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(pc->irq),
		    pc->irq);
		pci_release_msi(dev);
	}

	/* Free Tx/Rx buffers. */
	for (i = 0; i < RTWN_PCI_NTXQUEUES; i++)
		rtwn_pci_free_tx_list(sc, i);
	rtwn_pci_free_rx_list(sc);

	if (pc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(pc->mem), pc->mem);

	rtwn_detach_private(sc);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static int
rtwn_pci_shutdown(device_t self)
{
	struct rtwn_pci_softc *pc = device_get_softc(self);

	ieee80211_stop_all(&pc->pc_sc.sc_ic);
	return (0);
}

static int
rtwn_pci_suspend(device_t self)
{
	struct rtwn_pci_softc *pc = device_get_softc(self);

	rtwn_suspend(&pc->pc_sc);

	return (0);
}

static int
rtwn_pci_resume(device_t self)
{
	struct rtwn_pci_softc *pc = device_get_softc(self);

	rtwn_resume(&pc->pc_sc);

	return (0);
}

static device_method_t rtwn_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rtwn_pci_probe),
	DEVMETHOD(device_attach,	rtwn_pci_attach),
	DEVMETHOD(device_detach,	rtwn_pci_detach),
	DEVMETHOD(device_shutdown,	rtwn_pci_shutdown),
	DEVMETHOD(device_suspend,	rtwn_pci_suspend),
	DEVMETHOD(device_resume,	rtwn_pci_resume),

	DEVMETHOD_END
};

static driver_t rtwn_pci_driver = {
	"rtwn",
	rtwn_pci_methods,
	sizeof(struct rtwn_pci_softc)
};

DRIVER_MODULE(rtwn_pci, pci, rtwn_pci_driver, NULL, NULL);
MODULE_VERSION(rtwn_pci, 1);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, rtwn, rtwn_pci_ident_table,
    nitems(rtwn_pci_ident_table));
MODULE_DEPEND(rtwn_pci, pci, 1, 1, 1);
MODULE_DEPEND(rtwn_pci, wlan, 1, 1, 1);
MODULE_DEPEND(rtwn_pci, rtwn, 2, 2, 2);