if_bwfm_pci.c revision 1.40
1/*	$OpenBSD: if_bwfm_pci.c,v 1.40 2021/02/25 02:48:20 dlg Exp $	*/
2/*
3 * Copyright (c) 2010-2016 Broadcom Corporation
4 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include "bpfilter.h"
20
21#include <sys/param.h>
22#include <sys/systm.h>
23#include <sys/buf.h>
24#include <sys/kernel.h>
25#include <sys/malloc.h>
26#include <sys/device.h>
27#include <sys/queue.h>
28#include <sys/socket.h>
29
30#if NBPFILTER > 0
31#include <net/bpf.h>
32#endif
33#include <net/if.h>
34#include <net/if_dl.h>
35#include <net/if_media.h>
36
37#include <netinet/in.h>
38#include <netinet/if_ether.h>
39
40#include <net80211/ieee80211_var.h>
41
42#include <machine/bus.h>
43
44#include <dev/pci/pcireg.h>
45#include <dev/pci/pcivar.h>
46#include <dev/pci/pcidevs.h>
47
48#include <dev/ic/bwfmvar.h>
49#include <dev/ic/bwfmreg.h>
50#include <dev/pci/if_bwfm_pci.h>
51
52#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
53#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
54#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN
55
56#define BWFM_NUM_TX_MSGRINGS			2
57#define BWFM_NUM_RX_MSGRINGS			3
58
59#define BWFM_NUM_IOCTL_PKTIDS			8
60#define BWFM_NUM_TX_PKTIDS			2048
61#define BWFM_NUM_RX_PKTIDS			1024
62
63#define BWFM_NUM_IOCTL_DESCS			1
64#define BWFM_NUM_TX_DESCS			1
65#define BWFM_NUM_RX_DESCS			1
66
67#ifdef BWFM_DEBUG
68#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
69#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
70static int bwfm_debug = 2;
71#else
72#define DPRINTF(x)	do { ; } while (0)
73#define DPRINTFN(n, x)	do { ; } while (0)
74#endif
75
76#define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)
77
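/*
 * Life cycle of a flowring: CLOSED until a create request is submitted
 * (OPENING), OPEN once the firmware acknowledges the create, CLOSING
 * while a delete request is pending, and CLOSED again after the delete
 * completes.
 */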
78enum ring_status {
79	RING_CLOSED,
80	RING_CLOSING,
81	RING_OPEN,
82	RING_OPENING,
83};
84
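/*
 * Per-ring bookkeeping.  w_idx_addr/r_idx_addr are the offsets of the
 * ring's write and read indices, either into TCM or into the host-resident
 * DMA index buffer, depending on what the firmware supports.  w_ptr/r_ptr
 * are our cached copies.  The fifo, mac and m members are only used by the
 * dynamic TX flowrings.
 */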
85struct bwfm_pci_msgring {
86	uint32_t		 w_idx_addr;
87	uint32_t		 r_idx_addr;
88	uint32_t		 w_ptr;
89	uint32_t		 r_ptr;
90	int			 nitem;
91	int			 itemsz;
92	enum ring_status	 status;
93	struct bwfm_pci_dmamem	*ring;
94	struct mbuf		*m;
95
96	int			 fifo;
97	uint8_t			 mac[ETHER_ADDR_LEN];
98};
99
100struct bwfm_pci_ioctl {
101	uint16_t		 transid;
102	uint16_t		 retlen;
103	int16_t			 status;
104	struct mbuf		*m;
105	TAILQ_ENTRY(bwfm_pci_ioctl) next;
106};
107
108struct bwfm_pci_buf {
109	bus_dmamap_t	 bb_map;
110	struct mbuf	*bb_m;
111};
112
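/*
 * Table of in-flight packets.  Each slot pairs a DMA map with the mbuf
 * currently loaded into it; the slot index is the packet id used to find
 * the mbuf again when the firmware reports completion.
 */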
113struct bwfm_pci_pkts {
114	struct bwfm_pci_buf	*pkts;
115	uint32_t		 npkt;
116	int			 last;
117};
118
119struct bwfm_pci_softc {
120	struct bwfm_softc	 sc_sc;
121	pci_chipset_tag_t	 sc_pc;
122	pcitag_t		 sc_tag;
123	pcireg_t		 sc_id;
124	void 			*sc_ih;
125
126	int			 sc_initialized;
127
128	bus_space_tag_t		 sc_reg_iot;
129	bus_space_handle_t	 sc_reg_ioh;
130	bus_size_t		 sc_reg_ios;
131
132	bus_space_tag_t		 sc_tcm_iot;
133	bus_space_handle_t	 sc_tcm_ioh;
134	bus_size_t		 sc_tcm_ios;
135
136	bus_dma_tag_t		 sc_dmat;
137
138	uint32_t		 sc_shared_address;
139	uint32_t		 sc_shared_flags;
140	uint8_t			 sc_shared_version;
141
142	uint8_t			 sc_dma_idx_sz;
143	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
144	size_t			 sc_dma_idx_bufsz;
145
146	uint16_t		 sc_max_rxbufpost;
147	uint32_t		 sc_rx_dataoffset;
148	uint32_t		 sc_htod_mb_data_addr;
149	uint32_t		 sc_dtoh_mb_data_addr;
150	uint32_t		 sc_ring_info_addr;
151
152	uint32_t		 sc_console_base_addr;
153	uint32_t		 sc_console_buf_addr;
154	uint32_t		 sc_console_buf_size;
155	uint32_t		 sc_console_readidx;
156
157	uint16_t		 sc_max_flowrings;
158	uint16_t		 sc_max_submissionrings;
159	uint16_t		 sc_max_completionrings;
160
161	struct bwfm_pci_msgring	 sc_ctrl_submit;
162	struct bwfm_pci_msgring	 sc_rxpost_submit;
163	struct bwfm_pci_msgring	 sc_ctrl_complete;
164	struct bwfm_pci_msgring	 sc_tx_complete;
165	struct bwfm_pci_msgring	 sc_rx_complete;
166	struct bwfm_pci_msgring	*sc_flowrings;
167
168	struct bwfm_pci_dmamem	*sc_scratch_buf;
169	struct bwfm_pci_dmamem	*sc_ringupd_buf;
170
171	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;
172	uint16_t		 sc_ioctl_transid;
173
174	struct if_rxring	 sc_ioctl_ring;
175	struct if_rxring	 sc_event_ring;
176	struct if_rxring	 sc_rxbuf_ring;
177
178	struct bwfm_pci_pkts	 sc_ioctl_pkts;
179	struct bwfm_pci_pkts	 sc_rx_pkts;
180	struct bwfm_pci_pkts	 sc_tx_pkts;
181	int			 sc_tx_pkts_full;
182};
183
184struct bwfm_pci_dmamem {
185	bus_dmamap_t		bdm_map;
186	bus_dma_segment_t	bdm_seg;
187	size_t			bdm_size;
188	caddr_t			bdm_kva;
189};
190
191#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
192#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
193#define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
194#define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
195
196int		 bwfm_pci_match(struct device *, void *, void *);
197void		 bwfm_pci_attach(struct device *, struct device *, void *);
198int		 bwfm_pci_detach(struct device *, int);
199
200int		 bwfm_pci_intr(void *);
201void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
202void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
203int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
204		    size_t, const u_char *, size_t);
205void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);
206
207struct bwfm_pci_dmamem *
208		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
209		    bus_size_t);
210void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
211int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
212		    struct bwfm_pci_pkts *);
213int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
214		    struct bwfm_pci_pkts *, struct mbuf *,
215		    uint32_t *, paddr_t *);
216struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
217		    struct bwfm_pci_pkts *, uint32_t);
218void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
219		    struct if_rxring *, uint32_t);
220void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
221void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
222int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
223		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
224int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
225		    int, size_t);
226
227void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
228		    struct bwfm_pci_msgring *);
229void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
230		    struct bwfm_pci_msgring *);
231void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
232		    struct bwfm_pci_msgring *);
233void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
234		    struct bwfm_pci_msgring *);
235void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
236		    struct bwfm_pci_msgring *);
237void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
238		    struct bwfm_pci_msgring *);
239void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
240		    struct bwfm_pci_msgring *, int, int *);
241void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
242		    struct bwfm_pci_msgring *, int *);
243void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
244		    struct bwfm_pci_msgring *, int);
245void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
246		    struct bwfm_pci_msgring *);
247void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
248		    struct bwfm_pci_msgring *, int);
249
250void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
251		    struct bwfm_pci_msgring *, struct mbuf_list *);
252void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
253		    struct mbuf_list *);
254
255uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
256void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
257		    uint32_t);
258int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
259int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
260void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);
261
262int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
263		     struct mbuf *);
264void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
265		     struct mbuf *);
266void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
267void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
268
269int		 bwfm_pci_preinit(struct bwfm_softc *);
270void		 bwfm_pci_stop(struct bwfm_softc *);
271int		 bwfm_pci_txcheck(struct bwfm_softc *);
272int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
273
274#ifdef BWFM_DEBUG
275void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
276#endif
277
278int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
279		    int, char *, size_t *);
280int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
281		    int, char *, size_t);
282void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
283		    struct msgbuf_ioctl_resp_hdr *);
284
285struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
286	.bc_read = bwfm_pci_buscore_read,
287	.bc_write = bwfm_pci_buscore_write,
288	.bc_prepare = bwfm_pci_buscore_prepare,
289	.bc_reset = bwfm_pci_buscore_reset,
290	.bc_setup = NULL,
291	.bc_activate = bwfm_pci_buscore_activate,
292};
293
294struct bwfm_bus_ops bwfm_pci_bus_ops = {
295	.bs_preinit = bwfm_pci_preinit,
296	.bs_stop = bwfm_pci_stop,
297	.bs_txcheck = bwfm_pci_txcheck,
298	.bs_txdata = bwfm_pci_txdata,
299	.bs_txctl = NULL,
300};
301
302struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
303	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
304	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
305	.proto_rx = NULL,
306	.proto_rxctl = NULL,
307};
308
309struct cfattach bwfm_pci_ca = {
310	sizeof(struct bwfm_pci_softc),
311	bwfm_pci_match,
312	bwfm_pci_attach,
313	bwfm_pci_detach,
314};
315
316static const struct pci_matchid bwfm_pci_devices[] = {
317	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
318	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
319	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
320	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
321};
322
323int
324bwfm_pci_match(struct device *parent, void *match, void *aux)
325{
326	return (pci_matchbyid(aux, bwfm_pci_devices,
327	    nitems(bwfm_pci_devices)));
328}
329
330void
331bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
332{
333	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
334	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
335	const char *intrstr;
336	pci_intr_handle_t ih;
337
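	/*
	 * BAR1 maps the chip's internal RAM (TCM), which holds the shared
	 * structures and message rings; BAR0 is the register window that
	 * bwfm_pci_select_core() repositions onto individual cores.
	 */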
338	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
339	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
340	    NULL, &sc->sc_tcm_ios, 0)) {
341		printf(": can't map bar1\n");
342		return;
343	}
344
345	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
346	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
347	    NULL, &sc->sc_reg_ios, 0)) {
348		printf(": can't map bar0\n");
349		goto bar1;
350	}
351
352	sc->sc_pc = pa->pa_pc;
353	sc->sc_tag = pa->pa_tag;
354	sc->sc_id = pa->pa_id;
355	sc->sc_dmat = pa->pa_dmat;
356
357	/* Map and establish the interrupt. */
358	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
359		printf(": couldn't map interrupt\n");
360		goto bar0;
361	}
362	intrstr = pci_intr_string(pa->pa_pc, ih);
363
364	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
365	    bwfm_pci_intr, sc, DEVNAME(sc));
366	if (sc->sc_ih == NULL) {
367		printf(": couldn't establish interrupt");
368		if (intrstr != NULL)
369			printf(" at %s", intrstr);
370		printf("\n");
371		goto bar0;
372	}
373	printf(": %s\n", intrstr);
374
375	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
376	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
377	bwfm_attach(&sc->sc_sc);
378	config_mountroot(self, bwfm_attachhook);
379	return;
380
381bar0:
382	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
383bar1:
384	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
385}
386
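/*
 * One-time bus setup, called through the bs_preinit bus op from the
 * generic bwfm(4) layer; only the first call does the real work (see
 * sc_initialized).  It attaches the chip, loads firmware and NVRAM into
 * device RAM, parses the shared info area the firmware exports in TCM
 * and creates the common message rings and packet id tables.
 */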
387int
388bwfm_pci_preinit(struct bwfm_softc *bwfm)
389{
390	struct bwfm_pci_softc *sc = (void *)bwfm;
391	struct bwfm_pci_ringinfo ringinfo;
392	const char *chip = NULL;
393	char name[128];
394	u_char *ucode, *nvram = NULL;
395	size_t size, nvsize, nvlen = 0;
396	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
397	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
398	uint32_t idx_offset, reg;
399	int i;
400
401	if (sc->sc_initialized)
402		return 0;
403
404	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
405	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
406		printf("%s: cannot attach chip\n", DEVNAME(sc));
407		return 1;
408	}
409
410	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
411	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
412	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
413	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
414	    BWFM_PCI_PCIE2REG_CONFIGDATA);
415	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
416	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
417
418	switch (bwfm->sc_chip.ch_chip)
419	{
420	case BRCM_CC_4350_CHIP_ID:
421		if (bwfm->sc_chip.ch_chiprev > 7)
422			chip = "4350";
423		else
424			chip = "4350c2";
425		break;
426	case BRCM_CC_4356_CHIP_ID:
427		chip = "4356";
428		break;
429	case BRCM_CC_43602_CHIP_ID:
430		chip = "43602";
431		break;
432	case BRCM_CC_4371_CHIP_ID:
433		chip = "4371";
434		break;
435	case BRCM_CC_4378_CHIP_ID:
436		chip = "4378";
437		break;
438	default:
439		printf("%s: unknown firmware for chip %s\n",
440		    DEVNAME(sc), bwfm->sc_chip.ch_name);
441		return 1;
442	}
443
444	snprintf(name, sizeof(name), "brcmfmac%s-pcie.bin", chip);
445	if (loadfirmware(name, &ucode, &size) != 0) {
446		printf("%s: failed loadfirmware of file %s\n",
447		    DEVNAME(sc), name);
448		return 1;
449	}
450
451	/* .txt needs to be processed first */
452	snprintf(name, sizeof(name), "brcmfmac%s-pcie.txt", chip);
453	if (loadfirmware(name, &nvram, &nvsize) == 0) {
454		if (bwfm_nvram_convert(nvram, nvsize, &nvlen) != 0) {
455			printf("%s: failed to process file %s\n",
456			    DEVNAME(sc), name);
457			free(ucode, M_DEVBUF, size);
458			free(nvram, M_DEVBUF, nvsize);
459			return 1;
460		}
461	}
462
463	/* .nvram is the pre-processed version */
464	if (nvlen == 0) {
465		snprintf(name, sizeof(name), "brcmfmac%s-pcie.nvram", chip);
466		if (loadfirmware(name, &nvram, &nvsize) == 0)
467			nvlen = nvsize;
468	}
469
470	/* Retrieve RAM size from firmware. */
471	if (size >= BWFM_RAMSIZE + 8) {
472		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
473		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
474			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
475	}
476
477	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
478		printf("%s: could not load microcode\n",
479		    DEVNAME(sc));
480		free(ucode, M_DEVBUF, size);
481		free(nvram, M_DEVBUF, nvsize);
482		return 1;
483	}
484	free(ucode, M_DEVBUF, size);
485	free(nvram, M_DEVBUF, nvsize);
486
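	/*
	 * The address the firmware left in the last word of device RAM
	 * points at its "shared info" area in TCM.  The first word of that
	 * area packs feature flags with the protocol version in its low
	 * byte; everything below is parsed out of the shared area.
	 */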
487	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
488	    sc->sc_shared_address + BWFM_SHARED_INFO);
489	sc->sc_shared_version = sc->sc_shared_flags;
490	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
491	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
492		printf("%s: PCIe version %d unsupported\n",
493		    DEVNAME(sc), sc->sc_shared_version);
494		return 1;
495	}
496
497	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
498		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
499			sc->sc_dma_idx_sz = sizeof(uint16_t);
500		else
501			sc->sc_dma_idx_sz = sizeof(uint32_t);
502	}
503
504	/* Maximum RX data buffers in the ring. */
505	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
506	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
507	if (sc->sc_max_rxbufpost == 0)
508		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;
509
510	/* Alternative offset of data in a packet */
511	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
512	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);
513
514	/* For Power Management */
515	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
516	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
517	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
518	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);
519
520	/* Ring information */
521	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
522	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);
523
524	/* Firmware's "dmesg" */
525	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
526	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
527	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
528	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
529	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
530	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);
531
532	/* Read ring information. */
533	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
534	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
535
536	if (sc->sc_shared_version >= 6) {
537		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
538		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
539		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
540	} else {
541		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
542		sc->sc_max_flowrings = sc->sc_max_submissionrings -
543		    BWFM_NUM_TX_MSGRINGS;
544		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
545	}
546
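	/*
	 * Every submission and completion ring has a read and a write
	 * index.  Without DMA index support the indices live in TCM at the
	 * addresses advertised in the ring info; otherwise one host buffer
	 * holds all of them and its DMA address is handed to the firmware
	 * below.
	 */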
547	if (sc->sc_dma_idx_sz == 0) {
548		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
549		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
550		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
551		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
552		idx_offset = sizeof(uint32_t);
553	} else {
554		uint64_t address;
555
556		/* Each TX/RX Ring has a Read and Write Ptr */
557		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
558		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
559		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
560		    sc->sc_dma_idx_bufsz, 8);
561		if (sc->sc_dma_idx_buf == NULL) {
562			/* XXX: Fallback to TCM? */
563			printf("%s: cannot allocate idx buf\n",
564			    DEVNAME(sc));
565			return 1;
566		}
567
568		idx_offset = sc->sc_dma_idx_sz;
569		h2d_w_idx_ptr = 0;
570		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
571		ringinfo.h2d_w_idx_hostaddr_low =
572		    htole32(address & 0xffffffff);
573		ringinfo.h2d_w_idx_hostaddr_high =
574		    htole32(address >> 32);
575
576		h2d_r_idx_ptr = h2d_w_idx_ptr +
577		    sc->sc_max_submissionrings * idx_offset;
578		address += sc->sc_max_submissionrings * idx_offset;
579		ringinfo.h2d_r_idx_hostaddr_low =
580		    htole32(address & 0xffffffff);
581		ringinfo.h2d_r_idx_hostaddr_high =
582		    htole32(address >> 32);
583
584		d2h_w_idx_ptr = h2d_r_idx_ptr +
585		    sc->sc_max_submissionrings * idx_offset;
586		address += sc->sc_max_submissionrings * idx_offset;
587		ringinfo.d2h_w_idx_hostaddr_low =
588		    htole32(address & 0xffffffff);
589		ringinfo.d2h_w_idx_hostaddr_high =
590		    htole32(address >> 32);
591
592		d2h_r_idx_ptr = d2h_w_idx_ptr +
593		    sc->sc_max_completionrings * idx_offset;
594		address += sc->sc_max_completionrings * idx_offset;
595		ringinfo.d2h_r_idx_hostaddr_low =
596		    htole32(address & 0xffffffff);
597		ringinfo.d2h_r_idx_hostaddr_high =
598		    htole32(address >> 32);
599
600		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
601		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
602	}
603
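	/*
	 * The common rings live in TCM one after another starting at
	 * ringinfo.ringmem; ring_mem_ptr advances through those blocks as
	 * each ring is registered below, in the msgbuf protocol's fixed
	 * order: ctrl submit, rxpost submit, ctrl complete, TX complete,
	 * RX complete.
	 */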
604	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
605	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
606	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
607	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
608	    &ring_mem_ptr))
609		goto cleanup;
610	/* TX rxpost ring: Send clean data mbufs for RX */
611	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
612	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
613	    &ring_mem_ptr))
614		goto cleanup;
615	/* RX completion rings: recv our filled buffers back */
616	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
617	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
618	    &ring_mem_ptr))
619		goto cleanup;
620	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
621	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
622	    &ring_mem_ptr))
623		goto cleanup;
624	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
625	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
626	    &ring_mem_ptr))
627		goto cleanup;
628
629	/* Dynamic TX rings for actual data */
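	/*
	 * Index slots 0 and 1 of the host-to-device indices belong to the
	 * ctrl and rxpost submit rings above, so flowring i uses slot i + 2.
	 */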
630	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
631	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
632	for (i = 0; i < sc->sc_max_flowrings; i++) {
633		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
634		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
635		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
636	}
637
638	/* Scratch and ring update buffers for firmware */
639	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
640	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
641		goto cleanup;
642	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
643	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
644	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
645	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
646	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
647	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
648	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
649	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
650	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);
651
652	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
653	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
654		goto cleanup;
655	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
656	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
657	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
658	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
659	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
660	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
661	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
662	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
663	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);
664
665	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
666	bwfm_pci_intr_enable(sc);
667
668	/* Maps RX mbufs to a packet id and back. */
669	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
670	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
671	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
672	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
673		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
674		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
675		    &sc->sc_rx_pkts.pkts[i].bb_map);
676
677	/* Maps TX mbufs to a packet id and back. */
678	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
679	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
680	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
681	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
682		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
683		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
684		    &sc->sc_tx_pkts.pkts[i].bb_map);
685
686	/* Maps IOCTL mbufs to a packet id and back. */
687	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
688	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
689	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
690	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
691		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
692		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
693		    &sc->sc_ioctl_pkts.pkts[i].bb_map);
694
695	/*
696	 * For whatever reason (it could also be a bug somewhere in this
697	 * driver), the firmware needs a bunch of RX buffers, otherwise
698	 * it won't send any RX complete messages.  64 buffers don't
699	 * suffice, but 128 buffers are enough.
700	 */
701	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
702	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
703	if_rxr_init(&sc->sc_event_ring, 8, 8);
704	bwfm_pci_fill_rx_rings(sc);
705
706	TAILQ_INIT(&sc->sc_ioctlq);
707
708#ifdef BWFM_DEBUG
709	sc->sc_console_readidx = 0;
710	bwfm_pci_debug_console(sc);
711#endif
712
713	sc->sc_initialized = 1;
714	return 0;
715
716cleanup:
717	if (sc->sc_ringupd_buf)
718		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
719	if (sc->sc_scratch_buf)
720		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
721	if (sc->sc_rx_complete.ring)
722		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
723	if (sc->sc_tx_complete.ring)
724		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
725	if (sc->sc_ctrl_complete.ring)
726		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
727	if (sc->sc_rxpost_submit.ring)
728		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
729	if (sc->sc_ctrl_submit.ring)
730		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
731	if (sc->sc_dma_idx_buf)
732		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
733	return 1;
734}
735
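/*
 * Copy the firmware image to the start of device RAM, zero the last word
 * of RAM and place the NVRAM contents at its end, then start the ARM core
 * using the reset vector found in the first word of the image.  Once up,
 * the firmware overwrites that last word with the TCM address of its
 * shared info area, which the polling loop below waits for.
 */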
736int
737bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
738    const u_char *nvram, size_t nvlen)
739{
740	struct bwfm_softc *bwfm = (void *)sc;
741	struct bwfm_core *core;
742	uint32_t shared, written;
743	int i;
744
745	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
746		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
747		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
748		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
749		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
750		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
751		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
752		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
753		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
754		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
755	}
756
757	for (i = 0; i < size; i++)
758		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
759		    bwfm->sc_chip.ch_rambase + i, ucode[i]);
760
761	/* Firmware replaces this with a pointer once up. */
762	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
763	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);
764
765	if (nvram) {
766		for (i = 0; i < nvlen; i++)
767			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
768			    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize
769			    - nvlen + i, nvram[i]);
770	}
771
772	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
773	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
774
775	/* Load reset vector from firmware and kickstart core. */
776	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
777		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
778		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
779	}
780	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);
781
782	for (i = 0; i < 40; i++) {
783		delay(50 * 1000);
784		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
785		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
786		if (shared != written)
787			break;
788	}
789	if (!shared) {
790		printf("%s: firmware did not come up\n", DEVNAME(sc));
791		return 1;
792	}
793
794	sc->sc_shared_address = shared;
795	return 0;
796}
797
798int
799bwfm_pci_detach(struct device *self, int flags)
800{
801	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
802
803	bwfm_detach(&sc->sc_sc, flags);
804
805	/* FIXME: free RX buffers */
806	/* FIXME: free TX buffers */
807	/* FIXME: free more memory */
808
809	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
810	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
811	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
812	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
813	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
814	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
815	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
816	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
817	return 0;
818}
819
820/* DMA code */
821struct bwfm_pci_dmamem *
822bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
823{
824	struct bwfm_pci_dmamem *bdm;
825	int nsegs;
826
827	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
828	bdm->bdm_size = size;
829
830	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
831	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
832		goto bdmfree;
833
834	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
835	    &nsegs, BUS_DMA_WAITOK) != 0)
836		goto destroy;
837
838	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
839	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
840		goto free;
841
842	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
843	    NULL, BUS_DMA_WAITOK) != 0)
844		goto unmap;
845
846	bzero(bdm->bdm_kva, size);
847
848	return (bdm);
849
850unmap:
851	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
852free:
853	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
854destroy:
855	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
856bdmfree:
857	free(bdm, M_DEVBUF, sizeof(*bdm));
858
859	return (NULL);
860}
861
862void
863bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
864{
865	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
866	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
867	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
868	free(bdm, M_DEVBUF, sizeof(*bdm));
869}
870
871/*
872 * We need a simple mapping from a packet ID to mbufs, because when
873 * a transfer completes, we only get the ID back and have to look up
874 * the mbuf behind that ID.  This simply looks for an empty slot.
875 */
876int
877bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
878{
879	int i, idx;
880
881	idx = pkts->last + 1;
882	for (i = 0; i < pkts->npkt; i++) {
883		if (idx == pkts->npkt)
884			idx = 0;
885		if (pkts->pkts[idx].bb_m == NULL)
886			return 0;
887		idx++;
888	}
889	return ENOBUFS;
890}
891
892int
893bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
894    struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
895{
896	int i, idx;
897
898	idx = pkts->last + 1;
899	for (i = 0; i < pkts->npkt; i++) {
900		if (idx == pkts->npkt)
901			idx = 0;
902		if (pkts->pkts[idx].bb_m == NULL) {
903			if (bus_dmamap_load_mbuf(sc->sc_dmat,
904			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
905				if (m_defrag(m, M_DONTWAIT))
906					return EFBIG;
907				if (bus_dmamap_load_mbuf(sc->sc_dmat,
908				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
909					return EFBIG;
910			}
911			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
912			    0, pkts->pkts[idx].bb_map->dm_mapsize,
913			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
914			pkts->last = idx;
915			pkts->pkts[idx].bb_m = m;
916			*pktid = idx;
917			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
918			return 0;
919		}
920		idx++;
921	}
922	return ENOBUFS;
923}
924
925struct mbuf *
926bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
927    uint32_t pktid)
928{
929	struct mbuf *m;
930
931	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
932		return NULL;
933	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
934	    pkts->pkts[pktid].bb_map->dm_mapsize,
935	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
936	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
937	m = pkts->pkts[pktid].bb_m;
938	pkts->pkts[pktid].bb_m = NULL;
939	return m;
940}
941
942void
943bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
944{
945	bwfm_pci_fill_rx_buf_ring(sc);
946	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
947	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
948	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
949	    MSGBUF_TYPE_EVENT_BUF_POST);
950}
951
952void
953bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
954    uint32_t msgtype)
955{
956	struct msgbuf_rx_ioctl_resp_or_event *req;
957	struct mbuf *m;
958	uint32_t pktid;
959	paddr_t paddr;
960	int s, slots;
961
962	s = splnet();
963	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
964		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
965			break;
966		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
967		if (req == NULL)
968			break;
969		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
970		if (m == NULL) {
971			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
972			break;
973		}
974		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
975		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
976			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
977			m_freem(m);
978			break;
979		}
980		memset(req, 0, sizeof(*req));
981		req->msg.msgtype = msgtype;
982		req->msg.request_id = htole32(pktid);
983		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
984		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
985		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
986		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
987	}
988	if_rxr_put(rxring, slots);
989	splx(s);
990}
991
992void
993bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
994{
995	struct msgbuf_rx_bufpost *req;
996	struct mbuf *m;
997	uint32_t pktid;
998	paddr_t paddr;
999	int s, slots;
1000
1001	s = splnet();
1002	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
1003	    slots > 0; slots--) {
1004		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1005			break;
1006		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
1007		if (req == NULL)
1008			break;
1009		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
1010		if (m == NULL) {
1011			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1012			break;
1013		}
1014		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
1015		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
1016			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1017			m_freem(m);
1018			break;
1019		}
1020		memset(req, 0, sizeof(*req));
1021		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
1022		req->msg.request_id = htole32(pktid);
1023		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
1024		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1025		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1026		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
1027	}
1028	if_rxr_put(&sc->sc_rxbuf_ring, slots);
1029	splx(s);
1030}
1031
1032int
1033bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1034    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
1035    int idx, uint32_t idx_off, uint32_t *ring_mem)
1036{
1037	ring->w_idx_addr = w_idx + idx * idx_off;
1038	ring->r_idx_addr = r_idx + idx * idx_off;
1039	ring->nitem = nitem;
1040	ring->itemsz = itemsz;
1041	bwfm_pci_ring_write_rptr(sc, ring);
1042	bwfm_pci_ring_write_wptr(sc, ring);
1043
1044	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
1045	if (ring->ring == NULL)
1046		return ENOMEM;
1047	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1048	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
1049	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1050	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1051	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
1052	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1053	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1054	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
1055	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1056	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
1057	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
1058	return 0;
1059}
1060
1061int
1062bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1063    int nitem, size_t itemsz)
1064{
1065	ring->w_ptr = 0;
1066	ring->r_ptr = 0;
1067	ring->nitem = nitem;
1068	ring->itemsz = itemsz;
1069	bwfm_pci_ring_write_rptr(sc, ring);
1070	bwfm_pci_ring_write_wptr(sc, ring);
1071
1072	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
1073	if (ring->ring == NULL)
1074		return ENOMEM;
1075	return 0;
1076}
1077
1078/* Ring helpers */
1079void
1080bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1081    struct bwfm_pci_msgring *ring)
1082{
1083	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1084	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
1085}
1086
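/*
 * The ring indices are read and written either directly in TCM or through
 * the host-resident DMA index buffer, depending on whether the firmware
 * advertised DMA index support.  update_* refreshes our cached copy from
 * the firmware's view, write_* publishes our copy back.
 */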
1087void
1088bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
1089    struct bwfm_pci_msgring *ring)
1090{
1091	if (sc->sc_dma_idx_sz == 0) {
1092		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
1093		    sc->sc_tcm_ioh, ring->r_idx_addr);
1094	} else {
1095		bus_dmamap_sync(sc->sc_dmat,
1096		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
1097		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1098		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1099		    + ring->r_idx_addr);
1100	}
1101}
1102
1103void
1104bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
1105    struct bwfm_pci_msgring *ring)
1106{
1107	if (sc->sc_dma_idx_sz == 0) {
1108		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
1109		    sc->sc_tcm_ioh, ring->w_idx_addr);
1110	} else {
1111		bus_dmamap_sync(sc->sc_dmat,
1112		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
1113		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1114		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1115		    + ring->w_idx_addr);
1116	}
1117}
1118
1119void
1120bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
1121    struct bwfm_pci_msgring *ring)
1122{
1123	if (sc->sc_dma_idx_sz == 0) {
1124		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1125		    ring->r_idx_addr, ring->r_ptr);
1126	} else {
1127		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1128		    + ring->r_idx_addr) = ring->r_ptr;
1129		bus_dmamap_sync(sc->sc_dmat,
1130		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
1131		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1132	}
1133}
1134
1135void
1136bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
1137    struct bwfm_pci_msgring *ring)
1138{
1139	if (sc->sc_dma_idx_sz == 0) {
1140		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1141		    ring->w_idx_addr, ring->w_ptr);
1142	} else {
1143		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1144		    + ring->w_idx_addr) = ring->w_ptr;
1145		bus_dmamap_sync(sc->sc_dmat,
1146		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
1147		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1148	}
1149}
1150
1151/*
1152 * Retrieve a free descriptor to put new stuff in, but don't commit
1153 * to it yet so we can roll back later if any error occurs.
1154 */
1155void *
1156bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1157    struct bwfm_pci_msgring *ring)
1158{
1159	int available;
1160	char *ret;
1161
1162	bwfm_pci_ring_update_rptr(sc, ring);
1163
1164	if (ring->r_ptr > ring->w_ptr)
1165		available = ring->r_ptr - ring->w_ptr;
1166	else
1167		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1168
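	/* Keep one slot unused so a full ring can be told from an empty one. */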
1169	if (available <= 1)
1170		return NULL;
1171
1172	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1173	ring->w_ptr += 1;
1174	if (ring->w_ptr == ring->nitem)
1175		ring->w_ptr = 0;
1176	return ret;
1177}
1178
1179void *
1180bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
1181    struct bwfm_pci_msgring *ring, int count, int *avail)
1182{
1183	int available;
1184	char *ret;
1185
1186	bwfm_pci_ring_update_rptr(sc, ring);
1187
1188	if (ring->r_ptr > ring->w_ptr)
1189		available = ring->r_ptr - ring->w_ptr;
1190	else
1191		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1192
1193	if (available <= 1)
1194		return NULL;
1195
1196	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1197	*avail = min(count, available - 1);
1198	if (*avail + ring->w_ptr > ring->nitem)
1199		*avail = ring->nitem - ring->w_ptr;
1200	ring->w_ptr += *avail;
1201	if (ring->w_ptr == ring->nitem)
1202		ring->w_ptr = 0;
1203	return ret;
1204}
1205
1206/*
1207 * Read number of descriptors available (submitted by the firmware)
1208 * and retrieve pointer to first descriptor.
1209 */
1210void *
1211bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
1212    struct bwfm_pci_msgring *ring, int *avail)
1213{
1214	bwfm_pci_ring_update_wptr(sc, ring);
1215
1216	if (ring->w_ptr >= ring->r_ptr)
1217		*avail = ring->w_ptr - ring->r_ptr;
1218	else
1219		*avail = ring->nitem - ring->r_ptr;
1220
1221	if (*avail == 0)
1222		return NULL;
1223
1224	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
1225	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
1226	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1227	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
1228}
1229
1230/*
1231 * Let firmware know we read N descriptors.
1232 */
1233void
1234bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
1235    struct bwfm_pci_msgring *ring, int nitem)
1236{
1237	ring->r_ptr += nitem;
1238	if (ring->r_ptr == ring->nitem)
1239		ring->r_ptr = 0;
1240	bwfm_pci_ring_write_rptr(sc, ring);
1241}
1242
1243/*
1244 * Let firmware know that we submitted some descriptors.
1245 */
1246void
1247bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
1248    struct bwfm_pci_msgring *ring)
1249{
1250	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
1251	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
1252	    BUS_DMASYNC_PREWRITE);
1253	bwfm_pci_ring_write_wptr(sc, ring);
1254	bwfm_pci_ring_bell(sc, ring);
1255}
1256
1257/*
1258 * Rollback N descriptors in case we don't actually want
1259 * to commit to it.
1260 */
1261void
1262bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1263    struct bwfm_pci_msgring *ring, int nitem)
1264{
1265	if (ring->w_ptr == 0)
1266		ring->w_ptr = ring->nitem - nitem;
1267	else
1268		ring->w_ptr -= nitem;
1269}
1270
1271/*
1272 * For each written descriptor on the ring, pass the descriptor to
1273 * a message handler and let the firmware know we handled it.
1274 */
1275void
1276bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1277    struct mbuf_list *ml)
1278{
1279	void *buf;
1280	int avail, processed;
1281
1282again:
1283	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
1284	if (buf == NULL)
1285		return;
1286
1287	processed = 0;
1288	while (avail) {
1289		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
1290		buf += ring->itemsz;
1291		processed++;
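		/*
		 * Hand read slots back to the firmware in batches; 48
		 * appears to be an arbitrary batch size that keeps the
		 * ring from running dry while we process a burst.
		 */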
1292		if (processed == 48) {
1293			bwfm_pci_ring_read_commit(sc, ring, processed);
1294			processed = 0;
1295		}
1296		avail--;
1297	}
1298	if (processed)
1299		bwfm_pci_ring_read_commit(sc, ring, processed);
1300	if (ring->r_ptr == 0)
1301		goto again;
1302}
1303
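/*
 * Dispatch a single message from a completion ring: flowring create and
 * delete completions, ioctl buffer acks and ioctl completions, firmware
 * events, TX status and RX completions.  RX-type messages consume a
 * posted buffer, so the RX rings are refilled on the way out.
 */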
1304void
1305bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
1306{
1307	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1308	struct msgbuf_ioctl_resp_hdr *resp;
1309	struct msgbuf_tx_status *tx;
1310	struct msgbuf_rx_complete *rx;
1311	struct msgbuf_rx_event *event;
1312	struct msgbuf_common_hdr *msg;
1313	struct msgbuf_flowring_create_resp *fcr;
1314	struct msgbuf_flowring_delete_resp *fdr;
1315	struct bwfm_pci_msgring *ring;
1316	struct mbuf *m;
1317	int flowid;
1318
1319	msg = (struct msgbuf_common_hdr *)buf;
1320	switch (msg->msgtype)
1321	{
1322	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1323		fcr = (struct msgbuf_flowring_create_resp *)buf;
1324		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1325		if (flowid < 2)
1326			break;
1327		flowid -= 2;
1328		if (flowid >= sc->sc_max_flowrings)
1329			break;
1330		ring = &sc->sc_flowrings[flowid];
1331		if (ring->status != RING_OPENING)
1332			break;
1333		if (fcr->compl_hdr.status) {
1334			printf("%s: failed to open flowring %d\n",
1335			    DEVNAME(sc), flowid);
1336			ring->status = RING_CLOSED;
1337			if (ring->m) {
1338				m_freem(ring->m);
1339				ring->m = NULL;
1340			}
1341			ifq_restart(&ifp->if_snd);
1342			break;
1343		}
1344		ring->status = RING_OPEN;
1345		if (ring->m != NULL) {
1346			m = ring->m;
1347			ring->m = NULL;
1348			if (bwfm_pci_txdata(&sc->sc_sc, m))
1349				m_freem(m);
1350		}
1351		ifq_restart(&ifp->if_snd);
1352		break;
1353	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1354		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1355		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1356		if (flowid < 2)
1357			break;
1358		flowid -= 2;
1359		if (flowid >= sc->sc_max_flowrings)
1360			break;
1361		ring = &sc->sc_flowrings[flowid];
1362		if (ring->status != RING_CLOSING)
1363			break;
1364		if (fdr->compl_hdr.status) {
1365			printf("%s: failed to delete flowring %d\n",
1366			    DEVNAME(sc), flowid);
1367			break;
1368		}
1369		bwfm_pci_dmamem_free(sc, ring->ring);
1370		ring->status = RING_CLOSED;
1371		break;
1372	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1373		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1374		    letoh32(msg->request_id));
1375		if (m == NULL)
1376			break;
1377		m_freem(m);
1378		break;
1379	case MSGBUF_TYPE_IOCTL_CMPLT:
1380		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1381		bwfm_pci_msgbuf_rxioctl(sc, resp);
1382		if_rxr_put(&sc->sc_ioctl_ring, 1);
1383		bwfm_pci_fill_rx_rings(sc);
1384		break;
1385	case MSGBUF_TYPE_WL_EVENT:
1386		event = (struct msgbuf_rx_event *)buf;
1387		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1388		    letoh32(event->msg.request_id));
1389		if (m == NULL)
1390			break;
1391		m_adj(m, sc->sc_rx_dataoffset);
1392		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1393		bwfm_rx(&sc->sc_sc, m, ml);
1394		if_rxr_put(&sc->sc_event_ring, 1);
1395		bwfm_pci_fill_rx_rings(sc);
1396		break;
1397	case MSGBUF_TYPE_TX_STATUS:
1398		tx = (struct msgbuf_tx_status *)buf;
1399		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1400		    letoh32(tx->msg.request_id) - 1);
1401		if (m == NULL)
1402			break;
1403		m_freem(m);
1404		if (sc->sc_tx_pkts_full) {
1405			sc->sc_tx_pkts_full = 0;
1406			ifq_restart(&ifp->if_snd);
1407		}
1408		break;
1409	case MSGBUF_TYPE_RX_CMPLT:
1410		rx = (struct msgbuf_rx_complete *)buf;
1411		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1412		    letoh32(rx->msg.request_id));
1413		if (m == NULL)
1414			break;
1415		if (letoh16(rx->data_offset))
1416			m_adj(m, letoh16(rx->data_offset));
1417		else if (sc->sc_rx_dataoffset)
1418			m_adj(m, sc->sc_rx_dataoffset);
1419		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1420		bwfm_rx(&sc->sc_sc, m, ml);
1421		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1422		bwfm_pci_fill_rx_rings(sc);
1423		break;
1424	default:
1425		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1426		break;
1427	}
1428}
1429
1430/* Bus core helpers */
1431void
1432bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1433{
1434	struct bwfm_softc *bwfm = (void *)sc;
1435	struct bwfm_core *core;
1436
1437	core = bwfm_chip_get_core(bwfm, id);
1438	if (core == NULL) {
1439		printf("%s: could not find core to select\n", DEVNAME(sc));
1440		return;
1441	}
1442
1443	pci_conf_write(sc->sc_pc, sc->sc_tag,
1444	    BWFM_PCI_BAR0_WINDOW, core->co_base);
1445	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1446	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
1447		pci_conf_write(sc->sc_pc, sc->sc_tag,
1448		    BWFM_PCI_BAR0_WINDOW, core->co_base);
1449}
1450
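/*
 * Indirect access to the chip's backplane address space: point the BAR0
 * window register at the page containing the target register and access
 * the remaining offset through BAR0.
 */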
1451uint32_t
1452bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1453{
1454	struct bwfm_pci_softc *sc = (void *)bwfm;
1455	uint32_t page, offset;
1456
1457	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1458	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1459	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1460	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1461}
1462
1463void
1464bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1465{
1466	struct bwfm_pci_softc *sc = (void *)bwfm;
1467	uint32_t page, offset;
1468
1469	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1470	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1471	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1472	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1473}
1474
1475int
1476bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
1477{
1478	return 0;
1479}
1480
1481int
1482bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
1483{
1484	struct bwfm_pci_softc *sc = (void *)bwfm;
1485	struct bwfm_core *core;
1486	uint32_t reg;
1487	int i;
1488
1489	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
1490	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
1491	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
1492	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
1493	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);
1494
1495	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
1496	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1497	    BWFM_CHIP_REG_WATCHDOG, 4);
1498	delay(100 * 1000);
1499
1500	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
1501	pci_conf_write(sc->sc_pc, sc->sc_tag,
1502	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);
1503
1504	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
1505	if (core->co_rev <= 13) {
1506		uint16_t cfg_offset[] = {
1507		    BWFM_PCI_CFGREG_STATUS_CMD,
1508		    BWFM_PCI_CFGREG_PM_CSR,
1509		    BWFM_PCI_CFGREG_MSI_CAP,
1510		    BWFM_PCI_CFGREG_MSI_ADDR_L,
1511		    BWFM_PCI_CFGREG_MSI_ADDR_H,
1512		    BWFM_PCI_CFGREG_MSI_DATA,
1513		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
1514		    BWFM_PCI_CFGREG_RBAR_CTRL,
1515		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
1516		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
1517		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
1518		};
1519
1520		for (i = 0; i < nitems(cfg_offset); i++) {
1521			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1522			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
1523			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1524			    BWFM_PCI_PCIE2REG_CONFIGDATA);
1525			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
1526			    DEVNAME(sc), cfg_offset[i], reg));
1527			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1528			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
1529		}
1530	}
1531
1532	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1533	    BWFM_PCI_PCIE2REG_MAILBOXINT);
1534	if (reg != 0xffffffff)
1535		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1536		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);
1537
1538	return 0;
1539}
1540
1541void
1542bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
1543{
1544	struct bwfm_pci_softc *sc = (void *)bwfm;
1545	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
1546}
1547
1548static int bwfm_pci_prio2fifo[8] = {
1549	1, /* best effort */
1550	0, /* IPTOS_PREC_IMMEDIATE */
1551	0, /* IPTOS_PREC_PRIORITY */
1552	1, /* IPTOS_PREC_FLASH */
1553	2, /* IPTOS_PREC_FLASHOVERRIDE */
1554	2, /* IPTOS_PREC_CRITIC_ECP */
1555	3, /* IPTOS_PREC_INTERNETCONTROL */
1556	3, /* IPTOS_PREC_NETCONTROL */
1557};
1558
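/*
 * Map an outgoing mbuf to a flowring.  The starting flowid is derived
 * from the frame's classified priority (and, in hostap mode, the
 * destination MAC), then the table is probed linearly for an open ring
 * with a matching fifo (and MAC).  Returns -1 if no such ring exists yet.
 */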
1559int
1560bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1561{
1562	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1563#ifndef IEEE80211_STA_ONLY
1564	uint8_t *da = mtod(m, uint8_t *);
1565#endif
1566	int flowid, prio, fifo;
1567	int i, found;
1568
1569	prio = ieee80211_classify(ic, m);
1570	fifo = bwfm_pci_prio2fifo[prio];
1571
1572	switch (ic->ic_opmode)
1573	{
1574	case IEEE80211_M_STA:
1575		flowid = fifo;
1576		break;
1577#ifndef IEEE80211_STA_ONLY
1578	case IEEE80211_M_HOSTAP:
1579		if (ETHER_IS_MULTICAST(da))
1580			da = etherbroadcastaddr;
1581		flowid = da[5] * 2 + fifo;
1582		break;
1583#endif
1584	default:
1585		printf("%s: state not supported\n", DEVNAME(sc));
1586		return ENOBUFS;
1587	}
1588
1589	found = 0;
1590	flowid = flowid % sc->sc_max_flowrings;
1591	for (i = 0; i < sc->sc_max_flowrings; i++) {
1592		if (ic->ic_opmode == IEEE80211_M_STA &&
1593		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1594		    sc->sc_flowrings[flowid].fifo == fifo) {
1595			found = 1;
1596			break;
1597		}
1598#ifndef IEEE80211_STA_ONLY
1599		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1600		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1601		    sc->sc_flowrings[flowid].fifo == fifo &&
1602		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
1603			found = 1;
1604			break;
1605		}
1606#endif
1607		flowid = (flowid + 1) % sc->sc_max_flowrings;
1608	}
1609
1610	if (found)
1611		return flowid;
1612
1613	return -1;
1614}
1615
1616void
1617bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
1618{
1619	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1620	struct bwfm_cmd_flowring_create cmd;
1621#ifndef IEEE80211_STA_ONLY
1622	uint8_t *da = mtod(m, uint8_t *);
1623#endif
1624	struct bwfm_pci_msgring *ring;
1625	int flowid, prio, fifo;
1626	int i, found;
1627
1628	prio = ieee80211_classify(ic, m);
1629	fifo = bwfm_pci_prio2fifo[prio];
1630
1631	switch (ic->ic_opmode)
1632	{
1633	case IEEE80211_M_STA:
1634		flowid = fifo;
1635		break;
1636#ifndef IEEE80211_STA_ONLY
1637	case IEEE80211_M_HOSTAP:
1638		if (ETHER_IS_MULTICAST(da))
1639			da = etherbroadcastaddr;
1640		flowid = da[5] * 2 + fifo;
1641		break;
1642#endif
1643	default:
1644		printf("%s: state not supported\n", DEVNAME(sc));
1645		return;
1646	}
1647
1648	found = 0;
1649	flowid = flowid % sc->sc_max_flowrings;
1650	for (i = 0; i < sc->sc_max_flowrings; i++) {
1651		ring = &sc->sc_flowrings[flowid];
1652		if (ring->status == RING_CLOSED) {
1653			ring->status = RING_OPENING;
1654			found = 1;
1655			break;
1656		}
1657		flowid = (flowid + 1) % sc->sc_max_flowrings;
1658	}
1659
1660	/*
1661	 * We cannot recover from that so far.  Only a stop/init
1662	 * cycle can revive this if it ever happens at all.
1663	 */
1664	if (!found) {
1665		printf("%s: no flowring available\n", DEVNAME(sc));
1666		return;
1667	}
1668
1669	cmd.m = m;
1670	cmd.prio = prio;
1671	cmd.flowid = flowid;
1672	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
1673}
1674
1675void
1676bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
1677{
1678	struct bwfm_pci_softc *sc = (void *)bwfm;
1679#ifndef IEEE80211_STA_ONLY
1680	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1681#endif
1682	struct bwfm_cmd_flowring_create *cmd = arg;
1683	struct msgbuf_tx_flowring_create_req *req;
1684	struct bwfm_pci_msgring *ring;
1685	uint8_t *da, *sa;
1686	int s;
1687
1688	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1689	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1690
1691	ring = &sc->sc_flowrings[cmd->flowid];
1692	if (ring->status != RING_OPENING) {
1693		printf("%s: flowring not opening\n", DEVNAME(sc));
1694		return;
1695	}
1696
1697	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
1698		printf("%s: cannot setup flowring\n", DEVNAME(sc));
1699		return;
1700	}
1701
1702	s = splnet();
1703	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1704	if (req == NULL) {
1705		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1706		splx(s);
1707		return;
1708	}
1709
1710	ring->status = RING_OPENING;
1711	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1712	ring->m = cmd->m;
1713	memcpy(ring->mac, da, ETHER_ADDR_LEN);
1714#ifndef IEEE80211_STA_ONLY
1715	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1716		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1717#endif
1718
1719	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1720	req->msg.ifidx = 0;
1721	req->msg.request_id = 0;
1722	req->tid = bwfm_pci_prio2fifo[cmd->prio];
1723	req->flow_ring_id = letoh16(cmd->flowid + 2);
1724	memcpy(req->da, da, ETHER_ADDR_LEN);
1725	memcpy(req->sa, sa, ETHER_ADDR_LEN);
1726	req->flow_ring_addr.high_addr =
1727	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1728	req->flow_ring_addr.low_addr =
1729	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1730	req->max_items = letoh16(512);
1731	req->len_item = letoh16(48);
1732
1733	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1734	splx(s);
1735}
1736
1737void
1738bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1739{
1740	struct msgbuf_tx_flowring_delete_req *req;
1741	struct bwfm_pci_msgring *ring;
1742	int s;
1743
1744	ring = &sc->sc_flowrings[flowid];
1745	if (ring->status != RING_OPEN) {
1746		printf("%s: flowring not open\n", DEVNAME(sc));
1747		return;
1748	}
1749
1750	s = splnet();
1751	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1752	if (req == NULL) {
1753		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1754		splx(s);
1755		return;
1756	}
1757
1758	ring->status = RING_CLOSING;
1759
1760	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1761	req->msg.ifidx = 0;
1762	req->msg.request_id = 0;
1763	req->flow_ring_id = htole16(flowid + 2);
1764	req->reason = 0;
1765
1766	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1767	splx(s);
1768}
1769
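/* On stop, request deletion of every flowring that is still open. */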
1770void
1771bwfm_pci_stop(struct bwfm_softc *bwfm)
1772{
1773	struct bwfm_pci_softc *sc = (void *)bwfm;
1774	struct bwfm_pci_msgring *ring;
1775	int i;
1776
1777	for (i = 0; i < sc->sc_max_flowrings; i++) {
1778		ring = &sc->sc_flowrings[i];
1779		if (ring->status == RING_OPEN)
1780			bwfm_pci_flowring_delete(sc, i);
1781	}
1782}
1783
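/*
 * Check whether we may transmit right now: refuse while any flowring
 * is still being opened or while the TX packet id pool is exhausted.
 */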
1784int
1785bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1786{
1787	struct bwfm_pci_softc *sc = (void *)bwfm;
1788	struct bwfm_pci_msgring *ring;
1789	int i;
1790
1791	/* If we are transitioning, we cannot send. */
1792	for (i = 0; i < sc->sc_max_flowrings; i++) {
1793		ring = &sc->sc_flowrings[i];
1794		if (ring->status == RING_OPENING)
1795			return ENOBUFS;
1796	}
1797
1798	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1799		sc->sc_tx_pkts_full = 1;
1800		return ENOBUFS;
1801	}
1802
1803	return 0;
1804}
1805
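/*
 * Queue an mbuf for transmission.  Look up (or asynchronously create)
 * the flowring for this packet, reserve a slot on it, map the mbuf
 * under a fresh TX packet id and post a MSGBUF_TYPE_TX_POST request
 * pointing at the payload behind the Ethernet header.
 */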
1806int
1807bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
1808{
1809	struct bwfm_pci_softc *sc = (void *)bwfm;
1810	struct bwfm_pci_msgring *ring;
1811	struct msgbuf_tx_msghdr *tx;
1812	uint32_t pktid;
1813	paddr_t paddr;
1814	int flowid, ret;
1815
1816	flowid = bwfm_pci_flowring_lookup(sc, m);
1817	if (flowid < 0) {
1818		/*
1819		 * We cannot send the packet right now as there is
1820		 * no flowring yet.  The flowring will be created
1821		 * asynchronously.  While the ring is transitioning
1822		 * the TX check will tell the upper layers that we
1823		 * cannot send packets right now.  When the flowring
1824		 * is created the queue will be restarted and this
1825		 * mbuf will be transmitted.
1826		 */
1827		bwfm_pci_flowring_create(sc, m);
1828		return 0;
1829	}
1830
1831	ring = &sc->sc_flowrings[flowid];
1832	if (ring->status == RING_OPENING ||
1833	    ring->status == RING_CLOSING) {
1834		printf("%s: tried to use a flow that was "
1835		    "transitioning in status %d\n",
1836		    DEVNAME(sc), ring->status);
1837		return ENOBUFS;
1838	}
1839
1840	tx = bwfm_pci_ring_write_reserve(sc, ring);
1841	if (tx == NULL)
1842		return ENOBUFS;
1843
1844	memset(tx, 0, sizeof(*tx));
1845	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1846	tx->msg.ifidx = 0;
1847	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1848	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
1849	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1850	tx->seg_cnt = 1;
1851	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);
1852
1853	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
1854	if (ret) {
1855		if (ret == ENOBUFS) {
1856			printf("%s: no pktid available for TX\n",
1857			    DEVNAME(sc));
1858			sc->sc_tx_pkts_full = 1;
1859		}
1860		bwfm_pci_ring_write_cancel(sc, ring, 1);
1861		return ret;
1862	}
1863	paddr += ETHER_HDR_LEN;
1864
1865	tx->msg.request_id = htole32(pktid + 1);
1866	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
1867	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1868	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1869
1870	bwfm_pci_ring_write_commit(sc, ring);
1871	return 0;
1872}
1873
1874#ifdef BWFM_DEBUG
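/*
 * Drain the firmware console buffer through the TCM bus space mapping
 * and print its contents at debug level 3.
 */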
1875void
1876bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1877{
1878	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1879	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1880
1881	if (newidx != sc->sc_console_readidx)
1882		DPRINTFN(3, ("BWFM CONSOLE: "));
1883	while (newidx != sc->sc_console_readidx) {
1884		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1885		    sc->sc_console_buf_addr + sc->sc_console_readidx);
1886		sc->sc_console_readidx++;
1887		if (sc->sc_console_readidx == sc->sc_console_buf_size)
1888			sc->sc_console_readidx = 0;
1889		if (ch == '\r')
1890			continue;
1891		DPRINTFN(3, ("%c", ch));
1892	}
1893}
1894#endif
1895
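/*
 * Interrupt handler: acknowledge the mailbox interrupt, drain the
 * D2H completion rings, hand received packets to the network stack
 * and re-enable the interrupt before returning.
 */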
1896int
1897bwfm_pci_intr(void *v)
1898{
1899	struct bwfm_pci_softc *sc = (void *)v;
1900	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1901	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1902	uint32_t status;
1903
1904	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1905	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
1906		return 0;
1907
1908	bwfm_pci_intr_disable(sc);
1909	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1910	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
1911
1912	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1913	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
1914		printf("%s: handle MB data\n", __func__);
1915
1916	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
1917		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
1918		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
1919		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);
1920
1921		if (ifiq_input(&ifp->if_rcv, &ml))
1922			if_rxr_livelocked(&sc->sc_rxbuf_ring);
1923	}
1924
1925#ifdef BWFM_DEBUG
1926	bwfm_pci_debug_console(sc);
1927#endif
1928
1929	bwfm_pci_intr_enable(sc);
1930	return 1;
1931}
1932
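/* Unmask the function 0 mailbox and D2H doorbell interrupts. */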
1933void
1934bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
1935{
1936	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1937	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
1938	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1939	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
1940	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
1941}
1942
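/* Mask all mailbox interrupts. */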
1943void
1944bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
1945{
1946	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1947	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
1948}
1949
1950/* Msgbuf protocol implementation */
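/*
 * Issue a firmware ioctl: copy the request into a DMA-able mbuf, post
 * a MSGBUF_TYPE_IOCTLPTR_REQ on the control submit ring and sleep up
 * to one second for the matching completion, which is delivered via
 * bwfm_pci_msgbuf_rxioctl().
 */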
1951int
1952bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
1953    int cmd, char *buf, size_t *len)
1954{
1955	struct bwfm_pci_softc *sc = (void *)bwfm;
1956	struct msgbuf_ioctl_req_hdr *req;
1957	struct bwfm_pci_ioctl *ctl;
1958	struct mbuf *m;
1959	uint32_t pktid;
1960	paddr_t paddr;
1961	size_t buflen;
1962	int s;
1963
1964	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
1965	m = MCLGETL(NULL, M_DONTWAIT, buflen);
1966	if (m == NULL)
1967		return 1;
1968	m->m_len = m->m_pkthdr.len = buflen;
1969
1970	if (buf)
1971		memcpy(mtod(m, char *), buf, buflen);
1972	else
1973		memset(mtod(m, char *), 0, buflen);
1974
1975	s = splnet();
1976	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1977	if (req == NULL) {
1978		splx(s);
1979		m_freem(m);
1980		return 1;
1981	}
1982
1983	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
1984		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1985		splx(s);
1986		m_freem(m);
1987		return 1;
1988	}
1989
1990	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
1991	ctl->transid = sc->sc_ioctl_transid++;
1992	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);
1993
1994	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
1995	req->msg.ifidx = 0;
1996	req->msg.flags = 0;
1997	req->msg.request_id = htole32(pktid);
1998	req->cmd = htole32(cmd);
1999	req->output_buf_len = htole16(*len);
2000	req->trans_id = htole16(ctl->transid);
2001
2002	req->input_buf_len = htole16(m->m_len);
2003	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
2004	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);
2005
2006	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2007	splx(s);
2008
2009	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(1));
2010	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);
2011
2012	if (ctl->m == NULL) {
2013		free(ctl, M_TEMP, sizeof(*ctl));
2014		return 1;
2015	}
2016
2017	*len = min(ctl->retlen, m->m_len);
2018	*len = min(*len, buflen);
2019	if (buf)
2020		m_copydata(ctl->m, 0, *len, buf);
2021	m_freem(ctl->m);
2022
2023	if (ctl->status < 0) {
2024		free(ctl, M_TEMP, sizeof(*ctl));
2025		return 1;
2026	}
2027
2028	free(ctl, M_TEMP, sizeof(*ctl));
2029	return 0;
2030}
2031
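/* A set ioctl is a query that uses the same buffer for input and output. */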
2032int
2033bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2034    int cmd, char *buf, size_t len)
2035{
2036	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2037}
2038
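/*
 * Handle an ioctl completion: recover the response mbuf by packet id,
 * match it to the pending request by transaction id and wake up the
 * waiter.  Unmatched responses are dropped.
 */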
2039void
2040bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2041    struct msgbuf_ioctl_resp_hdr *resp)
2042{
2043	struct bwfm_pci_ioctl *ctl, *tmp;
2044	struct mbuf *m;
2045
2046	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2047	    letoh32(resp->msg.request_id));
2048
2049	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2050		if (ctl->transid != letoh16(resp->trans_id))
2051			continue;
2052		ctl->m = m;
2053		ctl->retlen = letoh16(resp->resp_len);
2054		ctl->status = letoh16(resp->compl_hdr.status);
2055		wakeup(ctl);
2056		return;
2057	}
2058
2059	m_freem(m);
2060}
2061