/*	$NetBSD: if_bwfm_pci.c,v 1.13 2022/05/23 13:53:37 rin Exp $	*/
/*	$OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $	*/
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bwfm_pci.c,v 1.13 2022/05/23 13:53:37 rin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/workqueue.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>

#include <net80211/ieee80211_var.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmreg.h>
#include <dev/ic/bwfmvar.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
#define letoh16		htole16
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;
	uint32_t		 r_ptr;
	int			 nitem;
	int			 itemsz;
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};

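/*
 * Minimal stand-in for OpenBSD's if_rxring: it only tracks how many
 * RX slots have been handed out.  See the if_rxr_* helpers below.
 */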
struct if_rxring {
	u_int	rxr_total;
	u_int	rxr_inuse;
};

struct bwfm_cmd_flowring_create {
	struct work		 wq_cookie;
	struct bwfm_pci_softc	*sc;
	struct mbuf		*m;
	int			 flowid;
	int			 prio;
};

struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void			*sc_ih;
	pci_intr_handle_t	*sc_pihp;

	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	struct pool		 sc_flowring_pool;
	struct workqueue	*flowring_wq;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	struct bwfm_pci_dmamem	*sc_ioctl_buf;
	int			 sc_ioctl_reqid;
	uint32_t		 sc_ioctl_resp_pktid;
	uint32_t		 sc_ioctl_resp_ret_len;
	uint32_t		 sc_ioctl_resp_status;
	int			 sc_ioctl_poll;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	char *			bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	(uint64_t)((_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)

static u_int	 if_rxr_get(struct if_rxring *rxr, unsigned int max);
static void	 if_rxr_put(struct if_rxring *rxr, unsigned int n);
static void	 if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);

int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
void		 bwfm_pci_attachhook(device_t);
void		 bwfm_pci_attach(device_t, device_t, void *);
int		 bwfm_pci_detach(device_t, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf **,
		    uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct work *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);

static const struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

static const struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_init = NULL,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
	.bs_rxctl = NULL,
};

static const struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
};

CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
    bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);

static const struct bwfm_firmware_selector bwfm_pci_fwtab[] = {
	BWFM_FW_ENTRY(BRCM_CC_43602_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43602-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43465_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_LE(7), "brcmfmac4350c2-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_GE(8), "brcmfmac4350-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43525_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4356_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4356-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43567_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43569_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43570_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4358_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4358-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4359_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4359-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4365b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4366b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43664_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4371_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4371-pcie"),

	BWFM_FW_ENTRY_END
};

static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM43602), },

	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
		PCI_PRODUCT_BROADCOM_BCM4350), },

	PCI_COMPAT_EOL
};

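/*
 * Compatibility shim: OpenBSD allocates RX mbufs with MCLGETI().  This
 * local version grabs a header mbuf plus external storage of the
 * requested size; the sc and ifp arguments exist only to mirror the
 * OpenBSD signature.
 */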
static struct mbuf *
MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
    struct ifnet *ifp __unused, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

int
bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

void
bwfm_pci_attach(device_t parent, device_t self, void *aux)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_sc.sc_dev = self;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios)) {
		printf(": can't map bar0\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios)) {
		printf(": can't map bar1\n");
		goto bar0;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar1;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf, sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
	    bwfm_pci_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar1;
	}
	printf(": %s\n", intrstr);

	config_mountroot(self, bwfm_pci_attachhook);
	return;

bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
}

void
bwfm_pci_attachhook(device_t self)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_pci_ringinfo ringinfo;
	struct bwfm_firmware_context fwctx;
	uint8_t *ucode;
	size_t ucsize;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		aprint_error_dev(bwfm->sc_dev, "cannot attach chip\n");
		return;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	bwfm_firmware_context_init(&fwctx,
	    bwfm->sc_chip.ch_chip, bwfm->sc_chip.ch_chiprev, NULL,
	    BWFM_FWREQ(BWFM_FILETYPE_UCODE));

	if (!bwfm_firmware_open(bwfm, bwfm_pci_fwtab, &fwctx)) {
		/* Error message already displayed. */
		goto err;
	}

	ucode = bwfm_firmware_data(&fwctx, BWFM_FILETYPE_UCODE, &ucsize);
	KASSERT(ucode != NULL);

	/* Retrieve RAM size from firmware. */
	if (ucsize >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, ucsize) != 0) {
		aprint_error_dev(bwfm->sc_dev, "could not load microcode\n");
		goto err;
	}

	bwfm_firmware_close(&fwctx);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		aprint_error_dev(bwfm->sc_dev,
		    "PCIe version %d unsupported\n", sc->sc_shared_version);
		return;
	}

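	/*
	 * The firmware can export its ring indices either via DMA into
	 * host memory, using 2- or 4-byte entries, or leave them in
	 * TCM; sc_dma_idx_sz == 0 means they are read from TCM.
	 */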
	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			aprint_error_dev(bwfm->sc_dev,
			    "cannot allocate idx buf\n");
			return;
		}

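		/*
		 * Carve the DMA buffer into four back-to-back index
		 * arrays: H2D write, H2D read, D2H write, D2H read.
		 * The firmware learns the host address of each array
		 * through the ringinfo block written back below.
		 */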
		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
	    0, 0, 0, "bwfmpl", NULL, IPL_NET);

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
		goto cleanup;

	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
		goto cleanup;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason, could also be a bug somewhere in this
	 * driver, the firmware needs a bunch of RX buffers otherwise
	 * it won't send any RX complete messages.  64 buffers don't
	 * suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_ioctl_poll = 1;
	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	sc->sc_ioctl_poll = 0;
	return;

cleanup:
	if (sc->flowring_wq != NULL)
		workqueue_destroy(sc->flowring_wq);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	}
	if (sc->sc_ioctl_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);

 err:
	bwfm_firmware_close(&fwctx);
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* TODO: restore NVRAM */

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);

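	/*
	 * Poll for up to two seconds (40 * 50ms) for the firmware to
	 * replace the last word of RAM with a pointer to its shared
	 * info area; a value of 0 means it has not come up yet.
	 */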
	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(device_t self, int flags)
{
	struct bwfm_pci_softc *sc = device_private(self);

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
	    * sizeof(struct bwfm_pci_msgring));
	pool_destroy(&sc->sc_flowring_pool);

	workqueue_destroy(sc->flowring_wq);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
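/*
 * Standard bus_dma(9) sequence for a coherent buffer: create the map,
 * allocate one physical segment, map it into KVA, then load the map.
 * Each step unwinds on failure.
 */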
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    (void **) &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	kmem_free(bdm, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamap_unload(sc->sc_dmat, bdm->bdm_map);
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	kmem_free(bdm, sizeof(*bdm));
}

/*
 * We need a simple mapping from a packet ID to mbufs, because when
 * a transfer completed, we only know the ID so we have to look up
 * the memory for the ID.  This simply looks for an empty slot.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
				/*
				 * Didn't fit.  Maybe it has too many
				 * segments.  If it has only one
				 * segment, fail; otherwise try to
				 * compact it into a single mbuf
				 * segment.
				 */
				if ((*mp)->m_next == NULL)
					return ENOBUFS;
				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
				    NULL, MSGBUF_MAX_PKT_SIZE);
				if (m0 == NULL)
					return ENOBUFS;
				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
				    mtod(m0, void *));
				m0->m_pkthdr.len = m0->m_len =
				    (*mp)->m_pkthdr.len;
				m_freem(*mp);
				*mp = m0;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = *mp;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32(devaddr >> 32);
		req->host_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32(devaddr >> 32);
		req->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

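/*
 * Describe one statically allocated message ring to the firmware: the
 * ring itself lives in host DMA memory, while its descriptor (base
 * address, item count, item size) is written into the TCM block at
 * *ring_mem, which is then advanced past the descriptor.
 */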
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

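/*
 * Local reimplementation of OpenBSD's if_rxr API: if_rxr_get() hands
 * out up to "max" of the remaining free RX slots and if_rxr_put()
 * returns them.  The low-water parameter of if_rxr_init() is accepted
 * but ignored.
 */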
static u_int
if_rxr_get(struct if_rxring *rxr, unsigned int max)
{
	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));

	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
			"rxr->rxr_inuse: %d\n"
			"taken: %d\n"
			"rxr->rxr_total: %d\n",
			rxr->rxr_inuse, taken, rxr->rxr_total);
	rxr->rxr_inuse += taken;

	return taken;
}

static void
if_rxr_put(struct if_rxring *rxr, unsigned int n)
{
	KASSERTMSG(rxr->rxr_inuse >= n,
			"rxr->rxr_inuse: %d\n"
			"n: %d\n"
			"rxr->rxr_total: %d\n",
			rxr->rxr_inuse, n, rxr->rxr_total);

	rxr->rxr_inuse -= n;
}

static void
if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
{
	(void) lwm;

	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can rollback later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

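/*
 * Like bwfm_pci_ring_write_reserve(), but reserve up to "count"
 * consecutive descriptors.  One descriptor is always left free
 * (available - 1), presumably so a completely full ring cannot be
 * confused with an empty one, and the reservation never wraps past
 * the end of the ring.
 */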
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = uimin(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Rollback N descriptors in case we don't actually want
 * to commit to it.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * For each written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	char *buf;
	int avail, processed;

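	/*
	 * read_avail only returns the contiguous run up to the end of
	 * the ring buffer, so if the read pointer wrapped back to 0 we
	 * loop again to pick up the descriptors at the start.
	 */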
again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
{
	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
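		/*
		 * Flowring IDs 0 and 1 belong to the control and
		 * RX-post submission rings; data flowrings start at 2.
		 */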
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, &m))
				m_freem(m);
		}
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		wakeup(&sc->sc_ioctl_buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id));
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

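	/*
	 * Program the BAR0 window onto the core's register space.  The
	 * write is re-issued if a read-back shows it did not stick,
	 * which reportedly can happen on some chips.
	 */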
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

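/*
 * Backplane registers are reached through the sliding BAR0 window:
 * program the window onto the page containing the register, then
 * access the offset within that page through BAR0.
 */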
uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

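/*
 * Map a frame priority to one of the four firmware FIFOs.  The table
 * is indexed here with the WME access category (0-3), though it
 * covers all eight slots; the comments give the IP precedence levels
 * the entries correspond to.
 */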
1654static int bwfm_pci_prio2fifo[8] = {
1655	1, /* best effort */
1656	0, /* IPTOS_PREC_IMMEDIATE */
1657	0, /* IPTOS_PREC_PRIORITY */
1658	1, /* IPTOS_PREC_FLASH */
1659	2, /* IPTOS_PREC_FLASHOVERRIDE */
1660	2, /* IPTOS_PREC_CRITIC_ECP */
1661	3, /* IPTOS_PREC_INTERNETCONTROL */
1662	3, /* IPTOS_PREC_NETCONTROL */
1663};
1664
1665int
1666bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1667{
1668	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1669	uint8_t *da = mtod(m, uint8_t *);
1670	struct ether_header *eh;
1671	int flowid, prio, fifo;
1672	int i, found, ac;
1673
1674	/* No QoS for EAPOL frames. */
1675	eh = mtod(m, struct ether_header *);
1676	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1677	    M_WME_GETAC(m) : WME_AC_BE;
1678
1679	prio = ac;
1680	fifo = bwfm_pci_prio2fifo[prio];
1681
1682	switch (ic->ic_opmode)
1683	{
1684	case IEEE80211_M_STA:
1685		flowid = fifo;
1686		break;
1687#ifndef IEEE80211_STA_ONLY
1688	case IEEE80211_M_HOSTAP:
1689		if (ETHER_IS_MULTICAST(da))
1690			da = __UNCONST(etherbroadcastaddr);
1691		flowid = da[5] * 2 + fifo;
1692		break;
1693#endif
1694	default:
1695		printf("%s: state not supported\n", DEVNAME(sc));
1696		return ENOBUFS;
1697	}
1698
1699	found = 0;
1700	flowid = flowid % sc->sc_max_flowrings;
1701	for (i = 0; i < sc->sc_max_flowrings; i++) {
1702		if (ic->ic_opmode == IEEE80211_M_STA &&
1703		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1704		    sc->sc_flowrings[flowid].fifo == fifo) {
1705			found = 1;
1706			break;
1707		}
1708#ifndef IEEE80211_STA_ONLY
1709		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1710		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1711		    sc->sc_flowrings[flowid].fifo == fifo &&
1712		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
1713			found = 1;
1714			break;
1715		}
1716#endif
1717		flowid = (flowid + 1) % sc->sc_max_flowrings;
1718	}
1719
1720	if (found)
1721		return flowid;
1722
1723	return -1;
1724}
1725
1726void
1727bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
1728{
1729	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1730	struct bwfm_cmd_flowring_create * cmd;
1731	uint8_t *da = mtod(m, uint8_t *);
1732	struct ether_header *eh;
1733	struct bwfm_pci_msgring *ring;
1734	int flowid, prio, fifo;
1735	int i, found, ac;
1736
	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
	if (__predict_false(cmd == NULL))
		return;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: operating mode not supported\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from this condition so far; only a
	 * stop/init cycle can revive the device should it ever happen.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	cmd->sc = sc;
	cmd->m = m;
	cmd->prio = prio;
	cmd->flowid = flowid;
	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
}

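/*
 * Workqueue callback: allocate the flowring's DMA descriptor ring and
 * ask the firmware to open it by posting a MSGBUF_TYPE_FLOW_RING_CREATE
 * request on the control submit ring.  Note that the on-wire
 * flow_ring_id is the flowid offset by 2.
 */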
void
bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
{
	struct bwfm_cmd_flowring_create *cmd =
	    container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
	struct bwfm_pci_softc *sc = cmd->sc;
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;

	da = mtod(cmd->m, uint8_t *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, uint8_t *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	pool_put(&sc->sc_flowring_pool, cmd);
}

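/*
 * Ask the firmware to tear down an open flowring.  The ring is marked
 * RING_CLOSING while the delete request is in flight.
 */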
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		return;
	}

	ring->status = RING_CLOSING;

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
}

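/*
 * Bring the device down by deleting all open flowrings.
 */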
void
bwfm_pci_stop(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	int i;

	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[i];
		if (ring->status == RING_OPEN)
			bwfm_pci_flowring_delete(sc, i);
	}
}

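/*
 * Check whether we can transmit right now: refuse if any flowring is
 * still opening or if no free TX packet id is available.
 */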
int
bwfm_pci_txcheck(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	int i;

	/* If we are transitioning, we cannot send. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[i];
		if (ring->status == RING_OPENING)
			return ENOBUFS;
	}

	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
		sc->sc_tx_pkts_full = 1;
		return ENOBUFS;
	}

	return 0;
}

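/*
 * Queue a frame on its flowring as a MSGBUF_TYPE_TX_POST message.  If
 * the flowring does not exist yet, its creation is kicked off
 * asynchronously and the frame will be sent once the queue restarts.
 */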
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	uint64_t devaddr;
	struct ether_header *eh;
	int flowid, ret, ac;

	flowid = bwfm_pci_flowring_lookup(sc, *mp);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet.  The flowring will be created
		 * asynchronously.  While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now.  When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, *mp);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use flowring %d which is "
		    "transitioning (status %d)\n",
		    DEVNAME(sc), flowid, ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	/* No QoS for EAPOL frames. */
	eh = mtod(*mp, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(*mp) : WME_AC_BE;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	devaddr = paddr + ETHER_HDR_LEN;

	tx->msg.request_id = htole32(pktid);
	tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32(devaddr >> 32);
	tx->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}

#ifdef BWFM_DEBUG
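/*
 * Read and print whatever the firmware has appended to its console
 * ring buffer in TCM since the last call.
 */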
void
bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
{
	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);

	if (newidx != sc->sc_console_readidx)
		DPRINTFN(3, ("BWFM CONSOLE: "));
	while (newidx != sc->sc_console_readidx) {
		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_console_buf_addr + sc->sc_console_readidx);
		sc->sc_console_readidx++;
		if (sc->sc_console_readidx == sc->sc_console_buf_size)
			sc->sc_console_readidx = 0;
		if (ch == '\r')
			continue;
		DPRINTFN(3, ("%c", ch));
	}
}
#endif

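/*
 * Interrupt handler: read and acknowledge the mailbox interrupt
 * status, keep interrupts masked while the completion rings are
 * drained, then unmask them again.
 */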
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	uint32_t status;

	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
		return 0;

	bwfm_pci_intr_disable(sc);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);

	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
		printf("%s: mailbox data event not handled\n", __func__);

	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	bwfm_pci_intr_enable(sc);
	return 1;
}

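/*
 * Interrupts are enabled by unmasking the two function 0 mailbox
 * sources and the device-to-host doorbell; disabling clears the
 * whole mask.
 */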
void
bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
}

void
bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
}

/* Msgbuf protocol implementation */
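/*
 * Send an ioctl to the firmware: post a MSGBUF_TYPE_IOCTLPTR_REQ
 * pointing at the shared ioctl DMA buffer, then sleep until the
 * response arrives or a one second timeout expires.  On success the
 * response payload is copied back into buf.
 */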
int
bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t *len)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct msgbuf_ioctl_req_hdr *req;
	struct mbuf *m;
	size_t buflen;
	int s;

	s = splnet();
	sc->sc_ioctl_resp_pktid = -1;
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for write\n", DEVNAME(sc));
		splx(s);
		return 1;
	}
	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	req->msg.ifidx = 0;
	req->msg.flags = 0;
	req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
	req->cmd = htole32(cmd);
	req->output_buf_len = htole16(*len);
	req->trans_id = htole16(sc->sc_ioctl_reqid++);

	buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
	req->input_buf_len = htole16(buflen);
	req->req_buf_addr.high_addr =
	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
	req->req_buf_addr.low_addr =
	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
	if (buf)
		memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
	else
		memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);

	if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
		printf("%s: timeout waiting for ioctl response\n",
		    DEVNAME(sc));
		return 1;
	}

	s = splnet();
	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
	if (m == NULL) {
		splx(s);
		return 1;
	}

	*len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
	if (buf)
		memcpy(buf, mtod(m, char *), *len);
	m_freem(m);
	splx(s);

	return 0;
}

int
bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t len)
{
	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
}
