1/*	$OpenBSD: if_nep.c,v 1.35 2024/05/24 06:02:56 jsg Exp $	*/
2/*
3 * Copyright (c) 2014, 2015 Mark Kettenis
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "bpfilter.h"
19
20#include <sys/param.h>
21#include <sys/systm.h>
22#include <sys/device.h>
23#include <sys/ioctl.h>
24#include <sys/malloc.h>
25#include <sys/mbuf.h>
26#include <sys/pool.h>
27#include <sys/socket.h>
28
29#include <net/if.h>
30#include <net/if_media.h>
31
32#include <netinet/in.h>
33#include <netinet/if_ether.h>
34
35#if NBPFILTER > 0
36#include <net/bpf.h>
37#endif
38
39#include <dev/mii/miivar.h>
40
41#include <dev/pci/pcireg.h>
42#include <dev/pci/pcivar.h>
43#include <dev/pci/pcidevs.h>
44
45#ifdef __sparc64__
46#include <dev/ofw/openfirm.h>
47#endif
48
49/*
50 * The virtualization features make this a really complex device.  For
51 * now we try to keep things simple and use one logical device per
52 * port, using port numbers as logical device numbers.
53 */
54
55#define PIO		0x000000
56#define FZC_PIO		0x080000
57#define FZC_MAC		0x180000
58#define FZC_IPP		0x280000
59#define FFLP		0x300000
60#define FZC_FFLP	0x380000
61#define ZCP		0x500000
62#define FZC_ZCP		0x580000
63#define DMC		0x600000
64#define FZC_DMC		0x680000
65#define TXC		0x700000
66#define FZC_TXC		0x780000
67#define PIO_LDSV	0x800000
68#define PIO_IMASK0	0xa00000
69#define PIO_IMASK1	0xb00000
70
71#define RST_CTL			(FZC_PIO + 0x00038)
72#define SYS_ERR_MASK		(FZC_PIO + 0x00090)
73#define SYS_ERR_STAT		(FZC_PIO + 0x00098)
74
75#define LDN_RXDMA(chan)		(0 + (chan))
76#define LDN_TXDMA(chan)		(32 + (chan))
77#define LDN_MIF			63
78#define LDN_MAC(port)		(64 + (port))
79#define LDN_SYSERR		68
80
81#define LDSV0(ldg)		(PIO_LDSV + 0x00000 + (ldg) * 0x02000)
82#define LDSV1(ldg)		(PIO_LDSV + 0x00008 + (ldg) * 0x02000)
83#define LDSV2(ldg)		(PIO_LDSV + 0x00010 + (ldg) * 0x02000)
84#define LDGIMGN(ldg)		(PIO_LDSV + 0x00018 + (ldg) * 0x02000)
85#define  LDGIMGN_ARM		(1ULL << 31)
86#define  LDGIMGN_TIMER		(63ULL << 0)
87
88#define LD_IM0(idx)		(PIO_IMASK0 + 0x00000 + (idx) * 0x02000)
89#define  LD_IM0_LDF_MASK	(3ULL << 0)
90#define LD_IM1(idx)		(PIO_IMASK1 + 0x00000 + (idx - 64) * 0x02000)
91#define  LD_IM1_LDF_MASK	(3ULL << 0)
92
93#define SID(ldg)		(FZC_PIO + 0x10200 + (ldg) * 0x00008)
94#define LDG_NUM(ldn)		(FZC_PIO + 0x20000 + (ldn) * 0x00008)
95
96#define ipp_port(port)		(((port & 0x1) << 1) | (port & 0x2) >> 1)
97#define IPP_CFIG(port)		(FZC_IPP + 0x00000 + ipp_port(port) * 0x04000)
98#define  IPP_CFIG_SOFT_RST		(1ULL << 31)
99#define  IPP_CFIG_DFIFO_PIO_W		(1ULL << 5)
100#define  IPP_CFIG_IPP_ENABLE		(1ULL << 0)
101#define IPP_INT_STAT(port)	(FZC_IPP + 0x00040 + ipp_port(port) * 0x04000)
102#define IPP_MSK(port)		(FZC_IPP + 0x00048 + ipp_port(port) * 0x04000)
103#define IPP_DFIFO_RD1(port)	(FZC_IPP + 0x000c0 + ipp_port(port) * 0x04000)
104#define IPP_DFIFO_RD2(port)	(FZC_IPP + 0x000c8 + ipp_port(port) * 0x04000)
105#define IPP_DFIFO_RD3(port)	(FZC_IPP + 0x000d0 + ipp_port(port) * 0x04000)
106#define IPP_DFIFO_RD4(port)	(FZC_IPP + 0x000d8 + ipp_port(port) * 0x04000)
107#define IPP_DFIFO_RD5(port)	(FZC_IPP + 0x000e0 + ipp_port(port) * 0x04000)
108#define IPP_DFIFO_WR1(port)	(FZC_IPP + 0x000e8 + ipp_port(port) * 0x04000)
109#define IPP_DFIFO_WR2(port)	(FZC_IPP + 0x000f0 + ipp_port(port) * 0x04000)
110#define IPP_DFIFO_WR3(port)	(FZC_IPP + 0x000f8 + ipp_port(port) * 0x04000)
111#define IPP_DFIFO_WR4(port)	(FZC_IPP + 0x00100 + ipp_port(port) * 0x04000)
112#define IPP_DFIFO_WR5(port)	(FZC_IPP + 0x00108 + ipp_port(port) * 0x04000)
113#define IPP_DFIFO_RD_PTR(port)	(FZC_IPP + 0x00110 + ipp_port(port) * 0x04000)
114#define IPP_DFIFO_WR_PTR(port)	(FZC_IPP + 0x00118 + ipp_port(port) * 0x04000)
115
116#define IPP_NIU_DFIFO_ENTRIES		1024
117#define	IPP_P0_P1_DFIFO_ENTRIES		2048
118#define IPP_P2_P3_DFIFO_ENTRIES		1024
119
120#define ZCP_CFIG		(FZC_ZCP + 0x00000)
121#define ZCP_INT_STAT		(FZC_ZCP + 0x00008)
122#define ZCP_INT_MASK		(FZC_ZCP + 0x00010)
123
124#define TXC_DMA_MAX(chan)	(FZC_TXC + 0x00000 + (chan) * 0x01000)
125#define TXC_CONTROL		(FZC_TXC + 0x20000)
126#define  TXC_CONTROL_TXC_ENABLED	(1ULL << 4)
127#define TXC_PORT_DMA(port)	(FZC_TXC + 0x20028 + (port) * 0x00100)
128#define TXC_PKT_STUFFED(port)	(FZC_TXC + 0x20030 + (port) * 0x00100)
129#define TXC_PKT_XMIT(port)	(FZC_TXC + 0x20038 + (port) * 0x00100)
130#define TXC_INT_STAT_DBG	(FZC_TXC + 0x20420)
131#define TXC_INT_STAT		(FZC_TXC + 0x20428)
132#define TXC_INT_MASK		(FZC_TXC + 0x20430)
133#define  TXC_INT_MASK_PORT_INT_MASK(port) (0x3fULL << ((port) * 8))
134
135#define XTXMAC_SW_RST(port)	(FZC_MAC + 0x00000 + (port) * 0x06000)
136#define  XTXMAC_SW_RST_REG_RST		(1ULL << 1)
137#define  XTXMAC_SW_RST_SOFT_RST		(1ULL << 0)
138#define XRXMAC_SW_RST(port)	(FZC_MAC + 0x00008 + (port) * 0x06000)
139#define  XRXMAC_SW_RST_REG_RST		(1ULL << 1)
140#define  XRXMAC_SW_RST_SOFT_RST		(1ULL << 0)
141#define XTXMAC_STATUS(port)	(FZC_MAC + 0x00020 + (port) * 0x06000)
142#define XRXMAC_STATUS(port)	(FZC_MAC + 0x00028 + (port) * 0x06000)
143#define XTXMAC_STAT_MSK(port)	(FZC_MAC + 0x00040 + (port) * 0x06000)
144#define XRXMAC_STAT_MSK(port)	(FZC_MAC + 0x00048 + (port) * 0x06000)
145#define XMAC_CONFIG(port)	(FZC_MAC + 0x00060 + (port) * 0x06000)
146#define  XMAC_CONFIG_SEL_CLK_25MHZ	(1ULL << 31)
147#define  XMAC_CONFIG_1G_PCS_BYPASS	(1ULL << 30)
148#define  XMAC_CONFIG_MODE_MASK		(3ULL << 27)
149#define  XMAC_CONFIG_MODE_XGMII		(0ULL << 27)
150#define  XMAC_CONFIG_MODE_GMII		(1ULL << 27)
151#define  XMAC_CONFIG_MODE_MII		(2ULL << 27)
152#define  XMAC_CONFIG_LFS_DISABLE	(1ULL << 26)
153#define  XMAC_CONFIG_LOOPBACK		(1ULL << 25)
154#define  XMAC_CONFIG_TX_OUTPUT_EN	(1ULL << 24)
155#define  XMAC_CONFIG_SEL_POR_CLK_SRC	(1ULL << 23)
156#define  XMAC_CONFIG_HASH_FILTER_EN	(1ULL << 15)
157#define  XMAC_CONFIG_PROMISCUOUS_GROUP	(1ULL << 10)
158#define  XMAC_CONFIG_PROMISCUOUS	(1ULL << 9)
159#define  XMAC_CONFIG_RX_MAC_ENABLE	(1ULL << 8)
160#define  XMAC_CONFIG_ALWAYS_NO_CRC	(1ULL << 3)
161#define  XMAC_CONFIG_VAR_MIN_IPG_EN	(1ULL << 2)
162#define  XMAC_CONFIG_STRETCH_MODE	(1ULL << 1)
163#define  XMAC_CONFIG_TX_ENABLE		(1ULL << 0)
164
165#define XMAC_IPG(port)		(FZC_MAC + 0x00080 + (port) * 0x06000)
166#define  XMAC_IPG_IPG_VALUE1_MASK	(0xffULL << 8)
167#define  XMAC_IPG_IPG_VALUE1_12		(10ULL << 8)
168#define  XMAC_IPG_IPG_VALUE_MASK	(0x07ULL << 0)
169#define  XMAC_IPG_IPG_VALUE_12_15	(3ULL << 0)
170
171#define XMAC_MIN(port)		(FZC_MAC + 0x00088 + (port) * 0x06000)
172#define  XMAC_MIN_RX_MIN_PKT_SIZE_MASK	(0x3ffULL << 20)
173#define  XMAC_MIN_RX_MIN_PKT_SIZE_SHIFT	20
174#define  XMAC_MIN_TX_MIN_PKT_SIZE_MASK	(0x3ffULL << 0)
175#define  XMAC_MIN_TX_MIN_PKT_SIZE_SHIFT	0
176#define XMAC_MAX(port)		(FZC_MAC + 0x00090 + (port) * 0x06000)
177
178#define XMAC_ADDR0(port)	(FZC_MAC + 0x000a0 + (port) * 0x06000)
179#define XMAC_ADDR1(port)	(FZC_MAC + 0x000a8 + (port) * 0x06000)
180#define XMAC_ADDR2(port)	(FZC_MAC + 0x000b0 + (port) * 0x06000)
181
182#define XMAC_ADDR_CMPEN(port)	(FZC_MAC + 0x00208 + (port) * 0x06000)
183
184#define XMAC_ADD_FILT0(port)	(FZC_MAC + 0x00818 + (port) * 0x06000)
185#define XMAC_ADD_FILT1(port)	(FZC_MAC + 0x00820 + (port) * 0x06000)
186#define XMAC_ADD_FILT2(port)	(FZC_MAC + 0x00828 + (port) * 0x06000)
187#define XMAC_ADD_FILT12_MASK(port) (FZC_MAC + 0x00830 + (port) * 0x06000)
188#define XMAC_ADD_FILT00_MASK(port) (FZC_MAC + 0x00838 + (port) * 0x06000)
189
190#define XMAC_HASH_TBL0(port)	(FZC_MAC + 0x00840 + (port) * 0x06000)
191#define XMAC_HASH_TBL(port, i)	(XMAC_HASH_TBL0(port) + (i) * 0x00008)
192
193#define XMAC_HOST_INFO0(port)	(FZC_MAC + 0x00900 + (port) * 0x06000)
194#define XMAC_HOST_INFO(port, i)	(XMAC_HOST_INFO0(port) + (i) * 0x00008)
195
196#define RXMAC_BT_CNT(port)	(FZC_MAC + 0x00100 + (port) * 0x06000)
197
198#define TXMAC_FRM_CNT(port)	(FZC_MAC + 0x00170 + (port) * 0x06000)
199#define TXMAC_BYTE_CNT(port)	(FZC_MAC + 0x00178 + (port) * 0x06000)
200
201#define LINK_FAULT_CNT(port)	(FZC_MAC + 0x00180 + (port) * 0x06000)
202#define XMAC_SM_REG(port)	(FZC_MAC + 0x001a8 + (port) * 0x06000)
203
204#define TXMAC_SW_RST(port)	(FZC_MAC + 0x0c000 + ((port) - 2) * 0x04000)
205#define  TXMAC_SW_RST_SW_RST		(1ULL << 0)
206#define RXMAC_SW_RST(port)	(FZC_MAC + 0x0c008 + ((port) - 2) * 0x04000)
207#define  RXMAC_SW_RST_SW_RST		(1ULL << 0)
208#define TXMAC_CONFIG(port)	(FZC_MAC + 0x0c060 + ((port) - 2) * 0x04000)
209#define  TXMAC_CONFIG_TX_ENABLE		(1ULL << 0)
210#define RXMAC_CONFIG(port)	(FZC_MAC + 0x0c068 + ((port) - 2) * 0x04000)
211#define  RXMAC_CONFIG_ERROR_CHK_DIS	(1ULL << 7)
212#define  RXMAC_CONFIG_ADDR_FILTER_EN	(1ULL << 6)
213#define  RXMAC_CONFIG_HASH_FILTER_EN	(1ULL << 5)
214#define  RXMAC_CONFIG_PROMISCUOUS_GROUP	(1ULL << 4)
215#define  RXMAC_CONFIG_PROMISCUOUS	(1ULL << 3)
216#define  RXMAC_CONFIG_STRIP_FCS		(1ULL << 2)
217#define  RXMAC_CONFIG_STRIP_PAD		(1ULL << 1)
218#define  RXMAC_CONFIG_RX_ENABLE		(1ULL << 0)
219#define MAC_XIF_CONFIG(port)	(FZC_MAC + 0x0c078 + ((port) - 2) * 0x04000)
220#define  MAC_XIF_CONFIG_SEL_CLK_25MHZ	(1ULL << 7)
221#define  MAC_XIF_CONFIG_GMII_MODE	(1ULL << 3)
222#define  MAC_XIF_CONFIG_LOOPBACK	(1ULL << 1)
223#define  MAC_XIF_CONFIG_TX_OUTPUT_EN	(1ULL << 0)
224#define BMAC_MIN(port)		(FZC_MAC + 0x0c0a0 + ((port) - 2) * 0x04000)
225#define BMAC_MAX(port)		(FZC_MAC + 0x0c0a8 + ((port) - 2) * 0x04000)
226#define  BMAC_MAX_BURST_SHIFT		16
227#define MAC_PA_SIZE(port)	(FZC_MAC + 0x0c0b0 + ((port) - 2) * 0x04000)
228#define MAC_CTRL_TYPE(port)	(FZC_MAC + 0x0c0b8 + ((port) - 2) * 0x04000)
229#define BMAC_ADDR0(port)	(FZC_MAC + 0x0c100 + ((port) - 2) * 0x04000)
230#define BMAC_ADDR1(port)	(FZC_MAC + 0x0c108 + ((port) - 2) * 0x04000)
231#define BMAC_ADDR2(port)	(FZC_MAC + 0x0c110 + ((port) - 2) * 0x04000)
232
233#define MAC_ADDR_FILT0(port)	(FZC_MAC + 0x0c298 + ((port) - 2) * 0x04000)
234#define MAC_ADDR_FILT1(port)	(FZC_MAC + 0x0c2a0 + ((port) - 2) * 0x04000)
235#define MAC_ADDR_FILT2(port)	(FZC_MAC + 0x0c2a8 + ((port) - 2) * 0x04000)
236#define MAC_ADDR_FILT12_MASK(port) (FZC_MAC + 0x0c2b0 + ((port) - 2) * 0x04000)
237#define MAC_ADDR_FILT00_MASK(port) (FZC_MAC + 0x0c2b8 + ((port) - 2) * 0x04000)
238
239#define MAC_HASH_TBL0(port)	(FZC_MAC + 0x0c2c0 + ((port) - 2) * 0x04000)
240#define MAC_HASH_TBL(port, i)	(MAC_HASH_TBL0(port) + (i) * 0x00008)
241
242#define RXMAC_FRM_CNT(port)	(FZC_MAC + 0x0c370 + ((port) - 2) * 0x04000)
243#define BMAC_ALTAD_CMPEN(port)	(FZC_MAC + 0x0c3f8 + ((port) - 2) * 0x04000)
244
245#define BMAC_HOST_INFO0(port)	(FZC_MAC + 0x0c400 + ((port) - 2) * 0x04000)
246#define BMAC_HOST_INFO(port, i)	(BMAC_HOST_INFO0(port) + (i) * 0x00008)
247
248#define PCS_PORT_OFFSET(port)	((port < 2) ? ((port) * 0x06000) : \
249					(0x02000 + (port) * 0x4000))
250#define PCS_MII_CTL(port)	(FZC_MAC + 0x04000 + PCS_PORT_OFFSET(port))
251#define  PCS_MII_CTL_RESET		(1ULL << 15)
252#define PCS_DPATH_MODE(port)	(FZC_MAC + 0x040a0 + PCS_PORT_OFFSET(port))
253#define  PCS_DPATH_MODE_MII		(1ULL << 1)
254
255#define MIF_FRAME_OUTPUT	(FZC_MAC + 0x16018)
256#define  MIF_FRAME_DATA			0xffff
257#define  MIF_FRAME_TA0			(1ULL << 16)
258#define  MIF_FRAME_TA1			(1ULL << 17)
259#define  MIF_FRAME_REG_SHIFT		18
260#define  MIF_FRAME_PHY_SHIFT		23
261#define  MIF_FRAME_READ			0x60020000
262#define  MIF_FRAME_WRITE		0x50020000
263#define MIF_CONFIG		(FZC_MAC + 0x16020)
264#define  MIF_CONFIG_INDIRECT_MODE	(1ULL << 15)
265
266#define DEF_PT0_RDC		(FZC_DMC + 0x00008)
267#define DEF_PT_RDC(port)	(DEF_PT0_RDC + (port) * 0x00008)
268#define RDC_TBL(tbl, i)		(FZC_ZCP + 0x10000 + (tbl * 16 + i) * 0x00008)
269
270#define RX_LOG_PAGE_VLD(chan)	(FZC_DMC + 0x20000 + (chan) * 0x00040)
271#define  RX_LOG_PAGE_VLD_PAGE0		(1ULL << 0)
272#define  RX_LOG_PAGE_VLD_PAGE1		(1ULL << 1)
273#define  RX_LOG_PAGE_VLD_FUNC_SHIFT	2
274#define RX_LOG_MASK1(chan)	(FZC_DMC + 0x20008 + (chan) * 0x00040)
275#define RX_LOG_VALUE1(chan)	(FZC_DMC + 0x20010 + (chan) * 0x00040)
276#define RX_LOG_MASK2(chan)	(FZC_DMC + 0x20018 + (chan) * 0x00040)
277#define RX_LOG_VALUE2(chan)	(FZC_DMC + 0x20020 + (chan) * 0x00040)
278#define RX_LOG_PAGE_RELO1(chan)	(FZC_DMC + 0x20028 + (chan) * 0x00040)
279#define RX_LOG_PAGE_RELO2(chan)	(FZC_DMC + 0x20030 + (chan) * 0x00040)
280#define RX_LOG_PAGE_HDL(chan)	(FZC_DMC + 0x20038 + (chan) * 0x00040)
281
282#define RXDMA_CFIG1(chan)	(DMC + 0x00000 + (chan) * 0x00200)
283#define  RXDMA_CFIG1_EN			(1ULL << 31)
284#define  RXDMA_CFIG1_RST		(1ULL << 30)
285#define  RXDMA_CFIG1_QST		(1ULL << 29)
286#define RXDMA_CFIG2(chan)	(DMC + 0x00008 + (chan) * 0x00200)
287#define  RXDMA_CFIG2_OFFSET_MASK	(3ULL << 2)
288#define  RXDMA_CFIG2_OFFSET_0		(0ULL << 2)
289#define  RXDMA_CFIG2_OFFSET_64		(1ULL << 2)
290#define  RXDMA_CFIG2_OFFSET_128		(2ULL << 2)
291#define  RXDMA_CFIG2_FULL_HDR		(1ULL << 0)
292
293#define RBR_CFIG_A(chan)	(DMC + 0x00010 + (chan) * 0x00200)
294#define  RBR_CFIG_A_LEN_SHIFT		48
295#define RBR_CFIG_B(chan)	(DMC + 0x00018 + (chan) * 0x00200)
296#define  RBR_CFIG_B_BLKSIZE_MASK	(3ULL << 24)
297#define  RBR_CFIG_B_BLKSIZE_4K		(0ULL << 24)
298#define  RBR_CFIG_B_BLKSIZE_8K		(1ULL << 24)
299#define  RBR_CFIG_B_BLKSIZE_16K		(2ULL << 24)
300#define  RBR_CFIG_B_BLKSIZE_32K		(3ULL << 24)
301#define  RBR_CFIG_B_VLD2		(1ULL << 23)
302#define  RBR_CFIG_B_BUFSZ2_MASK		(3ULL << 16)
303#define  RBR_CFIG_B_BUFSZ2_2K		(0ULL << 16)
304#define  RBR_CFIG_B_BUFSZ2_4K		(1ULL << 16)
305#define  RBR_CFIG_B_BUFSZ2_8K		(2ULL << 16)
306#define  RBR_CFIG_B_BUFSZ2_16K		(3ULL << 16)
307#define  RBR_CFIG_B_VLD1		(1ULL << 15)
308#define  RBR_CFIG_B_BUFSZ1_MASK		(3ULL << 8)
309#define  RBR_CFIG_B_BUFSZ1_1K		(0ULL << 8)
310#define  RBR_CFIG_B_BUFSZ1_2K		(1ULL << 8)
311#define  RBR_CFIG_B_BUFSZ1_4K		(2ULL << 8)
312#define  RBR_CFIG_B_BUFSZ1_8K		(3ULL << 8)
313#define  RBR_CFIG_B_VLD0		(1ULL << 7)
314#define  RBR_CFIG_B_BUFSZ0_MASK		(3ULL << 0)
315#define  RBR_CFIG_B_BUFSZ0_256		(0ULL << 0)
316#define  RBR_CFIG_B_BUFSZ0_512		(1ULL << 0)
317#define  RBR_CFIG_B_BUFSZ0_1K		(2ULL << 0)
318#define  RBR_CFIG_B_BUFSZ0_2K		(3ULL << 0)
319#define RBR_KICK(chan)		(DMC + 0x00020 + (chan) * 0x00200)
320#define RBR_STAT(chan)		(DMC + 0x00028 + (chan) * 0x00200)
321#define RBR_HDH(chan)		(DMC + 0x00030 + (chan) * 0x00200)
322#define RBR_HDL(chan)		(DMC + 0x00038 + (chan) * 0x00200)
323#define RCRCFIG_A(chan)		(DMC + 0x00040 + (chan) * 0x00200)
324#define  RCRCFIG_A_LEN_SHIFT		48
325#define RCRCFIG_B(chan)		(DMC + 0x00048 + (chan) * 0x00200)
326#define  RCRCFIG_B_PTHRES_SHIFT		16
327#define  RCRCFIG_B_ENTOUT		(1ULL << 15)
328#define RCRSTAT_A(chan)		(DMC + 0x00050 + (chan) * 0x00200)
329#define RCRSTAT_B(chan)		(DMC + 0x00058 + (chan) * 0x00200)
330#define RCRSTAT_C(chan)		(DMC + 0x00060 + (chan) * 0x00200)
331
332#define RX_DMA_ENT_MSK(chan)	(DMC + 0x00068 + (chan) * 0x00200)
333#define  RX_DMA_ENT_MSK_RBR_EMPTY	(1ULL << 3)
334#define RX_DMA_CTL_STAT(chan)	(DMC + 0x00070 + (chan) * 0x00200)
335#define  RX_DMA_CTL_STAT_MEX		(1ULL << 47)
336#define  RX_DMA_CTL_STAT_RCRTHRES	(1ULL << 46)
337#define  RX_DMA_CTL_STAT_RCRTO		(1ULL << 45)
338#define  RX_DMA_CTL_STAT_RBR_EMPTY	(1ULL << 35)
339#define  RX_DMA_CTL_STAT_PTRREAD_SHIFT	16
340#define RX_DMA_CTL_STAT_DBG(chan) (DMC + 0x00098 + (chan) * 0x00200)
341
342#define TX_LOG_PAGE_VLD(chan)	(FZC_DMC + 0x40000 + (chan) * 0x00200)
343#define  TX_LOG_PAGE_VLD_PAGE0		(1ULL << 0)
344#define  TX_LOG_PAGE_VLD_PAGE1		(1ULL << 1)
345#define  TX_LOG_PAGE_VLD_FUNC_SHIFT	2
346#define TX_LOG_MASK1(chan)	(FZC_DMC + 0x40008 + (chan) * 0x00200)
347#define TX_LOG_VALUE1(chan)	(FZC_DMC + 0x40010 + (chan) * 0x00200)
348#define TX_LOG_MASK2(chan)	(FZC_DMC + 0x40018 + (chan) * 0x00200)
349#define TX_LOG_VALUE2(chan)	(FZC_DMC + 0x40020 + (chan) * 0x00200)
350#define TX_LOG_PAGE_RELO1(chan)	(FZC_DMC + 0x40028 + (chan) * 0x00200)
351#define TX_LOG_PAGE_RELO2(chan)	(FZC_DMC + 0x40030 + (chan) * 0x00200)
352#define TX_LOG_PAGE_HDL(chan)	(FZC_DMC + 0x40038 + (chan) * 0x00200)
353
354#define TX_RNG_CFIG(chan)	(DMC + 0x40000 + (chan) * 0x00200)
355#define  TX_RNG_CFIG_LEN_SHIFT		48
356#define TX_RING_HDL(chan)	(DMC + 0x40010 + (chan) * 0x00200)
357#define TX_RING_KICK(chan)	(DMC + 0x40018 + (chan) * 0x00200)
358#define  TX_RING_KICK_WRAP		(1ULL << 19)
359#define TX_ENT_MSK(chan)	(DMC + 0x40020 + (chan) * 0x00200)
360#define TX_CS(chan)		(DMC + 0x40028 + (chan) * 0x00200)
361#define  TX_CS_PKT_CNT_MASK		(0xfffULL << 48)
362#define  TX_CS_PKT_CNT_SHIFT		48
363#define  TX_CS_RST			(1ULL << 31)
364#define  TX_CS_STOP_N_GO		(1ULL << 28)
365#define  TX_CS_SNG_STATE		(1ULL << 27)
366#define TDMC_INTR_DBG(chan)	(DMC + 0x40060 + (chan) * 0x00200)
367#define TXDMA_MBH(chan)		(DMC + 0x40030 + (chan) * 0x00200)
368#define TXDMA_MBL(chan)		(DMC + 0x40038 + (chan) * 0x00200)
369#define TX_RNG_ERR_LOGH(chan)	(DMC + 0x40048 + (chan) * 0x00200)
370#define TX_RNG_ERR_LOGL(chan)	(DMC + 0x40050 + (chan) * 0x00200)
371
372#define RXD_MULTI		(1ULL << 63)
373#define RXD_L2_LEN_MASK		(0x3fffULL << 40)
374#define RXD_L2_LEN_SHIFT	40
375#define RXD_PKT_BUF_ADDR_MASK	0x3fffffffffULL
376#define RXD_PKT_BUF_ADDR_SHIFT	6
377
/* A receive buffer block: one DMA map and the page-sized buffer it covers. */
struct nep_block {
	bus_dmamap_t	nb_map;		/* DMA map for the buffer */
	void		*nb_block;	/* KVA of buffer from nep_block_pool */
};
382
383#define NEP_NRBDESC	256
384#define NEP_NRCDESC	512
385
386#define TXD_SOP			(1ULL << 63)
387#define TXD_MARK		(1ULL << 62)
388#define TXD_NUM_PTR_SHIFT	58
389#define TXD_TR_LEN_SHIFT	44
390
/* Software header prepended to transmit packets (two 64-bit words). */
struct nep_txbuf_hdr {
	uint64_t	nh_flags;
	uint64_t	nh_reserved;
};
395
/* Per-descriptor transmit bookkeeping: the DMA map and the mbuf it maps. */
struct nep_buf {
	bus_dmamap_t	nb_map;
	struct mbuf	*nb_m;
};
400
401#define NEP_NTXDESC	256
402#define NEP_NTXSEGS	15
403
404struct nep_dmamem {
405	bus_dmamap_t		ndm_map;
406	bus_dma_segment_t	ndm_seg;
407	size_t			ndm_size;
408	caddr_t			ndm_kva;
409};
410#define NEP_DMA_MAP(_ndm)	((_ndm)->ndm_map)
411#define NEP_DMA_LEN(_ndm)	((_ndm)->ndm_size)
412#define NEP_DMA_DVA(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_addr)
413#define NEP_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva);
414
415struct pool *nep_block_pool;
416
/*
 * Per-port softc.  Each PCI function is driven as an independent
 * logical device, with the port number used as the logical device
 * number.
 */
struct nep_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;		/* ifnet + ethernet state */
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_memt;
	bus_space_handle_t 	sc_memh;
	bus_size_t		sc_mems;
	void			*sc_ih;		/* interrupt handler cookie */

	int			sc_port;	/* port == PCI function number */

	/* Transmit ring state. */
	struct nep_dmamem	*sc_txring;
	struct nep_buf		*sc_txbuf;
	uint64_t		*sc_txdesc;
	int			sc_tx_prod;
	int			sc_tx_cnt;
	int			sc_tx_cons;

	uint64_t		sc_wrap;	/* TX_RING_KICK wrap state */
	uint16_t		sc_pkt_cnt;	/* last TX_CS packet count */

	/* Receive block ring and receive completion ring state. */
	struct nep_dmamem	*sc_rbring;
	struct nep_block	*sc_rb;
	uint32_t		*sc_rbdesc;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_prod;
	struct nep_dmamem	*sc_rcring;
	uint64_t		*sc_rcdesc;
	int			sc_rx_cons;

	struct nep_dmamem	*sc_rxmbox;	/* receive mailbox */

	struct timeout		sc_tick;	/* runs nep_tick() */
};
455
456int	nep_match(struct device *, void *, void *);
457void	nep_attach(struct device *, struct device *, void *);
458
/* Autoconf attachment glue. */
const struct cfattach nep_ca = {
	sizeof(struct nep_softc), nep_match, nep_attach
};
462
/* Driver definition; device instances show up as nep0, nep1, ... */
struct cfdriver nep_cd = {
	NULL, "nep", DV_DULL
};
466
467static u_int	nep_mextfree_idx;
468
469int	nep_pci_enaddr(struct nep_softc *, struct pci_attach_args *);
470
471uint64_t nep_read(struct nep_softc *, uint32_t);
472void	nep_write(struct nep_softc *, uint32_t, uint64_t);
473int	nep_mii_readreg(struct device *, int, int);
474void	nep_mii_writereg(struct device *, int, int, int);
475void	nep_mii_statchg(struct device *);
476void	nep_xmac_mii_statchg(struct nep_softc *);
477void	nep_bmac_mii_statchg(struct nep_softc *);
478int	nep_media_change(struct ifnet *);
479void	nep_media_status(struct ifnet *, struct ifmediareq *);
480int	nep_intr(void *);
481
482void	nep_rx_proc(struct nep_softc *);
483void	nep_extfree(caddr_t, u_int, void *);
484void	nep_tx_proc(struct nep_softc *);
485
486void	nep_init_ipp(struct nep_softc *);
487void	nep_ipp_clear_dfifo(struct nep_softc *, uint64_t);
488void	nep_init_rx_mac(struct nep_softc *);
489void	nep_init_rx_xmac(struct nep_softc *);
490void	nep_init_rx_bmac(struct nep_softc *);
491void	nep_init_rx_channel(struct nep_softc *, int);
492void	nep_init_tx_mac(struct nep_softc *);
493void	nep_init_tx_xmac(struct nep_softc *);
494void	nep_init_tx_bmac(struct nep_softc *);
495void	nep_init_tx_channel(struct nep_softc *, int);
496void	nep_enable_rx_mac(struct nep_softc *);
497void	nep_disable_rx_mac(struct nep_softc *);
498void	nep_stop_dma(struct nep_softc *);
499
500void	nep_fill_rx_ring(struct nep_softc *);
501
502void	nep_up(struct nep_softc *);
503void	nep_down(struct nep_softc *);
504void	nep_iff(struct nep_softc *);
505int	nep_encap(struct nep_softc *, struct mbuf **, int *);
506
507void	nep_start(struct ifnet *);
508void	nep_watchdog(struct ifnet *);
509void	nep_tick(void *);
510int	nep_ioctl(struct ifnet *, u_long, caddr_t);
511
512struct nep_dmamem *nep_dmamem_alloc(struct nep_softc *, size_t);
513void	nep_dmamem_free(struct nep_softc *, struct nep_dmamem *);
514
515/*
516 * SUNW,pcie-neptune: 4x1G onboard on T5140/T5240
517 * SUNW,pcie-qgc: 4x1G, "Sun Quad GbE UTP x8 PCI Express Card"
518 * SUNW,pcie-qgc-pem: 4x1G, "Sun Quad GbE UTP x8 PCIe ExpressModule"
519 * SUNW,pcie-2xgf: 2x10G, "Sun Dual 10GbE XFP PCI Express Card"
520 * SUNW,pcie-2xgf-pem: 2x10G, "Sun Dual 10GbE XFP PCIe ExpressModule"
521 */
522int
523nep_match(struct device *parent, void *match, void *aux)
524{
525	struct pci_attach_args *pa = aux;
526
527	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN &&
528	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_NEPTUNE)
529		return (1);
530	return (0);
531}
532
/*
 * Attach one port of the Neptune controller.  Maps the registers,
 * hooks up the (preferably MSI) interrupt, programs the interrupt
 * routing for this port's logical devices, determines the MAC
 * address and attaches the network interface and PHYs.
 */
void
nep_attach(struct device *parent, struct device *self, void *aux)
{
	struct nep_softc *sc = (struct nep_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	pcireg_t memtype;
	uint64_t val;

	/* Register our external mbuf free callback once, driver-wide. */
	if (nep_mextfree_idx == 0)
		nep_mextfree_idx = mextfree_register(nep_extfree);

	sc->sc_dmat = pa->pa_dmat;

	memtype = PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT;
	if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": can't map registers\n");
		return;
	}

	/* Prefer MSI; fall back to a legacy interrupt. */
	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		return;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih =  pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    nep_intr, sc, self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		return;
	}

	printf(": %s", intrstr);

	/* One logical device per port; port number == PCI function. */
	sc->sc_port = pa->pa_function;

	/* Steer this port's logical devices into its own device group. */
	nep_write(sc, SID(sc->sc_port), pa->pa_function << 5);
	nep_write(sc, LDG_NUM(LDN_RXDMA(sc->sc_port)), sc->sc_port);
	nep_write(sc, LDG_NUM(LDN_TXDMA(sc->sc_port)), sc->sc_port);
	nep_write(sc, LDG_NUM(LDN_MAC(sc->sc_port)), sc->sc_port);

	/* Port 0 gets the MIF and error interrupts. */
	if (sc->sc_port == 0) {
		nep_write(sc, LDG_NUM(LDN_MIF), sc->sc_port);
		nep_write(sc, LDG_NUM(LDN_SYSERR), sc->sc_port);
		nep_write(sc, ZCP_INT_MASK, 0);
	}

#ifdef __sparc64__
	/*
	 * On sparc64, prefer the MAC address from the firmware device
	 * tree; fall back to the expansion ROM VPD otherwise.
	 */
	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
	    sc->sc_lladdr, ETHER_ADDR_LEN) <= 0)
#endif
		nep_pci_enaddr(sc, pa);

	printf(", address %s\n", ether_sprintf(sc->sc_lladdr));

	/*
	 * Receive buffer blocks come from a pool shared by all ports;
	 * the first port to attach creates it.  (M_WAITOK means the
	 * malloc cannot fail, so the NULL check is belt-and-braces.)
	 */
	if (nep_block_pool == NULL) {
		nep_block_pool = malloc(sizeof(*nep_block_pool),
		    M_DEVBUF, M_WAITOK);
		if (nep_block_pool == NULL) {
			printf("%s: unable to allocate block pool\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		pool_init(nep_block_pool, PAGE_SIZE, 0, IPL_NET, 0,
		    "nepblk", NULL);
	}

	/* Run the MIF in direct (frame) mode for PHY access. */
	val = nep_read(sc, MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nep_write(sc, MIF_CONFIG, val);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nep_ioctl;
	ifp->if_start = nep_start;
	ifp->if_watchdog = nep_watchdog;

	mii->mii_ifp = ifp;
	mii->mii_readreg = nep_mii_readreg;
	mii->mii_writereg = nep_mii_writereg;
	mii->mii_statchg = nep_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, nep_media_change, nep_media_status);

	/*
	 * The PHYs are wired up in reverse order on the 4x1G (RGMII)
	 * configuration.
	 */
	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    sc->sc_port ^ 0x3, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick, nep_tick, sc);

	/* Enable the MIF and error interrupts. */
	if (sc->sc_port == 0) {
		nep_write(sc, LD_IM0(LDN_MIF), 0);
		nep_write(sc, LD_IM1(LDN_SYSERR), 0);
	}
}
653
654#define PROMHDR_PTR_DATA	0x18
655#define PROMDATA_PTR_VPD	0x08
656#define PROMDATA_LEN		0x10
657#define PROMDATA_TYPE		0x14
658
659static const uint8_t nep_promhdr[] = { 0x55, 0xaa };
660static const uint8_t nep_promdat[] = {
661	'P', 'C', 'I', 'R',
662	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
663	PCI_PRODUCT_SUN_NEPTUNE & 0xff, PCI_PRODUCT_SUN_NEPTUNE >> 8
664};
665
/*
 * Extract the MAC address from the expansion ROM.  The address is
 * stored as a "local-mac-address" instance property inside the VPD
 * area of the ROM's code type 1 (Open Firmware) image.  Returns 0
 * on success, -1 on failure.
 */
int
nep_pci_enaddr(struct nep_softc *sc, struct pci_attach_args *pa)
{
	struct pci_vpd_largeres *res;
	struct pci_vpd *vpd;
	bus_space_handle_t romh;
	bus_space_tag_t romt;
	bus_size_t romsize = 0;
	u_int8_t buf[32], *desc;
	pcireg_t address;
	int dataoff, vpdoff, len;
	int off = 0;
	int rv = -1;

	if (pci_mapreg_map(pa, PCI_ROM_REG, PCI_MAPREG_TYPE_MEM, 0,
	    &romt, &romh, 0, &romsize, 0))
		return (-1);

	/* Enable ROM decoding while we scan it. */
	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	address |= PCI_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);

	/* Walk the chain of ROM images, looking for the right one. */
	while (off < romsize) {
		/* Every image starts with the 0x55 0xaa signature. */
		bus_space_read_region_1(romt, romh, off, buf, sizeof(buf));
		if (memcmp(buf, nep_promhdr, sizeof(nep_promhdr)))
			goto fail;

		dataoff =
		    buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
		if (dataoff < 0x1c)
			goto fail;
		dataoff += off;

		/* The PCI data structure must match our vendor/product. */
		bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
		if (memcmp(buf, nep_promdat, sizeof(nep_promdat)))
			goto fail;

		/* Code type 1 is the Open Firmware image with the VPD. */
		if (buf[PROMDATA_TYPE] == 1)
		    break;

		/* Advance to the next image; length is in 512-byte units. */
		len = buf[PROMDATA_LEN] | (buf[PROMDATA_LEN + 1] << 8);
		off += len * 512;
	}

	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
	if (vpdoff < 0x1c)
		goto fail;
	vpdoff += off;

next:
	/* Walk the VPD "large resource" records. */
	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
	if (!PCI_VPDRES_ISLARGE(buf[0]))
		goto fail;

	res = (struct pci_vpd_largeres *)buf;
	vpdoff += sizeof(*res);

	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
	switch(PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
		/* Skip identifier string. */
		vpdoff += len;
		goto next;

	case PCI_VPDRES_TYPE_VPD:
		/* Scan the keyword/value pairs in this resource. */
		while (len > 0) {
			bus_space_read_region_1(romt, romh, vpdoff,
			     buf, sizeof(buf));

			vpd = (struct pci_vpd *)buf;
			vpdoff += sizeof(*vpd) + vpd->vpd_len;
			len -= sizeof(*vpd) + vpd->vpd_len;

			/*
			 * We're looking for an "Enhanced" VPD...
			 */
			if (vpd->vpd_key0 != 'Z')
				continue;

			desc = buf + sizeof(*vpd);

			/*
			 * ...which is an instance property...
			 */
			if (desc[0] != 'I')
				continue;
			desc += 3;

			/*
			 * ...that's a byte array with the proper
			 * length for a MAC address...
			 */
			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
				continue;
			desc += 2;

			/*
			 * ...named "local-mac-address".
			 */
			if (strcmp(desc, "local-mac-address") != 0)
				continue;
			desc += strlen("local-mac-address") + 1;

			/* Each function (port) gets a consecutive address. */
			memcpy(sc->sc_ac.ac_enaddr, desc, ETHER_ADDR_LEN);
			sc->sc_ac.ac_enaddr[5] += pa->pa_function;
			rv = 0;
		}
		break;

	default:
		goto fail;
	}

 fail:
	if (romsize != 0)
		bus_space_unmap(romt, romh, romsize);

	/* Disable ROM decoding again. */
	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	address &= ~PCI_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);

	return (rv);
}
789
790uint64_t
791nep_read(struct nep_softc *sc, uint32_t reg)
792{
793	return (bus_space_read_8(sc->sc_memt, sc->sc_memh, reg));
794}
795
/*
 * Write a 64-bit device register.
 */
void
nep_write(struct nep_softc *sc, uint32_t reg, uint64_t value)
{
	bus_space_write_8(sc->sc_memt, sc->sc_memh, reg, value);
}
801
802int
803nep_mii_readreg(struct device *self, int phy, int reg)
804{
805	struct nep_softc *sc = (struct nep_softc *)self;
806	uint64_t frame;
807	int n;
808
809	frame = MIF_FRAME_READ;
810	frame |= (reg << MIF_FRAME_REG_SHIFT) | (phy << MIF_FRAME_PHY_SHIFT);
811	nep_write(sc, MIF_FRAME_OUTPUT, frame);
812	for (n = 0; n < 1000; n++) {
813		delay(10);
814		frame = nep_read(sc, MIF_FRAME_OUTPUT);
815		if (frame & MIF_FRAME_TA0)
816			return (frame & MIF_FRAME_DATA);
817	}
818
819	printf("%s: %s timeout\n", sc->sc_dev.dv_xname, __func__);
820	return (0);
821}
822
823void
824nep_mii_writereg(struct device *self, int phy, int reg, int val)
825{
826	struct nep_softc *sc = (struct nep_softc *)self;
827	uint64_t frame;
828	int n;
829
830	frame = MIF_FRAME_WRITE;
831	frame |= (reg << MIF_FRAME_REG_SHIFT) | (phy << MIF_FRAME_PHY_SHIFT);
832	frame |= (val & MIF_FRAME_DATA);
833	nep_write(sc, MIF_FRAME_OUTPUT, frame);
834	for (n = 0; n < 1000; n++) {
835		delay(10);
836		frame = nep_read(sc, MIF_FRAME_OUTPUT);
837		if (frame & MIF_FRAME_TA0)
838			return;
839	}
840
841	printf("%s: %s timeout\n", sc->sc_dev.dv_xname, __func__);
842	return;
843}
844
845void
846nep_mii_statchg(struct device *dev)
847{
848	struct nep_softc *sc = (struct nep_softc *)dev;
849
850	if (sc->sc_port < 2)
851		nep_xmac_mii_statchg(sc);
852	else
853		nep_bmac_mii_statchg(sc);
854}
855
/*
 * Reprogram the XMAC configuration register to match the media
 * parameters negotiated by the PHY.
 */
void
nep_xmac_mii_statchg(struct nep_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint64_t val;

	val = nep_read(sc, XMAC_CONFIG(sc->sc_port));

	/* Select the 25MHz clock source for 100Mb/s operation. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
	else
		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;

	val |= XMAC_CONFIG_1G_PCS_BYPASS;

	/* GMII for gigabit media, plain MII otherwise. */
	val &= ~XMAC_CONFIG_MODE_MASK;
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= XMAC_CONFIG_MODE_GMII;
	else
		val |= XMAC_CONFIG_MODE_MII;

	val |= XMAC_CONFIG_LFS_DISABLE;

	/* Mirror the media loopback flag into the MAC. */
	if (mii->mii_media_active & IFM_LOOP)
		val |= XMAC_CONFIG_LOOPBACK;
	else
		val &= ~XMAC_CONFIG_LOOPBACK;

	val |= XMAC_CONFIG_TX_OUTPUT_EN;

	nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
}
889
890void
891nep_bmac_mii_statchg(struct nep_softc *sc)
892{
893	struct mii_data *mii = &sc->sc_mii;
894	uint64_t val;
895
896	val = nep_read(sc, MAC_XIF_CONFIG(sc->sc_port));
897
898	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
899		val |= MAC_XIF_CONFIG_SEL_CLK_25MHZ;
900	else
901		val &= MAC_XIF_CONFIG_SEL_CLK_25MHZ;
902
903	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
904	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
905		val |= MAC_XIF_CONFIG_GMII_MODE;
906	else
907		val &= ~MAC_XIF_CONFIG_GMII_MODE;
908
909	if (mii->mii_media_active & IFM_LOOP)
910		val |= MAC_XIF_CONFIG_LOOPBACK;
911	else
912		val &= ~MAC_XIF_CONFIG_LOOPBACK;
913
914	val |= MAC_XIF_CONFIG_TX_OUTPUT_EN;
915
916	nep_write(sc, MAC_XIF_CONFIG(sc->sc_port), val);
917}
918
919int
920nep_media_change(struct ifnet *ifp)
921{
922	struct nep_softc *sc = ifp->if_softc;
923
924	if (LIST_FIRST(&sc->sc_mii.mii_phys))
925		mii_mediachg(&sc->sc_mii);
926
927	return (0);
928}
929
930void
931nep_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
932{
933	struct nep_softc *sc = ifp->if_softc;
934
935	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
936		mii_pollstat(&sc->sc_mii);
937		ifmr->ifm_active = sc->sc_mii.mii_media_active;
938		ifmr->ifm_status = sc->sc_mii.mii_media_status;
939	}
940}
941
/*
 * Interrupt handler.  Read the three logical device state vectors and
 * service the Tx and Rx DMA channels that are flagged as pending.
 */
int
nep_intr(void *arg)
{
	struct nep_softc *sc = arg;
	uint64_t sv0, sv1, sv2;
	int rearm = 0;

	sv0 = nep_read(sc, LDSV0(sc->sc_port));
	sv1 = nep_read(sc, LDSV1(sc->sc_port));
	sv2 = nep_read(sc, LDSV2(sc->sc_port));

	/* Nothing pending at all: not our interrupt. */
	if ((sv0 | sv1 | sv2) == 0)
		return (0);

	if (sv0 & (1ULL << LDN_TXDMA(sc->sc_port))) {
		nep_tx_proc(sc);
		rearm = 1;
	}

	if (sv0 & (1ULL << LDN_RXDMA(sc->sc_port))) {
		nep_rx_proc(sc);
		rearm = 1;
	}

	/* Re-arm the interrupt group; log any source we don't handle. */
	if (rearm)
		nep_write(sc, LDGIMGN(sc->sc_port), LDGIMGN_ARM | 2);
	else
		printf("%s: %s %llx %llx %llx\n", sc->sc_dev.dv_xname,
		    __func__, sv0, sv1, sv2);

	return (1);
}
974
975void
976nep_rx_proc(struct nep_softc *sc)
977{
978	struct ifnet *ifp = &sc->sc_ac.ac_if;
979	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
980	uint64_t val;
981	uint16_t count;
982	uint16_t pktread = 0, ptrread = 0;
983	uint64_t rxd;
984	uint64_t addr;
985	bus_addr_t page;
986	bus_size_t off;
987	char *block;
988	struct mbuf *m;
989	int idx, len, i;
990
991	val = nep_read(sc, RX_DMA_CTL_STAT(sc->sc_port));
992	nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port),
993	    RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO);
994
995	bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_rcring), 0,
996	    NEP_DMA_LEN(sc->sc_rcring), BUS_DMASYNC_POSTREAD);
997
998	count = nep_read(sc, RCRSTAT_A(sc->sc_port));
999	while (count > 0) {
1000		idx = sc->sc_rx_cons;
1001		KASSERT(idx < NEP_NRCDESC);
1002
1003		rxd = letoh64(sc->sc_rcdesc[idx]);
1004
1005		addr = (rxd & RXD_PKT_BUF_ADDR_MASK) << RXD_PKT_BUF_ADDR_SHIFT;
1006		len = (rxd & RXD_L2_LEN_MASK) >> RXD_L2_LEN_SHIFT;
1007		page = addr & ~PAGE_MASK;
1008		off = addr & PAGE_MASK;
1009		block = NULL;
1010		for (i = 0; i < NEP_NRBDESC; i++) {
1011			if (sc->sc_rb[i].nb_block &&
1012			    sc->sc_rb[i].nb_map->dm_segs[0].ds_addr == page) {
1013				block = sc->sc_rb[i].nb_block;
1014				break;
1015			}
1016		}
1017		if (block == NULL) {
1018			m = NULL;
1019		} else {
1020			bus_dmamap_unload(sc->sc_dmat, sc->sc_rb[i].nb_map);
1021			sc->sc_rb[i].nb_block = NULL;
1022
1023			MGETHDR(m, M_DONTWAIT, MT_DATA);
1024		}
1025
1026		if (m == NULL) {
1027			ifp->if_ierrors++;
1028		} else {
1029			MEXTADD(m, block + off, PAGE_SIZE, M_EXTWR,
1030			    nep_mextfree_idx, block);
1031			m->m_pkthdr.len = m->m_len = len;
1032			m->m_data += ETHER_ALIGN;
1033
1034			ml_enqueue(&ml, m);
1035		}
1036
1037		if_rxr_put(&sc->sc_rx_ring, 1);
1038		if ((rxd & RXD_MULTI) == 0) {
1039			count--;
1040			pktread++;
1041		}
1042		ptrread++;
1043		sc->sc_rx_cons++;
1044		if (sc->sc_rx_cons >= NEP_NRCDESC)
1045			sc->sc_rx_cons = 0;
1046	}
1047
1048	bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_rcring), 0,
1049	    NEP_DMA_LEN(sc->sc_rcring), BUS_DMASYNC_PREREAD);
1050
1051	if (ifiq_input(&ifp->if_rcv, &ml))
1052		if_rxr_livelocked(&sc->sc_rx_ring);
1053
1054	nep_fill_rx_ring(sc);
1055
1056	val = pktread | (ptrread << RX_DMA_CTL_STAT_PTRREAD_SHIFT);
1057	val |= RX_DMA_CTL_STAT_MEX;
1058	nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port), val);
1059}
1060
/*
 * External-storage free callback for Rx mbufs: return the backing
 * block to the Rx block pool.
 */
void
nep_extfree(caddr_t buf, u_int size, void *arg)
{
	pool_put(nep_block_pool, arg);
}
1066
/*
 * Transmit completion: reclaim mbufs and DMA maps for packets the
 * chip has finished sending, driven by the hardware packet counter.
 */
void
nep_tx_proc(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct nep_buf *txb;
	uint64_t val;
	uint16_t pkt_cnt, count;
	int idx;

	/*
	 * The hardware packet counter wraps; compute the number of
	 * newly completed packets modulo the counter's width.
	 */
	val = nep_read(sc, TX_CS(sc->sc_port));
	pkt_cnt = (val & TX_CS_PKT_CNT_MASK) >> TX_CS_PKT_CNT_SHIFT;
	count = (pkt_cnt - sc->sc_pkt_cnt);
	count &= (TX_CS_PKT_CNT_MASK >> TX_CS_PKT_CNT_SHIFT);
	sc->sc_pkt_cnt = pkt_cnt;

	while (count > 0) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < NEP_NTXDESC);

		/* Only the descriptor holding the mbuf ends a packet. */
		txb = &sc->sc_txbuf[idx];
		if (txb->nb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->nb_map, 0,
			    txb->nb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->nb_map);

			m_freem(txb->nb_m);
			txb->nb_m = NULL;
			count--;
		}

		/* Descriptors freed up; allow transmit to resume. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;
		sc->sc_tx_cons++;
		if (sc->sc_tx_cons >= NEP_NTXDESC)
			sc->sc_tx_cons = 0;
	}

	/* Everything reclaimed; cancel the transmit watchdog. */
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1108
/*
 * Initialize the IPP (input packet processor) for this port: clear
 * its DFIFO, soft-reset the unit, then enable it with all interrupt
 * sources unmasked.
 */
void
nep_init_ipp(struct nep_softc *sc)
{
	uint64_t val;
	int num_entries;
	int n, i;

	/* Ports 0/1 have a larger DFIFO than ports 2/3. */
	if (sc->sc_port < 2)
		num_entries = IPP_P0_P1_DFIFO_ENTRIES;
	else
		num_entries = IPP_P2_P3_DFIFO_ENTRIES;

	for (i = 0; i < num_entries; i++)
		nep_ipp_clear_dfifo(sc, i);

	/* Drain the interrupt status (read twice, presumably
	 * read-to-clear -- NOTE(review): confirm against the chip docs). */
	(void)nep_read(sc, IPP_INT_STAT(sc->sc_port));
	(void)nep_read(sc, IPP_INT_STAT(sc->sc_port));

	/* Soft-reset and wait for the reset bit to self-clear. */
	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_SOFT_RST;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);
	n = 1000;
	while (--n) {
		val = nep_read(sc, IPP_CFIG(sc->sc_port));
		if ((val & IPP_CFIG_SOFT_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting IPP\n");

	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_IPP_ENABLE;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	/* Unmask all IPP interrupt sources. */
	nep_write(sc, IPP_MSK(sc->sc_port), 0);
}
1145
/*
 * Zero a single IPP DFIFO entry at `addr' via PIO writes, then read
 * the entry back through the read pointer.
 */
void
nep_ipp_clear_dfifo(struct nep_softc *sc, uint64_t addr)
{
	uint64_t val;

	/* Enable PIO write access to the DFIFO. */
	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_DFIFO_PIO_W;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_write(sc, IPP_DFIFO_WR_PTR(sc->sc_port), addr);
	nep_write(sc, IPP_DFIFO_WR1(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR2(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR3(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR4(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR5(sc->sc_port), 0);

	/* Back to normal (non-PIO) DFIFO operation. */
	val &= ~IPP_CFIG_DFIFO_PIO_W;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_write(sc, IPP_DFIFO_RD_PTR(sc->sc_port), addr);
	(void)nep_read(sc, IPP_DFIFO_RD1(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD2(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD3(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD4(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD5(sc->sc_port));
}
1172
1173void
1174nep_init_rx_mac(struct nep_softc *sc)
1175{
1176	if (sc->sc_port < 2)
1177		nep_init_rx_xmac(sc);
1178	else
1179		nep_init_rx_bmac(sc);
1180}
1181
1182void
1183nep_init_rx_xmac(struct nep_softc *sc)
1184{
1185	uint64_t addr0, addr1, addr2;
1186	uint64_t val;
1187	int n, i;
1188
1189	nep_write(sc, XRXMAC_SW_RST(sc->sc_port),
1190	    XRXMAC_SW_RST_REG_RST | XRXMAC_SW_RST_SOFT_RST);
1191	n = 1000;
1192	while (--n) {
1193		val = nep_read(sc, XRXMAC_SW_RST(sc->sc_port));
1194		if ((val & (XRXMAC_SW_RST_REG_RST |
1195		    XRXMAC_SW_RST_SOFT_RST)) == 0)
1196			break;
1197	}
1198	if (n == 0)
1199		printf("timeout resetting Rx MAC\n");
1200
1201	addr0 = (sc->sc_lladdr[4] << 8) | sc->sc_lladdr[5];
1202	addr1 = (sc->sc_lladdr[2] << 8) | sc->sc_lladdr[3];
1203	addr2 = (sc->sc_lladdr[0] << 8) | sc->sc_lladdr[1];
1204	nep_write(sc, XMAC_ADDR0(sc->sc_port), addr0);
1205	nep_write(sc, XMAC_ADDR1(sc->sc_port), addr1);
1206	nep_write(sc, XMAC_ADDR2(sc->sc_port), addr2);
1207
1208	nep_write(sc, XMAC_ADDR_CMPEN(sc->sc_port), 0);
1209
1210	nep_write(sc, XMAC_ADD_FILT0(sc->sc_port), 0);
1211	nep_write(sc, XMAC_ADD_FILT1(sc->sc_port), 0);
1212	nep_write(sc, XMAC_ADD_FILT2(sc->sc_port), 0);
1213	nep_write(sc, XMAC_ADD_FILT12_MASK(sc->sc_port), 0);
1214	nep_write(sc, XMAC_ADD_FILT00_MASK(sc->sc_port), 0);
1215
1216	for (i = 0; i < 16; i++)
1217		nep_write(sc, XMAC_HASH_TBL(sc->sc_port, i), 0);
1218
1219	for (i = 0; i < 20; i++)
1220		nep_write(sc, XMAC_HOST_INFO(sc->sc_port, i), sc->sc_port);
1221}
1222
/*
 * Initialize the receive BMAC (ports 2 and 3): reset it, program a
 * conservative base configuration and the station address, clear the
 * address filters and multicast hash table, and steer all host info
 * entries to this port.
 */
void
nep_init_rx_bmac(struct nep_softc *sc)
{
	uint64_t addr0, addr1, addr2;
	uint64_t val;
	int n, i;

	/* Reset the Rx MAC and wait for the reset bit to self-clear. */
	nep_write(sc, RXMAC_SW_RST(sc->sc_port), RXMAC_SW_RST_SW_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, RXMAC_SW_RST(sc->sc_port));
		if ((val & RXMAC_SW_RST_SW_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx MAC\n");

	/* Start from a known configuration with receive disabled. */
	val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
	val &= ~RXMAC_CONFIG_ERROR_CHK_DIS;
	val &= ~RXMAC_CONFIG_PROMISCUOUS;
	val &= ~RXMAC_CONFIG_PROMISCUOUS_GROUP;
	val &= ~RXMAC_CONFIG_ADDR_FILTER_EN;
	val &= ~RXMAC_CONFIG_HASH_FILTER_EN;
	val &= ~RXMAC_CONFIG_STRIP_FCS;
	val &= ~RXMAC_CONFIG_STRIP_PAD;
	val &= ~RXMAC_CONFIG_RX_ENABLE;
	nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);

	/* Station address, 16 bits per register, low word in ADDR0. */
	addr0 = (sc->sc_lladdr[4] << 8) | sc->sc_lladdr[5];
	addr1 = (sc->sc_lladdr[2] << 8) | sc->sc_lladdr[3];
	addr2 = (sc->sc_lladdr[0] << 8) | sc->sc_lladdr[1];
	nep_write(sc, BMAC_ADDR0(sc->sc_port), addr0);
	nep_write(sc, BMAC_ADDR1(sc->sc_port), addr1);
	nep_write(sc, BMAC_ADDR2(sc->sc_port), addr2);

	/* Enable comparison against the station address. */
	nep_write(sc, BMAC_ALTAD_CMPEN(sc->sc_port), 1);

	/* Clear the alternate address filters and their masks. */
	nep_write(sc, MAC_ADDR_FILT0(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT1(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT2(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT12_MASK(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT00_MASK(sc->sc_port), 0);

	/* Clear the multicast hash table; nep_iff() programs it later. */
	for (i = 0; i < 16; i++)
		nep_write(sc, MAC_HASH_TBL(sc->sc_port, i), 0);

	/* Point every host info entry at this port. */
	for (i = 0; i < 9; i++)
		nep_write(sc, BMAC_HOST_INFO(sc->sc_port, i), sc->sc_port);
}
1272
/*
 * Initialize one Rx DMA channel: reset it, program the logical page
 * registers, the mailbox address, and the Rx buffer (RBR) and Rx
 * completion (RCR) ring configurations, then bind the port's RDC
 * table entries to the channel.
 */
void
nep_init_rx_channel(struct nep_softc *sc, int chan)
{
	uint64_t val;
	int i, n;

	/*
	 * Reset the channel.  Note only RXDMA_CFIG1_RST is written;
	 * the read-modify of `val' just above the write is effectively
	 * unused here.
	 */
	val = nep_read(sc, RXDMA_CFIG1(chan));
	val &= ~RXDMA_CFIG1_EN;
	val |= RXDMA_CFIG1_RST;
	nep_write(sc, RXDMA_CFIG1(chan), RXDMA_CFIG1_RST);

	n = 1000;
	while (--n) {
		val = nep_read(sc, RXDMA_CFIG1(chan));
		if ((val & RXDMA_CFIG1_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx DMA\n");

	/* Disable logical page translation: mask/value of 0, both pages valid. */
	nep_write(sc, RX_LOG_MASK1(chan), 0);
	nep_write(sc, RX_LOG_VALUE1(chan), 0);
	nep_write(sc, RX_LOG_MASK2(chan), 0);
	nep_write(sc, RX_LOG_VALUE2(chan), 0);
	nep_write(sc, RX_LOG_PAGE_RELO1(chan), 0);
	nep_write(sc, RX_LOG_PAGE_RELO2(chan), 0);
	nep_write(sc, RX_LOG_PAGE_HDL(chan), 0);
	nep_write(sc, RX_LOG_PAGE_VLD(chan),
	    (sc->sc_port << RX_LOG_PAGE_VLD_FUNC_SHIFT) |
	    RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);

	/* Mask everything except RBR-empty, and clear pending status. */
	nep_write(sc, RX_DMA_ENT_MSK(chan), RX_DMA_ENT_MSK_RBR_EMPTY);
	nep_write(sc, RX_DMA_CTL_STAT(chan), RX_DMA_CTL_STAT_MEX);

	/* Rx mailbox address: high bits in CFIG1, low bits in CFIG2. */
	val = NEP_DMA_DVA(sc->sc_rxmbox) >> 32;
	nep_write(sc, RXDMA_CFIG1(chan), val);

	val = NEP_DMA_DVA(sc->sc_rxmbox) & 0xffffffc0;
	nep_write(sc, RXDMA_CFIG2(chan), val);

	/* Rx buffer ring: base address and length. */
	val = NEP_DMA_DVA(sc->sc_rbring);
	val |= (uint64_t)NEP_NRBDESC << RBR_CFIG_A_LEN_SHIFT;
	nep_write(sc, RBR_CFIG_A(chan), val);

	/* 8KB blocks with a single 8KB buffer size. */
	val = RBR_CFIG_B_BLKSIZE_8K;
	val |= RBR_CFIG_B_BUFSZ1_8K | RBR_CFIG_B_VLD1;
	nep_write(sc, RBR_CFIG_B(chan), val);

	nep_write(sc, RBR_KICK(chan), 0);

	/* Rx completion ring: base address and length. */
	val = NEP_DMA_DVA(sc->sc_rcring);
	val |= (uint64_t)NEP_NRCDESC << RCRCFIG_A_LEN_SHIFT;
	nep_write(sc, RCRCFIG_A(chan), val);

	/* Interrupt coalescing: timeout of 8, packet threshold of 16. */
	val = 8 | RCRCFIG_B_ENTOUT;
	val |= (16 << RCRCFIG_B_PTHRES_SHIFT);
	nep_write(sc, RCRCFIG_B(chan), val);

	/* Route all of this port's traffic to this channel. */
	nep_write(sc, DEF_PT_RDC(sc->sc_port), chan);
	for (i = 0; i < 16; i++)
		nep_write(sc, RDC_TBL(sc->sc_port, i), chan);
}
1335
1336void
1337nep_init_tx_mac(struct nep_softc *sc)
1338{
1339	if (sc->sc_port < 2)
1340		nep_init_tx_xmac(sc);
1341	else
1342		nep_init_tx_bmac(sc);
1343}
1344
/*
 * Initialize the transmit XMAC (ports 0 and 1): reset it and program
 * inter-packet gap, minimum/maximum frame sizes, then clear the
 * transmit statistics counters.
 */
void
nep_init_tx_xmac(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	/* Reset the Tx MAC and wait for the reset bits to self-clear. */
	nep_write(sc, XTXMAC_SW_RST(sc->sc_port),
	    XTXMAC_SW_RST_REG_RST | XTXMAC_SW_RST_SOFT_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, XTXMAC_SW_RST(sc->sc_port));
		if ((val & (XTXMAC_SW_RST_REG_RST |
		    XTXMAC_SW_RST_SOFT_RST)) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx MAC\n");

	/* Base configuration with transmit disabled for now. */
	val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
	val &= ~XMAC_CONFIG_ALWAYS_NO_CRC;
	val &= ~XMAC_CONFIG_VAR_MIN_IPG_EN;
	val &= ~XMAC_CONFIG_STRETCH_MODE;
	val &= ~XMAC_CONFIG_TX_ENABLE;
	nep_write(sc, XMAC_CONFIG(sc->sc_port), val);

	val = nep_read(sc, XMAC_IPG(sc->sc_port));
	val &= ~XMAC_IPG_IPG_VALUE1_MASK;	/* MII/GMII mode */
	val |= XMAC_IPG_IPG_VALUE1_12;
	val &= ~XMAC_IPG_IPG_VALUE_MASK;	/* XGMII mode */
	val |= XMAC_IPG_IPG_VALUE_12_15;
	nep_write(sc, XMAC_IPG(sc->sc_port), val);

	/* Minimum frame size of 64 bytes for both Rx and Tx. */
	val = nep_read(sc, XMAC_MIN(sc->sc_port));
	val &= ~XMAC_MIN_RX_MIN_PKT_SIZE_MASK;
	val &= ~XMAC_MIN_TX_MIN_PKT_SIZE_MASK;
	val |= (64 << XMAC_MIN_RX_MIN_PKT_SIZE_SHIFT);
	val |= (64 << XMAC_MIN_TX_MIN_PKT_SIZE_SHIFT);
	nep_write(sc, XMAC_MIN(sc->sc_port), val);
	nep_write(sc, XMAC_MAX(sc->sc_port), ETHER_MAX_LEN);

	/* Clear the transmit frame/byte counters. */
	nep_write(sc, TXMAC_FRM_CNT(sc->sc_port), 0);
	nep_write(sc, TXMAC_BYTE_CNT(sc->sc_port), 0);
}
1388
/*
 * Initialize the transmit BMAC (ports 2 and 3): reset it and program
 * minimum/maximum frame sizes, the MAC control ethertype and the
 * preamble size.
 */
void
nep_init_tx_bmac(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	/* Reset the Tx MAC and wait for the reset bit to self-clear. */
	nep_write(sc, TXMAC_SW_RST(sc->sc_port), TXMAC_SW_RST_SW_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, TXMAC_SW_RST(sc->sc_port));
		if ((val & TXMAC_SW_RST_SW_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx MAC\n");

	/* Minimum frame size 64 (0x40) bytes, maximum ETHER_MAX_LEN. */
	nep_write(sc, BMAC_MIN(sc->sc_port), 0x40);
	nep_write(sc, BMAC_MAX(sc->sc_port), ETHER_MAX_LEN |
	    (ETHER_MAX_LEN << BMAC_MAX_BURST_SHIFT));
	/* 0x8808: IEEE 802.3 MAC Control (PAUSE) ethertype. */
	nep_write(sc, MAC_CTRL_TYPE(sc->sc_port), 0x8808);
	/* 7-byte preamble. */
	nep_write(sc, MAC_PA_SIZE(sc->sc_port), 0x7);
}
1411
/*
 * Initialize one Tx DMA channel: enable the transmit controller for
 * this port, reset the channel, program the logical page registers
 * and the transmit ring configuration.
 */
void
nep_init_tx_channel(struct nep_softc *sc, int chan)
{
	uint64_t val;
	int n;

	/* Enable the transmit controller and this port's bit. */
	val = nep_read(sc, TXC_CONTROL);
	val |= TXC_CONTROL_TXC_ENABLED;
	val |= (1ULL << sc->sc_port);
	nep_write(sc, TXC_CONTROL, val);

	/* Bind the DMA channel to this port. */
	nep_write(sc, TXC_PORT_DMA(sc->sc_port), 1ULL << chan);

	/* Unmask this port's TXC interrupts. */
	val = nep_read(sc, TXC_INT_MASK);
	val &= ~TXC_INT_MASK_PORT_INT_MASK(sc->sc_port);
	nep_write(sc, TXC_INT_MASK, val);

	/* Reset the channel and wait for the bit to self-clear. */
	val = nep_read(sc, TX_CS(chan));
	val |= TX_CS_RST;
	nep_write(sc, TX_CS(chan), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, TX_CS(chan));
		if ((val & TX_CS_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx DMA\n");

	/* Disable logical page translation: mask/value of 0, both pages valid. */
	nep_write(sc, TX_LOG_MASK1(chan), 0);
	nep_write(sc, TX_LOG_VALUE1(chan), 0);
	nep_write(sc, TX_LOG_MASK2(chan), 0);
	nep_write(sc, TX_LOG_VALUE2(chan), 0);
	nep_write(sc, TX_LOG_PAGE_RELO1(chan), 0);
	nep_write(sc, TX_LOG_PAGE_RELO2(chan), 0);
	nep_write(sc, TX_LOG_PAGE_HDL(chan), 0);
	nep_write(sc, TX_LOG_PAGE_VLD(chan),
	    (sc->sc_port << TX_LOG_PAGE_VLD_FUNC_SHIFT) |
	    TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);

	nep_write(sc, TX_RING_KICK(chan), 0);

	/* Maximum transfer size; allow room for the Tx buffer header. */
	nep_write(sc, TXC_DMA_MAX(chan), ETHER_MAX_LEN + 64);
	nep_write(sc, TX_ENT_MSK(chan), 0);

	/* Tx descriptor ring: base address and length (in 64-byte units). */
	val = NEP_DMA_DVA(sc->sc_txring);
	val |= (NEP_DMA_LEN(sc->sc_txring) / 64) << TX_RNG_CFIG_LEN_SHIFT;
	nep_write(sc, TX_RNG_CFIG(chan), val);

	nep_write(sc, TX_CS(chan), 0);
}
1464
/*
 * Enable packet reception, programming the promiscuous/allmulti and
 * hash filter bits from the current interface flags.  XMAC and BMAC
 * use differently named but parallel configuration bits.
 */
void
nep_enable_rx_mac(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint64_t val;

	if (sc->sc_port < 2) {
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val &= ~XMAC_CONFIG_PROMISCUOUS;
		val &= ~XMAC_CONFIG_PROMISCUOUS_GROUP;
		val &= ~XMAC_CONFIG_HASH_FILTER_EN;
		if (ifp->if_flags & IFF_PROMISC)
			val |= XMAC_CONFIG_PROMISCUOUS;
		/* Either accept all multicast or use the hash filter. */
		if (ifp->if_flags & IFF_ALLMULTI)
			val |= XMAC_CONFIG_PROMISCUOUS_GROUP;
		else
			val |= XMAC_CONFIG_HASH_FILTER_EN;
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	} else {
		val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
		val &= ~RXMAC_CONFIG_PROMISCUOUS;
		val &= ~RXMAC_CONFIG_PROMISCUOUS_GROUP;
		val &= ~RXMAC_CONFIG_HASH_FILTER_EN;
		if (ifp->if_flags & IFF_PROMISC)
			val |= RXMAC_CONFIG_PROMISCUOUS;
		/* Either accept all multicast or use the hash filter. */
		if (ifp->if_flags & IFF_ALLMULTI)
			val |= RXMAC_CONFIG_PROMISCUOUS_GROUP;
		else
			val |= RXMAC_CONFIG_HASH_FILTER_EN;
		val |= RXMAC_CONFIG_RX_ENABLE;
		nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);
	}
}
1499
1500void
1501nep_disable_rx_mac(struct nep_softc *sc)
1502{
1503	uint64_t val;
1504
1505	if (sc->sc_port < 2) {
1506		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
1507		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
1508		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
1509	} else {
1510		val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
1511		val &= ~RXMAC_CONFIG_RX_ENABLE;
1512		nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);
1513	}
1514}
1515
/*
 * Quiesce the Tx and Rx DMA channels for this port (the driver uses
 * the port number as the channel number) and wait for each to report
 * a stopped/quiescent state.
 */
void
nep_stop_dma(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	/* Ask the Tx channel to stop at the next packet boundary. */
	val = nep_read(sc, TX_CS(sc->sc_port));
	val |= TX_CS_STOP_N_GO;
	nep_write(sc, TX_CS(sc->sc_port), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, TX_CS(sc->sc_port));
		if (val & TX_CS_SNG_STATE)
			break;
	}
	if (n == 0)
		printf("timeout stopping Tx DMA\n");

	/* Disable the Rx channel and wait for it to go quiescent. */
	val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
	val &= ~RXDMA_CFIG1_EN;
	nep_write(sc, RXDMA_CFIG1(sc->sc_port), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
		if (val & RXDMA_CFIG1_QST)
			break;
	}
	if (n == 0)
		printf("timeout stopping Rx DMA\n");
}
1548
/*
 * Bring the interface up: allocate all DMA rings and buffers, reset
 * and configure the PCS, MACs, IPP and DMA channels, enable transmit
 * and receive, and unmask interrupts.
 *
 * NOTE(review): on the error paths only the nep_dmamem allocations
 * are released; the malloc'ed sc_rb/sc_txbuf arrays and their DMA
 * maps are not -- confirm whether this leak matters in practice.
 */
void
nep_up(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct nep_block *rb;
	struct nep_buf *txb;
	uint64_t val;
	int i, n;

	/* Allocate Rx block descriptor ring. */
	sc->sc_rbring = nep_dmamem_alloc(sc, NEP_NRBDESC * sizeof(uint32_t));
	if (sc->sc_rbring == NULL)
		return;
	sc->sc_rbdesc = NEP_DMA_KVA(sc->sc_rbring);

	/* One page-sized DMA map per Rx block. */
	sc->sc_rb = malloc(sizeof(struct nep_block) * NEP_NRBDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < NEP_NRBDESC; i++) {
		rb = &sc->sc_rb[i];
		bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
		    BUS_DMA_WAITOK, &rb->nb_map);
		rb->nb_block = NULL;
	}

	sc->sc_rx_prod = 0;
	if_rxr_init(&sc->sc_rx_ring, 16, NEP_NRBDESC);

	/* Allocate Rx completion descriptor ring. */
	sc->sc_rcring = nep_dmamem_alloc(sc, NEP_NRCDESC * sizeof(uint64_t));
	if (sc->sc_rcring == NULL)
		goto free_rbring;
	sc->sc_rcdesc = NEP_DMA_KVA(sc->sc_rcring);

	sc->sc_rx_cons = 0;

	/* Allocate Rx mailbox. */
	sc->sc_rxmbox = nep_dmamem_alloc(sc, 64);
	if (sc->sc_rxmbox == NULL)
		goto free_rcring;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = nep_dmamem_alloc(sc, NEP_NTXDESC * sizeof(uint64_t));
	if (sc->sc_txring == NULL)
		goto free_rxmbox;
	sc->sc_txdesc = NEP_DMA_KVA(sc->sc_txring);

	/* One multi-segment DMA map per Tx descriptor slot. */
	sc->sc_txbuf = malloc(sizeof(struct nep_buf) * NEP_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < NEP_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, NEP_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->nb_map);
		txb->nb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;
	sc->sc_wrap = 0;
	sc->sc_pkt_cnt = 0;

	if (sc->sc_port < 2) {
		/* Disable the POR loopback clock source. */
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	}

	/* Put the PCS in MII mode and reset it. */
	nep_write(sc, PCS_DPATH_MODE(sc->sc_port), PCS_DPATH_MODE_MII);
	val = nep_read(sc, PCS_MII_CTL(sc->sc_port));
	val |= PCS_MII_CTL_RESET;
	nep_write(sc, PCS_MII_CTL(sc->sc_port), val);
	n = 1000;
	while (--n) {
		val = nep_read(sc, PCS_MII_CTL(sc->sc_port));
		if ((val & PCS_MII_CTL_RESET) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting PCS\n");

	nep_init_rx_mac(sc);
	nep_init_rx_channel(sc, sc->sc_port);
	nep_init_ipp(sc);

	nep_init_tx_mac(sc);
	nep_init_tx_channel(sc, sc->sc_port);

	nep_fill_rx_ring(sc);

	/* Enable receive, then transmit. */
	nep_enable_rx_mac(sc);
	if (sc->sc_port < 2) {
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val |= XMAC_CONFIG_TX_ENABLE;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	} else {
		val = nep_read(sc, TXMAC_CONFIG(sc->sc_port));
		val |= TXMAC_CONFIG_TX_ENABLE;
		nep_write(sc, TXMAC_CONFIG(sc->sc_port), val);
	}

	/* Start the Rx DMA channel. */
	val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
	val |= RXDMA_CFIG1_EN;
	nep_write(sc, RXDMA_CFIG1(sc->sc_port), val);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Enable interrupts. */
	nep_write(sc, LD_IM1(LDN_MAC(sc->sc_port)), 0);
	nep_write(sc, LD_IM0(LDN_RXDMA(sc->sc_port)), 0);
	nep_write(sc, LD_IM0(LDN_TXDMA(sc->sc_port)), 0);
	nep_write(sc, LDGIMGN(sc->sc_port), LDGIMGN_ARM | 2);

	timeout_add_sec(&sc->sc_tick, 1);

	return;

free_rxmbox:
	nep_dmamem_free(sc, sc->sc_rxmbox);
free_rcring:
	nep_dmamem_free(sc, sc->sc_rcring);
free_rbring:
	nep_dmamem_free(sc, sc->sc_rbring);
}
1674
/*
 * Bring the interface down: mask interrupts, disable the MACs and
 * IPP, quiesce DMA, then release every buffer, DMA map and ring that
 * nep_up() allocated.
 */
void
nep_down(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct nep_buf *txb;
	struct nep_block *rb;
	uint64_t val;
	int i;

	timeout_del(&sc->sc_tick);

	/* Disable interrupts. */
	nep_write(sc, LD_IM1(LDN_MAC(sc->sc_port)), 1);
	nep_write(sc, LD_IM0(LDN_RXDMA(sc->sc_port)), 1);
	nep_write(sc, LD_IM0(LDN_TXDMA(sc->sc_port)), 1);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	nep_disable_rx_mac(sc);

	/* Disable the input packet processor. */
	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val &= ~IPP_CFIG_IPP_ENABLE;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_stop_dma(sc);

	/* Free any mbufs still pending transmission. */
	for (i = 0; i < NEP_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->nb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->nb_map, 0,
			    txb->nb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->nb_map);
			m_freem(txb->nb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->nb_map);
	}

	nep_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, sizeof(struct nep_buf) * NEP_NTXDESC);

	nep_dmamem_free(sc, sc->sc_rxmbox);
	nep_dmamem_free(sc, sc->sc_rcring);

	/* Return any Rx blocks still owned by the ring to the pool. */
	for (i = 0; i < NEP_NRBDESC; i++) {
		rb = &sc->sc_rb[i];
		if (rb->nb_block) {
			bus_dmamap_sync(sc->sc_dmat, rb->nb_map, 0,
			    rb->nb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rb->nb_map);
			pool_put(nep_block_pool, rb->nb_block);
		}
		bus_dmamap_destroy(sc->sc_dmat, rb->nb_map);
	}

	nep_dmamem_free(sc, sc->sc_rbring);
	free(sc->sc_rb, M_DEVBUF, sizeof(struct nep_block) * NEP_NRBDESC);
}
1734
1735void
1736nep_iff(struct nep_softc *sc)
1737{
1738	struct arpcom *ac = &sc->sc_ac;
1739	struct ifnet *ifp = &sc->sc_ac.ac_if;
1740	struct ether_multi *enm;
1741	struct ether_multistep step;
1742	uint32_t crc, hash[16];
1743	int i;
1744
1745	nep_disable_rx_mac(sc);
1746
1747	ifp->if_flags &= ~IFF_ALLMULTI;
1748	memset(hash, 0, sizeof(hash));
1749
1750	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1751		ifp->if_flags |= IFF_ALLMULTI;
1752	} else {
1753		ETHER_FIRST_MULTI(step, ac, enm);
1754		while (enm != NULL) {
1755                        crc = ether_crc32_le(enm->enm_addrlo,
1756                            ETHER_ADDR_LEN);
1757
1758                        crc >>= 24;
1759                        hash[crc >> 4] |= 1 << (15 - (crc & 15));
1760
1761			ETHER_NEXT_MULTI(step, enm);
1762		}
1763	}
1764
1765	for (i = 0; i < nitems(hash); i++) {
1766		if (sc->sc_port < 2)
1767			nep_write(sc, XMAC_HASH_TBL(sc->sc_port, i), hash[i]);
1768		else
1769			nep_write(sc, MAC_HASH_TBL(sc->sc_port, i), hash[i]);
1770	}
1771
1772	nep_enable_rx_mac(sc);
1773}
1774
/*
 * Encapsulate an mbuf chain for transmission: pad short frames,
 * prepend the Tx buffer header, load the chain into a DMA map and
 * write one descriptor per segment, then kick the ring.  On success
 * *m0 and *idx are updated; on failure the mbuf is freed and ENOBUFS
 * returned.
 */
int
nep_encap(struct nep_softc *sc, struct mbuf **m0, int *idx)
{
	struct mbuf *m = *m0;
	struct nep_txbuf_hdr *nh;
	uint64_t txd;
	bus_dmamap_t map;
	int cur, frag, i;
	int len, pad;
	int err;

	/*
	 * MAC does not support padding of transmit packets that are
	 * fewer than 60 bytes.
	 */
	if (m->m_pkthdr.len < (ETHER_MIN_LEN - ETHER_CRC_LEN)) {
		struct mbuf *n;
		int padlen;

		padlen = (ETHER_MIN_LEN - ETHER_CRC_LEN) - m->m_pkthdr.len;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		memset(mtod(n, caddr_t), 0, padlen);
		n->m_len = padlen;
		m_cat(m, n);
		m->m_pkthdr.len += padlen;
	}

	/*
	 * Compute the alignment pad that goes between the Tx buffer
	 * header and the packet data, and record it in the header
	 * along with the padded length.
	 */
	if (m_leadingspace(m) < 16)
		pad = 0;
	else
		pad = mtod(m, u_long) % 16;
	len = m->m_pkthdr.len + pad;
	M_PREPEND(m, sizeof(*nh) + pad, M_DONTWAIT);
	if (m == NULL)
		return (ENOBUFS);
	nh = mtod(m, struct nep_txbuf_hdr *);
	nh->nh_flags = htole64((len << 16) | (pad / 2));
	nh->nh_reserved = 0;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].nb_map;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX defrag */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * One descriptor per DMA segment; SOP, MARK and the segment
	 * count only go into the first descriptor.
	 */
	txd = TXD_SOP | TXD_MARK;
	txd |= ((uint64_t)map->dm_nsegs << TXD_NUM_PTR_SHIFT);
	for (i = 0; i < map->dm_nsegs; i++) {
		txd |= ((uint64_t)map->dm_segs[i].ds_len << TXD_TR_LEN_SHIFT);
		txd |= map->dm_segs[i].ds_addr;
		sc->sc_txdesc[frag] = htole64(txd);
		txd = 0;

		bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_txring),
		    frag * sizeof(txd), sizeof(txd), BUS_DMASYNC_PREWRITE);

		cur = frag++;
		if (frag >= NEP_NTXDESC)
			frag = 0;
		KASSERT(frag != sc->sc_tx_cons);
	}

	/*
	 * Park the mbuf on the slot of the last descriptor, swapping
	 * DMA maps so the loaded map stays with the mbuf it maps.
	 */
	KASSERT(sc->sc_txbuf[cur].nb_m == NULL);
	sc->sc_txbuf[*idx].nb_map = sc->sc_txbuf[cur].nb_map;
	sc->sc_txbuf[cur].nb_map = map;
	sc->sc_txbuf[cur].nb_m = m;

	/* Kick the ring; the wrap bit toggles each time the tail wraps. */
	if (frag < *idx)
		sc->sc_wrap ^= TX_RING_KICK_WRAP;
	nep_write(sc, TX_RING_KICK(sc->sc_port), sc->sc_wrap | (frag << 3));

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	/* Strip the header/pad again before bpf sees the packet. */
	m_adj(m, sizeof(*nh) + pad);
	*m0 = m;

	return (0);
}
1866
/*
 * Transmit start routine: dequeue packets from the interface send
 * queue and encapsulate them onto the Tx ring until the ring is too
 * full to guarantee room for a maximally fragmented packet.
 */
void
nep_start(struct ifnet *ifp)
{
	struct nep_softc *sc = (struct nep_softc *)ifp->if_softc;
	struct mbuf *m;
	int idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
	for (;;) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/* Leave room for a worst-case NEP_NTXSEGS packet. */
		if (sc->sc_tx_cnt >= (NEP_NTXDESC - NEP_NTXSEGS)) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* Now we are committed to transmit the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

		if (nep_encap(sc, &m, &idx))
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1912
/*
 * Transmit watchdog, fired when if_timer (set in nep_start()) runs
 * out.  Currently only logs; no recovery is attempted.
 */
void
nep_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1918
/*
 * One-second periodic timeout: poll the PHY via the MII layer and
 * reschedule ourselves.
 */
void
nep_tick(void *arg)
{
	struct nep_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
1931
/*
 * Interface ioctl handler: bring the interface up/down, handle media
 * requests, and reprogram the receive filter on ENETRESET.
 */
int
nep_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nep_softc *sc = (struct nep_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			/* Already running: just reload the Rx filter. */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				nep_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				nep_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	/* ENETRESET means "reprogram the receive filter". */
	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			nep_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1977
/*
 * Refill the Rx buffer ring: allocate page-sized blocks from the
 * block pool, DMA-load them, and post their addresses in the RBR,
 * then kick the chip with the number of new buffers.
 */
void
nep_fill_rx_ring(struct nep_softc *sc)
{
	struct nep_block *rb;
	void *block;
	uint64_t val;
	u_int slots;
	int desc, err;
	int count = 0;

	desc = sc->sc_rx_prod;
	slots = if_rxr_get(&sc->sc_rx_ring, NEP_NRBDESC);
	while (slots > 0) {
		rb = &sc->sc_rb[desc];

		block = pool_get(nep_block_pool, PR_NOWAIT);
		if (block == NULL)
			break;
		err = bus_dmamap_load(sc->sc_dmat, rb->nb_map, block,
		     PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (err) {
			pool_put(nep_block_pool, block);
			break;
		}
		rb->nb_block = block;
		/* RBR entries hold the DMA address in 4KB units. */
		sc->sc_rbdesc[desc++] =
		    htole32(rb->nb_map->dm_segs[0].ds_addr >> 12);
		count++;
		slots--;
		if (desc >= NEP_NRBDESC)
			desc = 0;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);
	if (count > 0) {
		/* Post the new buffers and clear the RBR-empty condition. */
		nep_write(sc, RBR_KICK(sc->sc_port), count);
		val = nep_read(sc, RX_DMA_CTL_STAT(sc->sc_port));
		val |= RX_DMA_CTL_STAT_RBR_EMPTY;
		nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port), val);
		sc->sc_rx_prod = desc;
	}
}
2019
/*
 * Allocate a chunk of zeroed, DMA-able memory of `size' bytes in a
 * single segment, mapped into kernel virtual address space and loaded
 * into a DMA map.  Returns NULL on failure, unwinding any partial
 * setup via the goto cleanup chain.
 */
struct nep_dmamem *
nep_dmamem_alloc(struct nep_softc *sc, size_t size)
{
	struct nep_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->ndm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->ndm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->ndm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->ndm_seg, nsegs, size, &m->ndm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->ndm_map, m->ndm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->ndm_kva, m->ndm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->ndm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->ndm_map);
qdmfree:
	free(m, M_DEVBUF, sizeof(*m));

	return (NULL);
}
2061
/*
 * Release a chunk allocated with nep_dmamem_alloc(), tearing down the
 * DMA resources in the reverse order of their setup.
 */
void
nep_dmamem_free(struct nep_softc *sc, struct nep_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->ndm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->ndm_kva, m->ndm_size);
	bus_dmamem_free(sc->sc_dmat, &m->ndm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->ndm_map);
	free(m, M_DEVBUF, sizeof(*m));
}
2071