if_cnmac.c revision 1.6
1/*	$NetBSD: if_cnmac.c,v 1.6 2017/08/22 07:09:00 maya Exp $	*/
2
3#include <sys/cdefs.h>
4#if 0
5__KERNEL_RCSID(0, "$NetBSD: if_cnmac.c,v 1.6 2017/08/22 07:09:00 maya Exp $");
6#endif
7
8#include "opt_octeon.h"
9
10#ifdef	OCTEON_ETH_DEBUG
11
12#ifndef DIAGNOSTIC
13#define	DIAGNOSTIC
14#endif
15
16#ifndef DEBUG
17#define	DEBUG
18#endif
19
20#endif
21
22/*
23 * If no free send buffer is available, free all the sent buffers and bail out.
24 */
25#define OCTEON_ETH_SEND_QUEUE_CHECK
26
27/* XXX XXX XXX XXX XXX XXX */
28
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/pool.h>
32#include <sys/mbuf.h>
33#include <sys/malloc.h>
34#include <sys/kernel.h>
35#include <sys/socket.h>
36#include <sys/ioctl.h>
37#include <sys/errno.h>
38#include <sys/device.h>
39#include <sys/queue.h>
40#include <sys/conf.h>
41#include <sys/sysctl.h>
42#include <sys/syslog.h>
43
44#include <net/if.h>
45#include <net/if_dl.h>
46#include <net/if_media.h>
47#include <net/if_ether.h>
48#include <net/route.h>
49
50#include <net/bpf.h>
51
52#include <netinet/in.h>
53#include <netinet/in_systm.h>
54#include <netinet/in_var.h>
55#include <netinet/ip.h>
56
57#include <sys/bus.h>
58#include <machine/intr.h>
59#include <machine/endian.h>
60#include <machine/locore.h>
61
62#include <dev/mii/mii.h>
63#include <dev/mii/miivar.h>
64
65#include <mips/cpuregs.h>
66
67#include <mips/cavium/dev/octeon_asxreg.h>
68#include <mips/cavium/dev/octeon_ciureg.h>
69#include <mips/cavium/dev/octeon_npireg.h>
70#include <mips/cavium/dev/octeon_gmxreg.h>
71#include <mips/cavium/dev/octeon_ipdreg.h>
72#include <mips/cavium/dev/octeon_pipreg.h>
73#include <mips/cavium/dev/octeon_powreg.h>
74#include <mips/cavium/dev/octeon_faureg.h>
75#include <mips/cavium/dev/octeon_fpareg.h>
76#include <mips/cavium/dev/octeon_bootbusreg.h>
77#include <mips/cavium/include/iobusvar.h>
78#include <mips/cavium/octeonvar.h>
79#include <mips/cavium/dev/octeon_fpavar.h>
80#include <mips/cavium/dev/octeon_gmxvar.h>
81#include <mips/cavium/dev/octeon_fauvar.h>
82#include <mips/cavium/dev/octeon_powvar.h>
83#include <mips/cavium/dev/octeon_ipdvar.h>
84#include <mips/cavium/dev/octeon_pipvar.h>
85#include <mips/cavium/dev/octeon_pkovar.h>
86#include <mips/cavium/dev/octeon_asxvar.h>
87#include <mips/cavium/dev/octeon_smivar.h>
88#include <mips/cavium/dev/if_cnmacvar.h>
89
90#ifdef OCTEON_ETH_DEBUG
91#define	OCTEON_ETH_KASSERT(x)	KASSERT(x)
92#define	OCTEON_ETH_KDASSERT(x)	KDASSERT(x)
93#else
94#define	OCTEON_ETH_KASSERT(x)
95#define	OCTEON_ETH_KDASSERT(x)
96#endif
97
98/*
99 * Set the PKO to think command buffers are an odd length.  This ensures that
100 * a command is never split across two buffers.
101 */
102#define OCTEON_POOL_NWORDS_CMD	\
103	    (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
104#define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */
105
106static void		octeon_eth_buf_init(struct octeon_eth_softc *);
107
108static int	octeon_eth_match(device_t, struct cfdata *, void *);
109static void	octeon_eth_attach(device_t, device_t, void *);
110static void	octeon_eth_pip_init(struct octeon_eth_softc *);
111static void	octeon_eth_ipd_init(struct octeon_eth_softc *);
112static void	octeon_eth_pko_init(struct octeon_eth_softc *);
113static void	octeon_eth_asx_init(struct octeon_eth_softc *);
114static void	octeon_eth_smi_init(struct octeon_eth_softc *);
115
116static void	octeon_eth_board_mac_addr(uint8_t *, size_t, struct octeon_eth_softc *);
117
118static int	octeon_eth_mii_readreg(device_t, int, int);
119static void	octeon_eth_mii_writereg(device_t, int, int, int);
120static void	octeon_eth_mii_statchg(struct ifnet *);
121
122static int	octeon_eth_mediainit(struct octeon_eth_softc *);
123static void	octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
124static int	octeon_eth_mediachange(struct ifnet *);
125
126static inline void	octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
127static inline void	octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
128static inline void	octeon_eth_send_queue_flush(struct octeon_eth_softc *);
129static inline void	octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
130static inline int	octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
131static inline void	octeon_eth_send_queue_add(struct octeon_eth_softc *,
132			    struct mbuf *, uint64_t *);
133static inline void	octeon_eth_send_queue_del(struct octeon_eth_softc *,
134			    struct mbuf **, uint64_t **);
135static inline int	octeon_eth_buf_free_work(struct octeon_eth_softc *,
136			    uint64_t *, uint64_t);
137static inline void	octeon_eth_buf_ext_free_m(struct mbuf *, void *, size_t, void *);
138static inline void	octeon_eth_buf_ext_free_ext(struct mbuf *, void *, size_t, void *);
139
140static int	octeon_eth_ioctl(struct ifnet *, u_long, void *);
141static void	octeon_eth_watchdog(struct ifnet *);
142static int	octeon_eth_init(struct ifnet *);
143static void	octeon_eth_stop(struct ifnet *, int);
144static void	octeon_eth_start(struct ifnet *);
145
146static inline int	octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
147			    uint64_t);
148static inline uint64_t	octeon_eth_send_makecmd_w1(int, paddr_t);
149static inline uint64_t 	octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
150			    int);
151static inline int	octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
152			    struct mbuf *, uint64_t *, int *);
153static inline int	octeon_eth_send_makecmd(struct octeon_eth_softc *,
154			    struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
155static inline int	octeon_eth_send_buf(struct octeon_eth_softc *,
156			    struct mbuf *, uint64_t *);
157static inline int	octeon_eth_send(struct octeon_eth_softc *,
158			    struct mbuf *);
159
160static int	octeon_eth_reset(struct octeon_eth_softc *);
161static int	octeon_eth_configure(struct octeon_eth_softc *);
162static int	octeon_eth_configure_common(struct octeon_eth_softc *);
163
164static void	octeon_eth_tick_free(void *arg);
165static void	octeon_eth_tick_misc(void *);
166
167static inline int	octeon_eth_recv_mbuf(struct octeon_eth_softc *,
168			    uint64_t *, struct mbuf **);
169static inline int	octeon_eth_recv_check_code(struct octeon_eth_softc *,
170			    uint64_t);
171static inline int	octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
172			    uint64_t);
173static inline int	octeon_eth_recv_check_link(struct octeon_eth_softc *,
174			    uint64_t);
175static inline int	octeon_eth_recv_check(struct octeon_eth_softc *,
176			    uint64_t);
177static inline int	octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
178static void		octeon_eth_recv_redir(struct ifnet *, struct mbuf *);
179static inline void	octeon_eth_recv_intr(void *, uint64_t *);
180
181/* device driver context */
182static struct	octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
183static void	*octeon_eth_pow_recv_ih;
184
185/* sysctl'able parameters */
186int		octeon_eth_param_pko_cmd_w0_n2 = 1;
187int		octeon_eth_param_pip_dyn_rs = 1;
188int		octeon_eth_param_redir = 0;
189int		octeon_eth_param_pktbuf = 0;
190int		octeon_eth_param_rate = 0;
191int		octeon_eth_param_intr = 0;
192
193CFATTACH_DECL_NEW(cnmac, sizeof(struct octeon_eth_softc),
194    octeon_eth_match, octeon_eth_attach, NULL, NULL);
195
196#ifdef OCTEON_ETH_DEBUG
197
198static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
199#define	_ENTRY(name, type, parent, descr) \
200	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
201	_ENTRY(rx,			MISC, NULL, "rx"),
202	_ENTRY(rxint,			INTR, NULL, "rx intr"),
203	_ENTRY(rxrs,			MISC, NULL, "rx dynamic short"),
204	_ENTRY(rxbufpkalloc,		MISC, NULL, "rx buf pkt alloc"),
205	_ENTRY(rxbufpkput,		MISC, NULL, "rx buf pkt put"),
206	_ENTRY(rxbufwqalloc,		MISC, NULL, "rx buf wqe alloc"),
207	_ENTRY(rxbufwqput,		MISC, NULL, "rx buf wqe put"),
208	_ENTRY(rxerrcode,		MISC, NULL, "rx code error"),
209	_ENTRY(rxerrfix,		MISC, NULL, "rx fixup error"),
210	_ENTRY(rxerrjmb,		MISC, NULL, "rx jmb error"),
211	_ENTRY(rxerrlink,		MISC, NULL, "rx link error"),
212	_ENTRY(rxerroff,		MISC, NULL, "rx offload error"),
213	_ENTRY(rxonperrshort,		MISC, NULL, "rx onp fixup short error"),
214	_ENTRY(rxonperrpreamble,	MISC, NULL, "rx onp fixup preamble error"),
215	_ENTRY(rxonperrcrc,		MISC, NULL, "rx onp fixup crc error"),
216	_ENTRY(rxonperraddress,		MISC, NULL, "rx onp fixup address error"),
217	_ENTRY(rxonponp,		MISC, NULL, "rx onp fixup onp packets"),
218	_ENTRY(rxonpok,			MISC, NULL, "rx onp fixup success packets"),
219	_ENTRY(tx,			MISC, NULL, "tx"),
220	_ENTRY(txadd,			MISC, NULL, "tx add"),
221	_ENTRY(txbufcballoc,		MISC, NULL, "tx buf cb alloc"),
222	_ENTRY(txbufcbget,		MISC, NULL, "tx buf cb get"),
223	_ENTRY(txbufgballoc,		MISC, NULL, "tx buf gb alloc"),
224	_ENTRY(txbufgbget,		MISC, NULL, "tx buf gb get"),
225	_ENTRY(txbufgbput,		MISC, NULL, "tx buf gb put"),
226	_ENTRY(txdel,			MISC, NULL, "tx del"),
227	_ENTRY(txerr,			MISC, NULL, "tx error"),
228	_ENTRY(txerrcmd,		MISC, NULL, "tx cmd error"),
229	_ENTRY(txerrgbuf,		MISC, NULL, "tx gbuf error"),
230	_ENTRY(txerrlink,		MISC, NULL, "tx link error"),
231	_ENTRY(txerrmkcmd,		MISC, NULL, "tx makecmd error"),
232#undef	_ENTRY
233};
234#endif
235
236/* ---- buffer management */
237
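/*
 * The driver draws its buffers from four FPA pools: PKT for received packet
 * data, WQE for POW work-queue entries, CMD for PKO command buffers and SG
 * for transmit gather lists (see octeon_eth_pool_params[] below).
 */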
238static const struct octeon_eth_pool_param {
239	int			poolno;
240	size_t			size;
241	size_t			nelems;
242} octeon_eth_pool_params[] = {
243#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
244	_ENTRY(PKT),
245	_ENTRY(WQE),
246	_ENTRY(CMD),
247	_ENTRY(SG)
248#undef	_ENTRY
249};
250struct octeon_fpa_buf	*octeon_eth_pools[8/* XXX */];
251#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
252#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
253#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
254#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]
255
256static void
257octeon_eth_buf_init(struct octeon_eth_softc *sc)
258{
259	static int once;
260	int i;
261	const struct octeon_eth_pool_param *pp;
262	struct octeon_fpa_buf *fb;
263
264	if (once == 1)
265		return;
266	once = 1;
267
268	for (i = 0; i < (int)__arraycount(octeon_eth_pool_params); i++) {
269		pp = &octeon_eth_pool_params[i];
270		octeon_fpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
271		octeon_eth_pools[i] = fb;
272	}
273}
274
275/* ---- autoconf */
276
277static int
278octeon_eth_match(device_t parent, struct cfdata *match, void *aux)
279{
280	struct octeon_gmx_attach_args *ga = aux;
281
282	if (strcmp(match->cf_name, ga->ga_name) != 0) {
283		return 0;
284	}
285	return 1;
286}
287
288static void
289octeon_eth_attach(device_t parent, device_t self, void *aux)
290{
291	struct octeon_eth_softc *sc = device_private(self);
292	struct octeon_gmx_attach_args *ga = aux;
293	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
294	uint8_t enaddr[ETHER_ADDR_LEN];
295
296	sc->sc_dev = self;
297	sc->sc_regt = ga->ga_regt;
298	sc->sc_port = ga->ga_portno;
299	sc->sc_port_type = ga->ga_port_type;
300	sc->sc_gmx = ga->ga_gmx;
301	sc->sc_gmx_port = ga->ga_gmx_port;
302
303	sc->sc_init_flag = 0;
304	/*
305	 * XXXUEBAYASI
306	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
307	 */
308	sc->sc_ip_offset = 0/* XXX */;
309
310	if (MIPS_PRID_IMPL(mips_options.mips_cpu_id) <= MIPS_CN30XX) {
311		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_PRE_ALIGN);
312		SET(sc->sc_quirks, OCTEON_ETH_QUIRKS_NO_RX_INBND);
313	}
314
315	octeon_eth_board_mac_addr(enaddr, sizeof(enaddr), sc);
316	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
317	    ether_sprintf(enaddr));
318
319	octeon_eth_gsc[sc->sc_port] = sc;
320
321	SIMPLEQ_INIT(&sc->sc_sendq);
322	sc->sc_soft_req_thresh = 15/* XXX */;
323	sc->sc_ext_callback_cnt = 0;
324
325	octeon_gmx_stats_init(sc->sc_gmx_port);
326
327	callout_init(&sc->sc_tick_misc_ch, 0);
328	callout_init(&sc->sc_tick_free_ch, 0);
329
330	octeon_fau_op_init(&sc->sc_fau_done,
331	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
332	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
333	octeon_fau_op_set_8(&sc->sc_fau_done, 0);
334
335	octeon_eth_pip_init(sc);
336	octeon_eth_ipd_init(sc);
337	octeon_eth_pko_init(sc);
338	octeon_eth_asx_init(sc);
339	octeon_eth_smi_init(sc);
340
341	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
342	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
343	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
344	sc->sc_gmx_port->sc_port_ec = &sc->sc_ethercom;
345	/* XXX */
346	sc->sc_gmx_port->sc_quirks = sc->sc_quirks;
347
348	/* XXX */
349	sc->sc_pow = &octeon_pow_softc;
350
351	octeon_eth_mediainit(sc);
352
353	strncpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
354	ifp->if_softc = sc;
355	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
356	ifp->if_ioctl = octeon_eth_ioctl;
357	ifp->if_start = octeon_eth_start;
358	ifp->if_watchdog = octeon_eth_watchdog;
359	ifp->if_init = octeon_eth_init;
360	ifp->if_stop = octeon_eth_stop;
361	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
362	IFQ_SET_READY(&ifp->if_snd);
363
364	/* XXX: not yet tx checksum */
365	ifp->if_capabilities =
366		IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
367		IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
368
369	octeon_gmx_set_mac_addr(sc->sc_gmx_port, enaddr);
370	octeon_gmx_set_filter(sc->sc_gmx_port);
371
372	if_attach(ifp);
373	ether_ifattach(ifp, enaddr);
374
375	/* XXX */
376	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
377	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
378	sc->sc_rate_recv_check_code_cap.tv_sec = 1;
379	sc->sc_rate_recv_fixup_odd_nibble_short_cap.tv_sec = 1;
380	sc->sc_rate_recv_fixup_odd_nibble_preamble_cap.tv_sec = 1;
381	sc->sc_rate_recv_fixup_odd_nibble_crc_cap.tv_sec = 1;
382#ifdef OCTEON_ETH_DEBUG
383	sc->sc_rate_recv_fixup_odd_nibble_addr_cap.tv_sec = 1;
384#endif
385	/* XXX */
386
387#if 1
388	octeon_eth_buf_init(sc);
389#endif
390
391	if (octeon_eth_pow_recv_ih == NULL)
392		octeon_eth_pow_recv_ih = octeon_pow_intr_establish(OCTEON_POW_GROUP_PIP,
393		    IPL_NET, octeon_eth_recv_intr, NULL, NULL);
394
395	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
396	    device_xname(sc->sc_dev));
397}
398
399/* ---- submodules */
400
401/* XXX */
402static void
403octeon_eth_pip_init(struct octeon_eth_softc *sc)
404{
405	struct octeon_pip_attach_args pip_aa;
406
407	pip_aa.aa_port = sc->sc_port;
408	pip_aa.aa_regt = sc->sc_regt;
409	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
410	pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
411	pip_aa.aa_ip_offset = sc->sc_ip_offset;
412	octeon_pip_init(&pip_aa, &sc->sc_pip);
413}
414
415/* XXX */
416static void
417octeon_eth_ipd_init(struct octeon_eth_softc *sc)
418{
419	struct octeon_ipd_attach_args ipd_aa;
420
421	ipd_aa.aa_port = sc->sc_port;
422	ipd_aa.aa_regt = sc->sc_regt;
423	ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
424	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
425	octeon_ipd_init(&ipd_aa, &sc->sc_ipd);
426}
427
428/* XXX */
429static void
430octeon_eth_pko_init(struct octeon_eth_softc *sc)
431{
432	struct octeon_pko_attach_args pko_aa;
433
434	pko_aa.aa_port = sc->sc_port;
435	pko_aa.aa_regt = sc->sc_regt;
436	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
437	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
438	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
439	octeon_pko_init(&pko_aa, &sc->sc_pko);
440}
441
442/* XXX */
443static void
444octeon_eth_asx_init(struct octeon_eth_softc *sc)
445{
446	struct octeon_asx_attach_args asx_aa;
447
448	asx_aa.aa_port = sc->sc_port;
449	asx_aa.aa_regt = sc->sc_regt;
450	octeon_asx_init(&asx_aa, &sc->sc_asx);
451}
452
453static void
454octeon_eth_smi_init(struct octeon_eth_softc *sc)
455{
456	struct octeon_smi_attach_args smi_aa;
457
458	smi_aa.aa_port = sc->sc_port;
459	smi_aa.aa_regt = sc->sc_regt;
460	octeon_smi_init(&smi_aa, &sc->sc_smi);
461	octeon_smi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
462}
463
464/* ---- XXX */
465
466#define	ADDR2UINT64(u, a) \
467	do { \
468		u = \
469		    (((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) | \
470		     ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) | \
471		     ((uint64_t)a[4] <<  8) | ((uint64_t)a[5] <<  0)); \
472	} while (0)
473#define	UINT642ADDR(a, u) \
474	do { \
475		a[0] = (uint8_t)((u) >> 40); a[1] = (uint8_t)((u) >> 32); \
476		a[2] = (uint8_t)((u) >> 24); a[3] = (uint8_t)((u) >> 16); \
477		a[4] = (uint8_t)((u) >>  8); a[5] = (uint8_t)((u) >>  0); \
478	} while (0)
479
480static void
481octeon_eth_board_mac_addr(uint8_t *enaddr, size_t size, struct octeon_eth_softc *sc)
482{
483	prop_dictionary_t dict;
484	prop_data_t ea;
485
486	dict = device_properties(sc->sc_dev);
487	KASSERT(dict != NULL);
488	ea = prop_dictionary_get(dict, "mac-address");
489	KASSERT(ea != NULL);
490	memcpy(enaddr, prop_data_data_nocopy(ea), size);
491}
492
493/* ---- media */
494
495static int
496octeon_eth_mii_readreg(device_t self, int phy_addr, int reg)
497{
498	struct octeon_eth_softc *sc = device_private(self);
499
500	return octeon_smi_read(sc->sc_smi, phy_addr, reg);
501}
502
503static void
504octeon_eth_mii_writereg(device_t self, int phy_addr, int reg, int value)
505{
506	struct octeon_eth_softc *sc = device_private(self);
507
508	octeon_smi_write(sc->sc_smi, phy_addr, reg, value);
509}
510
511static void
512octeon_eth_mii_statchg(struct ifnet *ifp)
513{
514	struct octeon_eth_softc *sc = ifp->if_softc;
515
516	octeon_pko_port_enable(sc->sc_pko, 0);
517	octeon_gmx_port_enable(sc->sc_gmx_port, 0);
518
519	octeon_eth_reset(sc);
520
521	if (ISSET(ifp->if_flags, IFF_RUNNING))
522		octeon_gmx_set_filter(sc->sc_gmx_port);
523
524	octeon_pko_port_enable(sc->sc_pko, 1);
525	octeon_gmx_port_enable(sc->sc_gmx_port, 1);
526}
527
528static int
529octeon_eth_mediainit(struct octeon_eth_softc *sc)
530{
531	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
532	prop_object_t phy;
533
534	sc->sc_mii.mii_ifp = ifp;
535	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
536	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
537	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
538	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
539	    octeon_eth_mediastatus);
540
541	phy = prop_dictionary_get(device_properties(sc->sc_dev), "phy-addr");
542	KASSERT(phy != NULL);
543
544	mii_attach(sc->sc_dev, &sc->sc_mii,
545	    0xffffffff, prop_number_integer_value(phy),
546	    MII_OFFSET_ANY, MIIF_DOPAUSE);
547
548	/* XXX XXX XXX */
549	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) {
550		/* XXX XXX XXX */
551		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
552		/* XXX XXX XXX */
553	} else {
554		/* XXX XXX XXX */
555		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE,
556		    MII_MEDIA_NONE, NULL);
557		/* XXX XXX XXX */
558		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
559		/* XXX XXX XXX */
560	}
561	/* XXX XXX XXX */
562
563	return 0;
564}
565
566static void
567octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
568{
569	struct octeon_eth_softc *sc = ifp->if_softc;
570
571	mii_pollstat(&sc->sc_mii);
572
573	ifmr->ifm_status = sc->sc_mii.mii_media_status;
574	ifmr->ifm_active = sc->sc_mii.mii_media_active;
575	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
576	    sc->sc_gmx_port->sc_port_flowflags;
577}
578
579static int
580octeon_eth_mediachange(struct ifnet *ifp)
581{
582	struct octeon_eth_softc *sc = ifp->if_softc;
583
584	mii_mediachg(&sc->sc_mii);
585
586	return 0;
587}
588
589/* ---- send buffer garbage collection */
590
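/*
 * Transmit completion is tracked with the sc_fau_done fetch-and-add (FAU)
 * counter; the PKO command built in octeon_eth_send_makecmd_w0() references
 * this register, and the counter ends up at minus the number of completed
 * but not yet reclaimed transmissions (hence the <= 0 assertions below).
 * _flush_prefetch() posts an asynchronous fetch of the counter,
 * _flush_fetch() collects it into sc_hard_done_cnt, _flush() frees that many
 * queued mbufs/gather buffers and credits the counter back, and
 * _flush_sync() synchronizes sc_soft_req_cnt with the credited amount.
 */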
591static inline void
592octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
593{
594	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
595	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, 0);
596	sc->sc_prefetch = 1;
597}
598
599static inline void
600octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
601{
602#ifndef OCTEON_ETH_DEBUG
603	if (!sc->sc_prefetch)
604		return;
605#endif
606	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
607	sc->sc_hard_done_cnt = octeon_fau_op_inc_read_8(&sc->sc_fau_done);
608	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
609	sc->sc_prefetch = 0;
610}
611
612static inline void
613octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
614{
615	const int64_t sent_count = sc->sc_hard_done_cnt;
616	int i;
617
618	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
619	OCTEON_ETH_KASSERT(sent_count <= 0);
620
621	for (i = 0; i < 0 - sent_count; i++) {
622		struct mbuf *m;
623		uint64_t *gbuf;
624
625		octeon_eth_send_queue_del(sc, &m, &gbuf);
626
627		octeon_fpa_buf_put(octeon_eth_fb_sg, gbuf);
628		OCTEON_EVCNT_INC(sc, txbufgbput);
629
630		m_freem(m);
631	}
632
633	octeon_fau_op_inc_fetch_8(&sc->sc_fau_done, i);
634	sc->sc_flush = i;
635}
636
637static inline void
638octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
639{
640	if (sc->sc_flush == 0)
641		return;
642
643	OCTEON_ETH_KASSERT(sc->sc_flush > 0);
644
645	/* XXX XXX XXX */
646	octeon_fau_op_inc_read_8(&sc->sc_fau_done);
647	sc->sc_soft_req_cnt -= sc->sc_flush;
648	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
649	/* XXX XXX XXX */
650
651	sc->sc_flush = 0;
652}
653
654static inline int
655octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
656{
657#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
658	int64_t nofree_cnt;
659
660	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;
661
662	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
663		octeon_eth_send_queue_flush(sc);
664		OCTEON_EVCNT_INC(sc, txerrgbuf);
665		octeon_eth_send_queue_flush_sync(sc);
666		return 1;
667	}
668
669#endif
670	return 0;
671}
672
673/*
674 * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
675 * buffer.  Other mbuf members may be used by m_freem(), so don't touch them!
676 */
677
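/*
 * The pad members below are sized with offsetof() so that _sqe_entry
 * overlays m_nextpkt and _sqe_gbuf overlays m_paddr; an mbuf pointer can
 * therefore be cast directly to a struct _send_queue_entry pointer.
 */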
678struct _send_queue_entry {
679	union {
680		struct mbuf _sqe_s_mbuf;
681		struct {
682			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
683			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
684		} _sqe_s_entry;
685		struct {
686			char _sqe_s_gbuf_pad[offsetof(struct mbuf, m_paddr)];
687			uint64_t *_sqe_s_gbuf_gbuf;
688		} _sqe_s_gbuf;
689	} _sqe_u;
690#define	_sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
691#define	_sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
692};
693
694static inline void
695octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
696    uint64_t *gbuf)
697{
698	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;
699
700	sqe->_sqe_gbuf = gbuf;
701	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);
702
703	if ((m->m_flags & M_EXT) && m->m_ext.ext_free != NULL)
704		sc->sc_ext_callback_cnt++;
705
706	OCTEON_EVCNT_INC(sc, txadd);
707}
708
709static inline void
710octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
711    uint64_t **rgbuf)
712{
713	struct _send_queue_entry *sqe;
714
715	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
716	OCTEON_ETH_KASSERT(sqe != NULL);
717	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);
718
719	*rm = (void *)sqe;
720	*rgbuf = sqe->_sqe_gbuf;
721
722	if (((*rm)->m_flags & M_EXT) && (*rm)->m_ext.ext_free != NULL) {
723		sc->sc_ext_callback_cnt--;
724		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
725	}
726
727	OCTEON_EVCNT_INC(sc, txdel);
728}
729
730static inline int
731octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
732    uint64_t word2)
733{
734	/* XXX when jumbo frame */
735	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
736		paddr_t addr;
737		paddr_t start_buffer;
738
739		addr = work[3] & PIP_WQE_WORD3_ADDR;
740		start_buffer = addr & ~(2048 - 1);
741
742		octeon_fpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
743		OCTEON_EVCNT_INC(sc, rxbufpkput);
744	}
745
746	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
747	OCTEON_EVCNT_INC(sc, rxbufwqput);
748
749	return 0;
750}
751
752static inline void
753octeon_eth_buf_ext_free_m(struct mbuf *m, void *buf, size_t size, void *arg)
754{
755	uint64_t *work = (void *)arg;
756#ifdef OCTEON_ETH_DEBUG
757	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
758#endif
759	int s = splnet();
760
761	OCTEON_EVCNT_INC(sc, rxrs);
762
763	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
764	OCTEON_EVCNT_INC(sc, rxbufwqput);
765
766	OCTEON_ETH_KASSERT(m != NULL);
767
768	pool_cache_put(mb_cache, m);
769
770	splx(s);
771}
772
773static inline void
774octeon_eth_buf_ext_free_ext(struct mbuf *m, void *buf, size_t size,
775    void *arg)
776{
777	uint64_t *work = (void *)arg;
778#ifdef OCTEON_ETH_DEBUG
779	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
780#endif
781	int s = splnet();
782
783	octeon_fpa_buf_put(octeon_eth_fb_wqe, work);
784	OCTEON_EVCNT_INC(sc, rxbufwqput);
785
786	octeon_fpa_buf_put(octeon_eth_fb_pkt, buf);
787	OCTEON_EVCNT_INC(sc, rxbufpkput);
788
789	OCTEON_ETH_KASSERT(m != NULL);
790
791	pool_cache_put(mb_cache, m);
792
793	splx(s);
794}
795
796/* ---- ifnet interfaces */
797
798static int
799octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
800{
801	struct octeon_eth_softc *sc = ifp->if_softc;
802	struct ifreq *ifr = (struct ifreq *)data;
803	int s, error;
804
805	s = splnet();
806	switch (cmd) {
807	case SIOCSIFMEDIA:
808		/* Flow control requires full-duplex mode. */
809		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
810		    (ifr->ifr_media & IFM_FDX) == 0) {
811			ifr->ifr_media &= ~IFM_ETH_FMASK;
812		}
813		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
814			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
815				ifr->ifr_media |=
816				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
817			}
818			sc->sc_gmx_port->sc_port_flowflags =
819				ifr->ifr_media & IFM_ETH_FMASK;
820		}
821		/* FALLTHROUGH */
822	case SIOCGIFMEDIA:
823		/* XXX: Flow control */
824		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
825		break;
826	default:
827		error = ether_ioctl(ifp, cmd, data);
828		if (error == ENETRESET) {
829			/*
830			 * Multicast list has changed; set the hardware filter
831			 * accordingly.
832			 */
833			if (ISSET(ifp->if_flags, IFF_RUNNING))
834				octeon_gmx_set_filter(sc->sc_gmx_port);
835			error = 0;
836		}
837		break;
838	}
839	octeon_eth_start(ifp);
840	splx(s);
841
842	return (error);
843}
844
845/* ---- send (output) */
846
847static inline uint64_t
848octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
849{
850	return octeon_pko_cmd_word0(
851		OCT_FAU_OP_SIZE_64,		/* sz1 */
852		OCT_FAU_OP_SIZE_64,		/* sz0 */
853		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
854		0,				/* le */
855		octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
856		1, 0,				/* q, r */
857		(segs == 1) ? 0 : 1,		/* g */
858		0, 0, 1,			/* ipoffp1, ii, df */
859		segs, (int)len);		/* segs, totalbytes */
860}
861
862static inline uint64_t
863octeon_eth_send_makecmd_w1(int size, paddr_t addr)
864{
865	return octeon_pko_cmd_word1(
866		0, 0,				/* i, back */
867		FPA_GATHER_BUFFER_POOL,		/* pool */
868		size, addr);			/* size, addr */
869}
870
871static inline int
872octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
873    uint64_t *gbuf, int *rsegs)
874{
875	struct mbuf *m;
876	int segs = 0;
877	uintptr_t laddr, rlen, nlen;
878
879	for (m = m0; m != NULL; m = m->m_next) {
880
881		if (__predict_false(m->m_len == 0))
882			continue;
883
884#if 0
885		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
886		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
887#endif
888
889		/*
890		 * split mbuf data crossing a PAGE_SIZE boundary into two entries
891		 */
892		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);
893
894		if (laddr + m->m_len > PAGE_SIZE) {
895			/* XXX XXX XXX */
896			rlen = PAGE_SIZE - laddr;
897			nlen = m->m_len - rlen;
898			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
899			    kvtophys((vaddr_t)m->m_data));
900			segs++;
901			if (segs > 63) {
902				return 1;
903			}
904			/* XXX XXX XXX */
905		} else {
906			rlen = 0;
907			nlen = m->m_len;
908		}
909
910		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
911		    kvtophys((vaddr_t)(m->m_data + rlen)));
912		segs++;
913		if (segs > 63) {
914			return 1;
915		}
916	}
917
918	OCTEON_ETH_KASSERT(m == NULL);
919
920	*rsegs = segs;
921
922	return 0;
923}
924
925static inline int
926octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
927    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
928{
929	uint64_t pko_cmd_w0, pko_cmd_w1;
930	int segs;
931	int result = 0;
932
933	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
934		log(LOG_WARNING, "%s: too many segments in"
935		    " transmit data", device_xname(sc->sc_dev));
936		result = 1;
937		goto done;
938	}
939
940	/*
941	 * segs == 1	-> link mode (single continuous buffer)
942	 *		   WORD1[size] is number of bytes pointed by segment
943	 *
944	 * segs > 1	-> gather mode (scatter-gather buffer)
945	 *		   WORD1[size] is number of segments
946	 */
947	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
948	    0, m->m_pkthdr.len, segs);
949	if (segs == 1) {
950		pko_cmd_w1 = octeon_eth_send_makecmd_w1(
951		    m->m_pkthdr.len, kvtophys((vaddr_t)m->m_data));
952	} else {
953#ifdef __mips_n32
954		KASSERT(MIPS_KSEG0_P(gbuf));
955		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
956		    MIPS_KSEG0_TO_PHYS(gbuf));
957#else
958		pko_cmd_w1 = octeon_eth_send_makecmd_w1(segs,
959		    MIPS_XKPHYS_TO_PHYS(gbuf));
960#endif
961	}
962
963	*rpko_cmd_w0 = pko_cmd_w0;
964	*rpko_cmd_w1 = pko_cmd_w1;
965
966done:
967	return result;
968}
969
970static inline int
971octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
972    uint64_t pko_cmd_w1)
973{
974	uint64_t *cmdptr;
975	int result = 0;
976
977#ifdef __mips_n32
978	KASSERT((sc->sc_cmdptr.cmdptr & ~MIPS_PHYS_MASK) == 0);
979	cmdptr = (uint64_t *)MIPS_PHYS_TO_KSEG0(sc->sc_cmdptr.cmdptr);
980#else
981	cmdptr = (uint64_t *)MIPS_PHYS_TO_XKPHYS_CACHED(sc->sc_cmdptr.cmdptr);
982#endif
983	cmdptr += sc->sc_cmdptr.cmdptr_idx;
984
985	OCTEON_ETH_KASSERT(cmdptr != NULL);
986
987	*cmdptr++ = pko_cmd_w0;
988	*cmdptr++ = pko_cmd_w1;
989
990	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);
991
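	/*
	 * The two command words just written advanced the index to the last
	 * word the PKO considers part of this buffer, so allocate a fresh
	 * command buffer from the FPA, store its physical address in that
	 * last word to chain the buffers, and continue at the new buffer.
	 */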
992	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
993		paddr_t buf;
994
995		buf = octeon_fpa_buf_get_paddr(octeon_eth_fb_cmd);
996		if (buf == 0) {
997			log(LOG_WARNING,
998			    "%s: cannot allocate command buffer from free pool allocator\n",
999			    device_xname(sc->sc_dev));
1000			result = 1;
1001			goto done;
1002		}
1003		OCTEON_EVCNT_INC(sc, txbufcbget);
1004		*cmdptr++ = buf;
1005		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
1006		sc->sc_cmdptr.cmdptr_idx = 0;
1007	} else {
1008		sc->sc_cmdptr.cmdptr_idx += 2;
1009	}
1010
1011	octeon_pko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);
1012
1013done:
1014	return result;
1015}
1016
1017static inline int
1018octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1019    uint64_t *gbuf)
1020{
1021	int result = 0, error;
1022	uint64_t pko_cmd_w0, pko_cmd_w1;
1023
1024	error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1025	if (error != 0) {
1026		/* already logging */
1027		OCTEON_EVCNT_INC(sc, txerrmkcmd);
1028		result = error;
1029		goto done;
1030	}
1031
1032	error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
1033	if (error != 0) {
1034		/* already logging */
1035		OCTEON_EVCNT_INC(sc, txerrcmd);
1036		result = error;
1037	}
1038
1039done:
1040	return result;
1041}
1042
1043static inline int
1044octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
1045{
1046	paddr_t gaddr = 0;
1047	uint64_t *gbuf = NULL;
1048	int result = 0, error;
1049
1050	OCTEON_EVCNT_INC(sc, tx);
1051
1052	gaddr = octeon_fpa_buf_get_paddr(octeon_eth_fb_sg);
1053	if (gaddr == 0) {
1054		log(LOG_WARNING,
1055		    "%s: cannot allocate gather buffer from free pool allocator\n",
1056		    device_xname(sc->sc_dev));
1057		OCTEON_EVCNT_INC(sc, txerrgbuf);
1058		result = 1;
1059		goto done;
1060	}
1061	OCTEON_EVCNT_INC(sc, txbufgbget);
1062
1063#ifdef __mips_n32
1064	KASSERT((gaddr & ~MIPS_PHYS_MASK) == 0);
1065	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_KSEG0(gaddr);
1066#else
1067	gbuf = (uint64_t *)(uintptr_t)MIPS_PHYS_TO_XKPHYS_CACHED(gaddr);
1068#endif
1069
1070	OCTEON_ETH_KASSERT(gbuf != NULL);
1071
1072	error = octeon_eth_send_buf(sc, m, gbuf);
1073	if (error != 0) {
1074		/* already logging */
1075		octeon_fpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
1076		OCTEON_EVCNT_INC(sc, txbufgbput);
1077		result = error;
1078		goto done;
1079	}
1080
1081	octeon_eth_send_queue_add(sc, m, gbuf);
1082
1083done:
1084	return result;
1085}
1086
1087static void
1088octeon_eth_start(struct ifnet *ifp)
1089{
1090	struct octeon_eth_softc *sc = ifp->if_softc;
1091	struct mbuf *m;
1092
1093	/*
1094	 * performance tuning
1095	 * presend iobdma request
1096	 */
1097	octeon_eth_send_queue_flush_prefetch(sc);
1098
1099	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1100		goto last;
1101
1102	/* XXX assume that OCTEON doesn't buffer packets */
1103	if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port))) {
1104		/* dequeue and drop them */
1105		while (1) {
1106			IFQ_DEQUEUE(&ifp->if_snd, m);
1107			if (m == NULL)
1108				break;
1109
1110			m_freem(m);
1111			IF_DROP(&ifp->if_snd);
1112			OCTEON_EVCNT_INC(sc, txerrlink);
1113		}
1114		goto last;
1115	}
1116
1117	for (;;) {
1118		IFQ_POLL(&ifp->if_snd, m);
1119		if (__predict_false(m == NULL))
1120			break;
1121
1122		/* XXX XXX XXX */
1123		octeon_eth_send_queue_flush_fetch(sc);
1124
1125		/*
1126		 * If no free send buffer is available, free all the sent buffers
1127		 * and bail out.
1128		 */
1129		if (octeon_eth_send_queue_is_full(sc)) {
1130			return;
1131		}
1132		/* XXX XXX XXX */
1133
1134		IFQ_DEQUEUE(&ifp->if_snd, m);
1135
1136		bpf_mtap(ifp, m);
1137
1138		/* XXX XXX XXX */
1139		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
1140			octeon_eth_send_queue_flush(sc);
1141		if (octeon_eth_send(sc, m)) {
1142			IF_DROP(&ifp->if_snd);
1143			m_freem(m);
1144			log(LOG_WARNING,
1145			    "%s: failed to transmit packet\n",
1146			    device_xname(sc->sc_dev));
1147			OCTEON_EVCNT_INC(sc, txerr);
1148		} else {
1149			sc->sc_soft_req_cnt++;
1150		}
1151		if (sc->sc_flush)
1152			octeon_eth_send_queue_flush_sync(sc);
1153		/* XXX XXX XXX */
1154
1155		/*
1156		 * send next iobdma request
1157		 */
1158		octeon_eth_send_queue_flush_prefetch(sc);
1159	}
1160
1161/*
1162 * Don't schedule the send-buffer-free callout every time; those buffers are
1163 * freed by the "free tick", though that makes some traffic such as NFS slower.
1164 */
1165#ifdef OCTEON_ETH_USENFS
1166	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
1167		int timo;
1168
1169		/* ??? */
1170		timo = hz - (100 * sc->sc_ext_callback_cnt);
1171		if (timo < 10)
1172			timo = 10;
1173		callout_schedule(&sc->sc_tick_free_ch, timo);
1174	}
1175#endif
1176
1177last:
1178	octeon_eth_send_queue_flush_fetch(sc);
1179}
1180
1181static void
1182octeon_eth_watchdog(struct ifnet *ifp)
1183{
1184	struct octeon_eth_softc *sc = ifp->if_softc;
1185
1186	printf("%s: device timeout\n", device_xname(sc->sc_dev));
1187
1188	octeon_eth_configure(sc);
1189
1190	SET(ifp->if_flags, IFF_RUNNING);
1191	CLR(ifp->if_flags, IFF_OACTIVE);
1192	ifp->if_timer = 0;
1193
1194	octeon_eth_start(ifp);
1195}
1196
1197static int
1198octeon_eth_init(struct ifnet *ifp)
1199{
1200	struct octeon_eth_softc *sc = ifp->if_softc;
1201
1202	/* XXX don't disable commonly used parts!!! XXX */
1203	if (sc->sc_init_flag == 0) {
1204		/* Cancel any pending I/O. */
1205		octeon_eth_stop(ifp, 0);
1206
1207		/* Initialize the device */
1208		octeon_eth_configure(sc);
1209
1210		octeon_pko_enable(sc->sc_pko);
1211		octeon_ipd_enable(sc->sc_ipd);
1212
1213		sc->sc_init_flag = 1;
1214	} else {
1215		octeon_gmx_port_enable(sc->sc_gmx_port, 1);
1216	}
1217	octeon_eth_mediachange(ifp);
1218
1219	octeon_gmx_set_filter(sc->sc_gmx_port);
1220
1221	callout_reset(&sc->sc_tick_misc_ch, hz, octeon_eth_tick_misc, sc);
1222	callout_reset(&sc->sc_tick_free_ch, hz, octeon_eth_tick_free, sc);
1223
1224	SET(ifp->if_flags, IFF_RUNNING);
1225	CLR(ifp->if_flags, IFF_OACTIVE);
1226
1227	return 0;
1228}
1229
1230static void
1231octeon_eth_stop(struct ifnet *ifp, int disable)
1232{
1233	struct octeon_eth_softc *sc = ifp->if_softc;
1234
1235	callout_stop(&sc->sc_tick_misc_ch);
1236	callout_stop(&sc->sc_tick_free_ch);
1237
1238	mii_down(&sc->sc_mii);
1239
1240	octeon_gmx_port_enable(sc->sc_gmx_port, 0);
1241
1242	/* Mark the interface as down and cancel the watchdog timer. */
1243	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1244	ifp->if_timer = 0;
1245}
1246
1247/* ---- misc */
1248
1249#define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)
1250
1251static int
1252octeon_eth_reset(struct octeon_eth_softc *sc)
1253{
1254	octeon_gmx_reset_speed(sc->sc_gmx_port);
1255	octeon_gmx_reset_flowctl(sc->sc_gmx_port);
1256	octeon_gmx_reset_timing(sc->sc_gmx_port);
1257
1258	return 0;
1259}
1260
1261static int
1262octeon_eth_configure(struct octeon_eth_softc *sc)
1263{
1264	octeon_gmx_port_enable(sc->sc_gmx_port, 0);
1265
1266	octeon_eth_reset(sc);
1267
1268	octeon_eth_configure_common(sc);
1269
1270	octeon_pko_port_config(sc->sc_pko);
1271	octeon_pko_port_enable(sc->sc_pko, 1);
1272	octeon_pip_port_config(sc->sc_pip);
1273
1274	octeon_gmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
1275	octeon_gmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);
1276
1277	octeon_gmx_port_enable(sc->sc_gmx_port, 1);
1278
1279	return 0;
1280}
1281
1282static int
1283octeon_eth_configure_common(struct octeon_eth_softc *sc)
1284{
1285	static int once;
1286
1287	if (once == 1)
1288		return 0;
1289	once = 1;
1290
1291	octeon_ipd_config(sc->sc_ipd);
1292#ifdef OCTEON_ETH_IPD_RED
1293	octeon_ipd_red(sc->sc_ipd, RECV_QUEUE_SIZE >> 2, RECV_QUEUE_SIZE >> 3);
1294#endif
1295	octeon_pko_config(sc->sc_pko);
1296
1297	octeon_pow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);
1298
1299	return 0;
1300}
1301
1302/* ---- receive (input) */
1303
1304static inline int
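/*
 * Received packets arrive as POW work-queue entries (see
 * octeon_eth_recv_intr()).  octeon_eth_recv_mbuf() wraps the packet data in
 * an mbuf using external storage; the octeon_eth_buf_ext_free_*() callbacks
 * return the WQE and packet buffer to their FPA pools when the mbuf is
 * freed.
 */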
1305octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
1306    struct mbuf **rm)
1307{
1308	struct mbuf *m;
1309	void (*ext_free)(struct mbuf *, void *, size_t, void *);
1310	void *ext_buf;
1311	size_t ext_size;
1312	void *data;
1313	uint64_t word1 = work[1];
1314	uint64_t word2 = work[2];
1315	uint64_t word3 = work[3];
1316
1317	MGETHDR(m, M_NOWAIT, MT_DATA);
1318	if (m == NULL)
1319		return 1;
1320	OCTEON_ETH_KASSERT(m != NULL);
1321
1322	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
1323		/* Dynamic short: packet data is stored inline in the WQE */
1324		ext_free = octeon_eth_buf_ext_free_m;
1325		ext_buf = &work[4];
1326		ext_size = 96;
1327
1328		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
1329	} else {
1330		vaddr_t addr;
1331		vaddr_t start_buffer;
1332
1333#ifdef __mips_n32
1334		KASSERT((word3 & ~MIPS_PHYS_MASK) == 0);
1335		addr = MIPS_PHYS_TO_KSEG0(word3 & PIP_WQE_WORD3_ADDR);
1336#else
1337		addr = MIPS_PHYS_TO_XKPHYS_CACHED(word3 & PIP_WQE_WORD3_ADDR);
1338#endif
1339		start_buffer = addr & ~(2048 - 1);
1340
1341		ext_free = octeon_eth_buf_ext_free_ext;
1342		ext_buf = (void *)start_buffer;
1343		ext_size = 2048;
1344
1345		data = (void *)addr;
1346	}
1347
1348	/* embed sc pointer into work[0] for _ext_free evcnt */
1349	work[0] = (uintptr_t)sc;
1350
1351	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
1352	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));
1353
1354	m->m_data = data;
1355	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
1356	m_set_rcvif(m, &sc->sc_ethercom.ec_if);
1357	/*
1358	 * not readonly buffer
1359	 */
1360	m->m_flags |= M_EXT_RW;
1361
1362	*rm = m;
1363
1364	OCTEON_ETH_KASSERT(*rm != NULL);
1365
1366	return 0;
1367}
1368
1369static inline int
1370octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1371{
1372	uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1373
1374	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1375		return 0;
1376
1377	/* this error is harmless */
1378	if (opecode == PIP_OVER_ERR)
1379		return 0;
1380
1381	return 1;
1382}
1383
1384static inline int
1385octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
1386{
1387	if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
1388		return 1;
1389	return 0;
1390}
1391
1392static inline int
1393octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1394{
1395	if (__predict_false(!octeon_gmx_link_status(sc->sc_gmx_port)))
1396		return 1;
1397	return 0;
1398}
1399
1400static inline int
1401octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1402{
1403	if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1404		if (ratecheck(&sc->sc_rate_recv_check_link_last,
1405		    &sc->sc_rate_recv_check_link_cap))
1406			log(LOG_DEBUG,
1407			    "%s: link is not up, the packet was dropped\n",
1408			    device_xname(sc->sc_dev));
1409		OCTEON_EVCNT_INC(sc, rxerrlink);
1410		return 1;
1411	}
1412
1413#if 0 /* XXX Performance tuning (jumbo frames are not supported yet) */
1414	if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1415		/* XXX jumbo frame */
1416		if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1417		    &sc->sc_rate_recv_check_jumbo_cap))
1418			log(LOG_DEBUG,
1419			    "jumbo frame was received\n");
1420		OCTEON_EVCNT_INC(sc, rxerrjmb);
1421		return 1;
1422	}
1423#endif
1424
1425	if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1426
1427		if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1428				PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1429			/* no logging */
1430			/* XXX increment special error count */
1431		} else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1432				PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1433			/* not an error; it is caused by overload */
1434		} else {
1435
1436			if (ratecheck(&sc->sc_rate_recv_check_code_last,
1437			    &sc->sc_rate_recv_check_code_cap))
1438				log(LOG_WARNING,
1439				    "%s: reception error, packet dropped "
1440				    "(error code = %" PRId64 ")\n",
1441				    device_xname(sc->sc_dev), word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1442		}
1443		OCTEON_EVCNT_INC(sc, rxerrcode);
1444		return 1;
1445	}
1446
1447	return 0;
1448}
1449
1450static inline int
1451octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
1452{
1453	int result = 0;
1454	struct ifnet *ifp;
1455	struct mbuf *m;
1456	uint64_t word2;
1457
1458	/* XXX XXX XXX */
1459	/*
1460	 * performance tuning
1461	 * presend iobdma request
1462	 */
1463	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
1464		octeon_eth_send_queue_flush_prefetch(sc);
1465	}
1466	/* XXX XXX XXX */
1467
1468	OCTEON_ETH_KASSERT(sc != NULL);
1469	OCTEON_ETH_KASSERT(work != NULL);
1470
1471	OCTEON_EVCNT_INC(sc, rx);
1472
1473	word2 = work[2];
1474	ifp = &sc->sc_ethercom.ec_if;
1475
1476	OCTEON_ETH_KASSERT(ifp != NULL);
1477
1478	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
1479		ifp->if_ierrors++;
1480		result = 1;
1481		octeon_eth_buf_free_work(sc, work, word2);
1482		goto drop;
1483	}
1484
1485	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
1486		ifp->if_ierrors++;
1487		result = 1;
1488		octeon_eth_buf_free_work(sc, work, word2);
1489		goto drop;
1490	}
1491
1492	/* work[0] .. work[3] may not be valid any more */
1493
1494	OCTEON_ETH_KASSERT(m != NULL);
1495
1496	octeon_ipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);
1497
1498	/* XXX XXX XXX */
1499	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
1500		octeon_eth_send_queue_flush_fetch(sc);
1501		octeon_eth_send_queue_flush(sc);
1502	}
1503
1504	/* XXX XXX XXX */
1505	if (sc->sc_flush)
1506		octeon_eth_send_queue_flush_sync(sc);
1507	/* XXX XXX XXX */
1508
1509	if_percpuq_enqueue(ifp->if_percpuq, m);
1510
1511	return 0;
1512
1513drop:
1514	/* XXX XXX XXX */
1515	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
1516		octeon_eth_send_queue_flush_fetch(sc);
1517	}
1518	/* XXX XXX XXX */
1519
1520	return result;
1521}
1522
1523static void
1524octeon_eth_recv_redir(struct ifnet *ifp, struct mbuf *m)
1525{
1526	struct octeon_eth_softc *rsc = ifp->if_softc;
1527	struct octeon_eth_softc *sc = NULL;
1528	int i;
1529
1530	for (i = 0; i < 3 /* XXX */; i++) {
1531		if (rsc->sc_redir & (1 << i))
1532			sc = octeon_eth_gsc[i];
1533	}
1534
1535	if (sc == NULL) {
1536		m_freem(m);
1537		return;
1538	}
1539	octeon_eth_send_queue_flush_prefetch(sc);
1540
1541	octeon_eth_send_queue_flush_fetch(sc);
1542
1543	if (octeon_eth_send_queue_is_full(sc)) {
1544		m_freem(m);
1545		return;
1546	}
1547	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
1548		octeon_eth_send_queue_flush(sc);
1549
1550	if (octeon_eth_send(sc, m)) {
1551		IF_DROP(&ifp->if_snd);
1552		m_freem(m);
1553	} else {
1554		sc->sc_soft_req_cnt++;
1555	}
1556
1557	if (sc->sc_flush)
1558		octeon_eth_send_queue_flush_sync(sc);
1559}
1560
1561static inline void
1562octeon_eth_recv_intr(void *data, uint64_t *work)
1563{
1564	struct octeon_eth_softc *sc;
1565	int port;
1566
1567	OCTEON_ETH_KASSERT(work != NULL);
1568
1569	port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1570
1571	OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1572
1573	sc = octeon_eth_gsc[port];
1574
1575	OCTEON_ETH_KASSERT(sc != NULL);
1576	OCTEON_ETH_KASSERT(port == sc->sc_port);
1577
1578	/* XXX process all work queue entries anyway */
1579
1580	(void)octeon_eth_recv(sc, work);
1581}
1582
1583/* ---- tick */
1584
1585/*
1586 * octeon_eth_tick_free
1587 *
1588 * => garbage collect send gather buffer / mbuf
1589 * => called at softclock
1590 */
1591static void
1592octeon_eth_tick_free(void *arg)
1593{
1594	struct octeon_eth_softc *sc = arg;
1595	int timo;
1596	int s;
1597
1598	s = splnet();
1599	/* XXX XXX XXX */
1600	if (sc->sc_soft_req_cnt > 0) {
1601		octeon_eth_send_queue_flush_prefetch(sc);
1602		octeon_eth_send_queue_flush_fetch(sc);
1603		octeon_eth_send_queue_flush(sc);
1604		octeon_eth_send_queue_flush_sync(sc);
1605	}
1606	/* XXX XXX XXX */
1607
1608	/* XXX XXX XXX */
1609	/* ??? */
1610	timo = hz - (100 * sc->sc_ext_callback_cnt);
1611	if (timo < 10)
1612		 timo = 10;
1613	callout_schedule(&sc->sc_tick_free_ch, timo);
1614	/* XXX XXX XXX */
1615	splx(s);
1616}
1617
1618/*
1619 * octeon_eth_tick_misc
1620 *
1621 * => collect statistics
1622 * => check link status
1623 * => called at softclock
1624 */
1625static void
1626octeon_eth_tick_misc(void *arg)
1627{
1628	struct octeon_eth_softc *sc = arg;
1629	struct ifnet *ifp;
1630	int s;
1631
1632	s = splnet();
1633
1634	ifp = &sc->sc_ethercom.ec_if;
1635
1636	octeon_gmx_stats(sc->sc_gmx_port);
1637	octeon_pip_stats(sc->sc_pip, ifp, sc->sc_port);
1638	mii_tick(&sc->sc_mii);
1639
1640	splx(s);
1641
1642	callout_schedule(&sc->sc_tick_misc_ch, hz);
1643}
1644
1645/* ---- odd nibble preamble workaround (software CRC processing) */
1646
1647/* ---- sysctl */
1648
1649static int	octeon_eth_sysctl_verify(SYSCTLFN_ARGS);
1650static int	octeon_eth_sysctl_pool(SYSCTLFN_ARGS);
1651static int	octeon_eth_sysctl_rd(SYSCTLFN_ARGS);
1652
1653static int	octeon_eth_sysctl_pkocmdw0n2_num;
1654static int	octeon_eth_sysctl_pipdynrs_num;
1655static int	octeon_eth_sysctl_redir_num;
1656static int	octeon_eth_sysctl_pkt_pool_num;
1657static int	octeon_eth_sysctl_wqe_pool_num;
1658static int	octeon_eth_sysctl_cmd_pool_num;
1659static int	octeon_eth_sysctl_sg_pool_num;
1660static int	octeon_eth_sysctl_pktbuf_num;
1661
1662/*
1663 * Set up sysctl(3) MIB, hw.cnmac.*.
1664 */
1665SYSCTL_SETUP(sysctl_octeon_eth, "sysctl cnmac subtree setup")
1666{
1667	int rc;
1668	int octeon_eth_sysctl_root_num;
1669	const struct sysctlnode *node;
1670
1671	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
1672	    0, CTLTYPE_NODE, "hw", NULL,
1673	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
1674		goto err;
1675	}
1676
1677	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1678	    0, CTLTYPE_NODE, "cnmac",
1679	    SYSCTL_DESCR("cnmac interface controls"),
1680	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
1681		goto err;
1682	}
1683
1684	octeon_eth_sysctl_root_num = node->sysctl_num;
1685
1686	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1687	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1688	    CTLTYPE_INT, "pko_cmd_w0_n2",
1689	    SYSCTL_DESCR("PKO command WORD0 N2 bit"),
1690	    octeon_eth_sysctl_verify, 0,
1691	    &octeon_eth_param_pko_cmd_w0_n2,
1692	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1693	    CTL_EOL)) != 0) {
1694	    goto err;
1695	}
1696
1697	octeon_eth_sysctl_pkocmdw0n2_num = node->sysctl_num;
1698
1699	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1700	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1701	    CTLTYPE_INT, "pip_dyn_rs",
1702	    SYSCTL_DESCR("PIP dynamic short in WQE"),
1703	    octeon_eth_sysctl_verify, 0,
1704	    &octeon_eth_param_pip_dyn_rs,
1705	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1706	    CTL_EOL)) != 0) {
1707	    goto err;
1708	}
1709
1710	octeon_eth_sysctl_pipdynrs_num = node->sysctl_num;
1711
1712	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1713	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1714	    CTLTYPE_INT, "redir",
1715	    SYSCTL_DESCR("input port redirection"),
1716	    octeon_eth_sysctl_verify, 0,
1717	    &octeon_eth_param_redir,
1718	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1719	    CTL_EOL)) != 0) {
1720	    goto err;
1721	}
1722
1723	octeon_eth_sysctl_redir_num = node->sysctl_num;
1724
1725	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1726	    CTLFLAG_PERMANENT,
1727	    CTLTYPE_INT, "pkt_pool",
1728	    SYSCTL_DESCR("packet pool available"),
1729	    octeon_eth_sysctl_pool, 0, NULL,
1730	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1731	    CTL_EOL)) != 0) {
1732	    goto err;
1733	}
1734
1735	octeon_eth_sysctl_pkt_pool_num = node->sysctl_num;
1736
1737	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1738	    CTLFLAG_PERMANENT,
1739	    CTLTYPE_INT, "wqe_pool",
1740	    SYSCTL_DESCR("wqe pool available"),
1741	    octeon_eth_sysctl_pool, 0, NULL,
1742	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1743	    CTL_EOL)) != 0) {
1744	    goto err;
1745	}
1746
1747	octeon_eth_sysctl_wqe_pool_num = node->sysctl_num;
1748
1749	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1750	    CTLFLAG_PERMANENT,
1751	    CTLTYPE_INT, "cmd_pool",
1752	    SYSCTL_DESCR("cmd pool available"),
1753	    octeon_eth_sysctl_pool, 0, NULL,
1754	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1755	    CTL_EOL)) != 0) {
1756	    goto err;
1757	}
1758
1759	octeon_eth_sysctl_cmd_pool_num = node->sysctl_num;
1760
1761	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1762	    CTLFLAG_PERMANENT,
1763	    CTLTYPE_INT, "sg_pool",
1764	    SYSCTL_DESCR("sg pool available"),
1765	    octeon_eth_sysctl_pool, 0, NULL,
1766	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1767	    CTL_EOL)) != 0) {
1768	    goto err;
1769	}
1770
1771	octeon_eth_sysctl_sg_pool_num = node->sysctl_num;
1772
1773	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1774	    CTLFLAG_PERMANENT | CTLFLAG_READONLY,
1775	    CTLTYPE_INT, "pktbuf",
1776	    SYSCTL_DESCR("input packet buffer size on POW"),
1777	    octeon_eth_sysctl_rd, 0,
1778	    &octeon_eth_param_pktbuf,
1779	    0, CTL_HW, octeon_eth_sysctl_root_num, CTL_CREATE,
1780	    CTL_EOL)) != 0) {
1781	    goto err;
1782	}
1783
1784	octeon_eth_sysctl_pktbuf_num = node->sysctl_num;
1785
1786	return;
1787
1788err:
1789	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
1790}
1791
1792static int
1793octeon_eth_sysctl_verify(SYSCTLFN_ARGS)
1794{
1795	int error, v;
1796	struct sysctlnode node;
1797	struct octeon_eth_softc *sc;
1798	int i;
1799	int s;
1800
1801	node = *rnode;
1802	v = *(int *)rnode->sysctl_data;
1803	node.sysctl_data = &v;
1804	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1805	if (error || newp == NULL)
1806		return error;
1807
1808	if (node.sysctl_num == octeon_eth_sysctl_pkocmdw0n2_num) {
1809		if (v < 0 || v > 1)
1810			return EINVAL;
1811		*(int *)rnode->sysctl_data = v;
1812		return 0;
1813	}
1814
1815	if (node.sysctl_num == octeon_eth_sysctl_pipdynrs_num) {
1816		if (v < 0 || v > 1)
1817			return EINVAL;
1818		*(int *)rnode->sysctl_data = v;
1819		s = splnet();
1820		for (i = 0; i < 3/* XXX */; i++) {
1821			sc = octeon_eth_gsc[i];	/* XXX */
1822			octeon_pip_prt_cfg_enable(sc->sc_pip, PIP_PRT_CFGN_DYN_RS, v);
1823		}
1824		splx(s);
1825		return 0;
1826	}
1827
1828	if (node.sysctl_num == octeon_eth_sysctl_redir_num) {
1829		if (v & ~((0x7 << (4 * 0)) | (0x7 << (4 * 1)) | (0x7 << (4 * 2))))
1830			return EINVAL;
1831		*(int *)rnode->sysctl_data = v;
1832		s = splnet();
1833		for (i = 0; i < 3/* XXX */; i++) {
1834			struct ifnet *ifp;
1835
1836			sc = octeon_eth_gsc[i];	/* XXX */
1837			ifp = &sc->sc_ethercom.ec_if;
1838
1839			sc->sc_redir = (octeon_eth_param_redir >> (4 * i)) & 0x7;
1840			if (sc->sc_redir == 0) {
1841				if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1842					CLR(ifp->if_flags, IFF_PROMISC);
1843					octeon_eth_mii_statchg(ifp);
1844					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
1845				}
1846				ifp->_if_input = ether_input;
1847			}
1848			else {
1849				if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
1850					SET(ifp->if_flags, IFF_PROMISC);
1851					octeon_eth_mii_statchg(ifp);
1852					/* octeon_gmx_set_filter(sc->sc_gmx_port); */
1853				}
1854				ifp->_if_input = octeon_eth_recv_redir;
1855			}
1856		}
1857		splx(s);
1858		return 0;
1859	}
1860
1861	return EINVAL;
1862}
1863
1864static int
1865octeon_eth_sysctl_pool(SYSCTLFN_ARGS)
1866{
1867	int error, newval = 0;
1868	struct sysctlnode node;
1869	int s;
1870
1871	node = *rnode;
1872	node.sysctl_data = &newval;
1873	s = splnet();
1874	if (node.sysctl_num == octeon_eth_sysctl_pkt_pool_num) {
1875		error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_PKT);
1876	} else if (node.sysctl_num == octeon_eth_sysctl_wqe_pool_num) {
1877		error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_WQE);
1878	} else if (node.sysctl_num == octeon_eth_sysctl_cmd_pool_num) {
1879		error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_CMD);
1880	} else if (node.sysctl_num == octeon_eth_sysctl_sg_pool_num) {
1881		error = octeon_fpa_available_fpa_pool(&newval, OCTEON_POOL_NO_SG);
1882	} else {
1883		splx(s);
1884		return EINVAL;
1885	}
1886	splx(s);
1887	if (error)
1888		return error;
1889	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1890	if (error || newp == NULL)
1891		return error;
1892
1893	return 0;
1894}
1895
1896static int
1897octeon_eth_sysctl_rd(SYSCTLFN_ARGS)
1898{
1899	int error, v;
1900	struct sysctlnode node;
1901	int s;
1902
1903	node = *rnode;
1904	v = *(int *)rnode->sysctl_data;
1905	node.sysctl_data = &v;
1906	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1907	if (error || newp != NULL)
1908		return error;
1909
1910	if (node.sysctl_num == octeon_eth_sysctl_pktbuf_num) {
1911		uint64_t tmp;
1912		int n;
1913
1914		s = splnet();
1915		tmp = octeon_fpa_query(0);
1916		n = (int)tmp;
1917		splx(s);
1918		*(int *)rnode->sysctl_data = n;
1919		octeon_eth_param_pktbuf = n;
1920		*(int *)oldp = n;
1921		return 0;
1922	}
1923
1924	return EINVAL;
1925}
1926