1/*
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6 * Copyright (c) 2013 Jeremiah Lott, Avere Systems
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer
14 *    in this position and unchanged.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35#include <sys/types.h>
36#ifndef WITHOUT_CAPSICUM
37#include <sys/capsicum.h>
38#endif
39#include <sys/limits.h>
40#include <sys/ioctl.h>
41#include <sys/uio.h>
42#include <net/ethernet.h>
43#include <netinet/in.h>
44#include <netinet/tcp.h>
45
46#ifndef WITHOUT_CAPSICUM
47#include <capsicum_helpers.h>
48#endif
49#include <machine/vmm_snapshot.h>
50
51#include <err.h>
52#include <errno.h>
53#include <fcntl.h>
54#include <md5.h>
55#include <stdio.h>
56#include <stdlib.h>
57#include <string.h>
58#include <sysexits.h>
59#include <unistd.h>
60#include <pthread.h>
61#include <pthread_np.h>
62
63#include "e1000_regs.h"
64#include "e1000_defines.h"
65#include "mii.h"
66
67#include "bhyverun.h"
68#include "debug.h"
69#include "pci_emul.h"
70#include "mevent.h"
71#include "net_utils.h"
72#include "net_backends.h"
73
74/* Hardware/register definitions XXX: move some to common code. */
75#define E82545_VENDOR_ID_INTEL			0x8086
76#define E82545_DEV_ID_82545EM_COPPER		0x100F
77#define E82545_SUBDEV_ID			0x1008
78
79#define E82545_REVISION_4			4
80
81#define E82545_MDIC_DATA_MASK			0x0000FFFF
82#define E82545_MDIC_OP_MASK			0x0c000000
83#define E82545_MDIC_IE				0x20000000
84
85#define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
86#define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
87#define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
88
89#define E82545_BAR_REGISTER			0
90#define E82545_BAR_REGISTER_LEN			(128*1024)
91#define E82545_BAR_FLASH			1
92#define E82545_BAR_FLASH_LEN			(64*1024)
93#define E82545_BAR_IO				2
94#define E82545_BAR_IO_LEN			8
95
96#define E82545_IOADDR				0x00000000
97#define E82545_IODATA				0x00000004
98#define E82545_IO_REGISTER_MAX			0x0001FFFF
99#define E82545_IO_FLASH_BASE			0x00080000
100#define E82545_IO_FLASH_MAX			0x000FFFFF
101
102#define E82545_ARRAY_ENTRY(reg, offset)		(reg + (offset<<2))
103#define E82545_RAR_MAX				15
104#define E82545_MTA_MAX				127
105#define E82545_VFTA_MAX				127
106
107/* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
108 * followed by 6 address bits.
109 * TODO: make opcode bits and addr bits configurable?
110 * NVM Commands - Microwire */
111#define E82545_NVM_OPCODE_BITS	3
112#define E82545_NVM_ADDR_BITS	6
113#define E82545_NVM_DATA_BITS	16
114#define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
115#define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
116#define E82545_NVM_OPCODE_MASK	\
117    (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
118#define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
119#define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
120#define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
121#define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
122
123#define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 128K */
124
125#define E1000_ICR_SRPD		0x00010000
126
127/* This is an arbitrary number.  There is no hard limit on the chip. */
128#define I82545_MAX_TXSEGS	64
129
130/* Legacy receive descriptor */
131struct e1000_rx_desc {
132	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
133	uint16_t length;	/* Length of data DMAed into data buffer */
134	uint16_t csum;		/* Packet checksum */
135	uint8_t	 status;       	/* Descriptor status */
136	uint8_t  errors;	/* Descriptor Errors */
137	uint16_t special;
138};
139
140/* Transmit descriptor types */
141#define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
142#define E1000_TXD_TYP_L		(0)
143#define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
144#define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
145
146/* Legacy transmit descriptor */
147struct e1000_tx_desc {
148	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
149	union {
150		uint32_t data;
151		struct {
152			uint16_t length;  /* Data buffer length */
153			uint8_t  cso;  /* Checksum offset */
154			uint8_t  cmd;  /* Descriptor control */
155		} flags;
156	} lower;
157	union {
158		uint32_t data;
159		struct {
160			uint8_t status; /* Descriptor status */
161			uint8_t css;  /* Checksum start */
162			uint16_t special;
163		} fields;
164	} upper;
165};
166
167/* Context descriptor */
168struct e1000_context_desc {
169	union {
170		uint32_t ip_config;
171		struct {
172			uint8_t ipcss;  /* IP checksum start */
173			uint8_t ipcso;  /* IP checksum offset */
174			uint16_t ipcse;  /* IP checksum end */
175		} ip_fields;
176	} lower_setup;
177	union {
178		uint32_t tcp_config;
179		struct {
180			uint8_t tucss;  /* TCP checksum start */
181			uint8_t tucso;  /* TCP checksum offset */
182			uint16_t tucse;  /* TCP checksum end */
183		} tcp_fields;
184	} upper_setup;
185	uint32_t cmd_and_length;
186	union {
187		uint32_t data;
188		struct {
189			uint8_t status;  /* Descriptor status */
190			uint8_t hdr_len;  /* Header length */
191			uint16_t mss;  /* Maximum segment size */
192		} fields;
193	} tcp_seg_setup;
194};
195
196/* Data descriptor */
197struct e1000_data_desc {
198	uint64_t buffer_addr;  /* Address of the descriptor's buffer address */
199	union {
200		uint32_t data;
201		struct {
202			uint16_t length;  /* Data buffer length */
203			uint8_t typ_len_ext;
204			uint8_t cmd;
205		} flags;
206	} lower;
207	union {
208		uint32_t data;
209		struct {
210			uint8_t status;  /* Descriptor status */
211			uint8_t popts;  /* Packet Options */
212			uint16_t special;
213		} fields;
214	} upper;
215};
216
217union e1000_tx_udesc {
218	struct e1000_tx_desc td;
219	struct e1000_context_desc cd;
220	struct e1000_data_desc dd;
221};
222
223/* Tx checksum info for a packet. */
224struct ck_info {
225	int	ck_valid;	/* ck_info is valid */
226	uint8_t	ck_start;	/* start byte of cksum calcuation */
227	uint8_t	ck_off;		/* offset of cksum insertion */
228	uint16_t ck_len;	/* length of cksum calc: 0 is to packet-end */
229};
230
231/*
232 * Debug printf
233 */
234static int e82545_debug = 0;
235#define WPRINTF(msg,params...) PRINTLN("e82545: " msg, params)
236#define DPRINTF(msg,params...) if (e82545_debug) WPRINTF(msg, params)
237
238#define	MIN(a,b) (((a)<(b))?(a):(b))
239#define	MAX(a,b) (((a)>(b))?(a):(b))
240
241/* s/w representation of the RAL/RAH regs */
242struct  eth_uni {
243	int		eu_valid;
244	int		eu_addrsel;
245	struct ether_addr eu_eth;
246};
247
248
249struct e82545_softc {
250	struct pci_devinst *esc_pi;
251	struct vmctx	*esc_ctx;
252	struct mevent   *esc_mevpitr;
253	pthread_mutex_t	esc_mtx;
254	struct ether_addr esc_mac;
255	net_backend_t	*esc_be;
256
257	/* General */
258	uint32_t	esc_CTRL;	/* x0000 device ctl */
259	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
260	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
261	uint32_t	esc_FCT;	/* x0030 flow ctl type */
262	uint32_t	esc_VET;	/* x0038 VLAN eth type */
263	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
264	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
265	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
266
267	/* Interrupt control */
268	int		esc_irq_asserted;
269	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
270	uint32_t	esc_ITR;	/* x00C4 intr throttling */
271	uint32_t	esc_ICS;	/* x00C8 cause set */
272	uint32_t	esc_IMS;	/* x00D0 mask set/read */
273	uint32_t	esc_IMC;	/* x00D8 mask clear */
274
275	/* Transmit */
276	union e1000_tx_udesc *esc_txdesc;
277	struct e1000_context_desc esc_txctx;
278	pthread_t	esc_tx_tid;
279	pthread_cond_t	esc_tx_cond;
280	int		esc_tx_enabled;
281	int		esc_tx_active;
282	uint32_t	esc_TXCW;	/* x0178 transmit config */
283	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
284	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
285	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
286	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
287	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
288	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
289	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
290	uint16_t	esc_TDH;	/* x3810 desc table head idx */
291	uint16_t	esc_TDHr;	/* internal read version of TDH */
292	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
293	uint32_t	esc_TIDV;	/* x3820 intr delay */
294	uint32_t	esc_TXDCTL;	/* x3828 desc control */
295	uint32_t	esc_TADV;	/* x382C intr absolute delay */
296
297	/* L2 frame acceptance */
298	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
299	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
300	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
301
302	/* Receive */
303	struct e1000_rx_desc *esc_rxdesc;
304	pthread_cond_t	esc_rx_cond;
305	int		esc_rx_enabled;
306	int		esc_rx_active;
307	int		esc_rx_loopback;
308	uint32_t	esc_RCTL;	/* x0100 receive ctl */
309	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
310	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
311	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
312	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
313	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
314	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
315	uint16_t	esc_RDH;	/* x2810 desc table head idx */
316	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
317	uint32_t	esc_RDTR;	/* x2820 intr delay */
318	uint32_t	esc_RXDCTL;	/* x2828 desc control */
319	uint32_t	esc_RADV;	/* x282C intr absolute delay */
320	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
321	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
322
323	/* IO Port register access */
324	uint32_t io_addr;
325
326	/* Shadow copy of MDIC */
327	uint32_t mdi_control;
328	/* Shadow copy of EECD */
329	uint32_t eeprom_control;
330	/* Latest NVM in/out */
331	uint16_t nvm_data;
332	uint16_t nvm_opaddr;
333	/* stats */
334	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
335	uint32_t pkt_rx_by_size[6];
336	uint32_t pkt_tx_by_size[6];
337	uint32_t good_pkt_rx_count;
338	uint32_t bcast_pkt_rx_count;
339	uint32_t mcast_pkt_rx_count;
340	uint32_t good_pkt_tx_count;
341	uint32_t bcast_pkt_tx_count;
342	uint32_t mcast_pkt_tx_count;
343	uint32_t oversize_rx_count;
344	uint32_t tso_tx_count;
345	uint64_t good_octets_rx;
346	uint64_t good_octets_tx;
347	uint64_t missed_octets; /* counts missed and oversized */
348
349	uint8_t nvm_bits:6; /* number of bits remaining in/out */
350	uint8_t nvm_mode:2;
351#define E82545_NVM_MODE_OPADDR  0x0
352#define E82545_NVM_MODE_DATAIN  0x1
353#define E82545_NVM_MODE_DATAOUT 0x2
354	/* EEPROM data */
355	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
356};
357
358static void e82545_reset(struct e82545_softc *sc, int dev);
359static void e82545_rx_enable(struct e82545_softc *sc);
360static void e82545_rx_disable(struct e82545_softc *sc);
361static void e82545_rx_callback(int fd, enum ev_type type, void *param);
362static void e82545_tx_start(struct e82545_softc *sc);
363static void e82545_tx_enable(struct e82545_softc *sc);
364static void e82545_tx_disable(struct e82545_softc *sc);
365
366static inline int
367e82545_size_stat_index(uint32_t size)
368{
369	if (size <= 64) {
370		return 0;
371	} else if (size >= 1024) {
372		return 5;
373	} else {
374		/* should be 1-4 */
375		return (ffs(size) - 6);
376	}
377}
378
379static void
380e82545_init_eeprom(struct e82545_softc *sc)
381{
382	uint16_t checksum, i;
383
384        /* mac addr */
385	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
386		(((uint16_t)sc->esc_mac.octet[1]) << 8);
387	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
388		(((uint16_t)sc->esc_mac.octet[3]) << 8);
389	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
390		(((uint16_t)sc->esc_mac.octet[5]) << 8);
391
392	/* pci ids */
393	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
394	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
395	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
396	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
397
398	/* fill in the checksum */
399        checksum = 0;
400	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
401		checksum += sc->eeprom_data[i];
402	}
403	checksum = NVM_SUM - checksum;
404	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
405	DPRINTF("eeprom checksum: 0x%x", checksum);
406}
407
408static void
409e82545_write_mdi(struct e82545_softc *sc, uint8_t reg_addr,
410			uint8_t phy_addr, uint32_t data)
411{
412	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x", reg_addr, phy_addr, data);
413}
414
415static uint32_t
416e82545_read_mdi(struct e82545_softc *sc, uint8_t reg_addr,
417			uint8_t phy_addr)
418{
419	//DPRINTF("Read mdi reg:0x%x phy:0x%x", reg_addr, phy_addr);
420	switch (reg_addr) {
421	case PHY_STATUS:
422		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
423			MII_SR_AUTONEG_COMPLETE);
424	case PHY_AUTONEG_ADV:
425		return NWAY_AR_SELECTOR_FIELD;
426	case PHY_LP_ABILITY:
427		return 0;
428	case PHY_1000T_STATUS:
429		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
430			SR_1000T_LOCAL_RX_STATUS);
431	case PHY_ID1:
432		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
433	case PHY_ID2:
434		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
435	default:
436		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x", reg_addr, phy_addr);
437		return 0;
438	}
439	/* not reached */
440}
441
442static void
443e82545_eecd_strobe(struct e82545_softc *sc)
444{
445	/* Microwire state machine */
446	/*
447	DPRINTF("eeprom state machine srtobe "
448		"0x%x 0x%x 0x%x 0x%x",
449		sc->nvm_mode, sc->nvm_bits,
450		sc->nvm_opaddr, sc->nvm_data);*/
451
452	if (sc->nvm_bits == 0) {
453		DPRINTF("eeprom state machine not expecting data! "
454			"0x%x 0x%x 0x%x 0x%x",
455			sc->nvm_mode, sc->nvm_bits,
456			sc->nvm_opaddr, sc->nvm_data);
457		return;
458	}
459	sc->nvm_bits--;
460	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
461		/* shifting out */
462		if (sc->nvm_data & 0x8000) {
463			sc->eeprom_control |= E1000_EECD_DO;
464		} else {
465			sc->eeprom_control &= ~E1000_EECD_DO;
466		}
467		sc->nvm_data <<= 1;
468		if (sc->nvm_bits == 0) {
469			/* read done, back to opcode mode. */
470			sc->nvm_opaddr = 0;
471			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
472			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
473		}
474	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
475		/* shifting in */
476		sc->nvm_data <<= 1;
477		if (sc->eeprom_control & E1000_EECD_DI) {
478			sc->nvm_data |= 1;
479		}
480		if (sc->nvm_bits == 0) {
481			/* eeprom write */
482			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
483			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
484			if (op != E82545_NVM_OPCODE_WRITE) {
485				DPRINTF("Illegal eeprom write op 0x%x",
486					sc->nvm_opaddr);
487			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
488				DPRINTF("Illegal eeprom write addr 0x%x",
489					sc->nvm_opaddr);
490			} else {
491				DPRINTF("eeprom write eeprom[0x%x] = 0x%x",
492				addr, sc->nvm_data);
493				sc->eeprom_data[addr] = sc->nvm_data;
494			}
495			/* back to opcode mode */
496			sc->nvm_opaddr = 0;
497			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
498			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
499		}
500	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
501		sc->nvm_opaddr <<= 1;
502		if (sc->eeprom_control & E1000_EECD_DI) {
503			sc->nvm_opaddr |= 1;
504		}
505		if (sc->nvm_bits == 0) {
506			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
507			switch (op) {
508			case E82545_NVM_OPCODE_EWEN:
509				DPRINTF("eeprom write enable: 0x%x",
510					sc->nvm_opaddr);
511				/* back to opcode mode */
512				sc->nvm_opaddr = 0;
513				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
514				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
515				break;
516			case E82545_NVM_OPCODE_READ:
517			{
518				uint16_t addr = sc->nvm_opaddr &
519					E82545_NVM_ADDR_MASK;
520				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
521				sc->nvm_bits = E82545_NVM_DATA_BITS;
522				if (addr < E82545_NVM_EEPROM_SIZE) {
523					sc->nvm_data = sc->eeprom_data[addr];
524					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x",
525						addr, sc->nvm_data);
526				} else {
527					DPRINTF("eeprom illegal read: 0x%x",
528						sc->nvm_opaddr);
529					sc->nvm_data = 0;
530				}
531				break;
532			}
533			case E82545_NVM_OPCODE_WRITE:
534				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
535				sc->nvm_bits = E82545_NVM_DATA_BITS;
536				sc->nvm_data = 0;
537				break;
538			default:
539				DPRINTF("eeprom unknown op: 0x%x",
540					sc->nvm_opaddr);
541				/* back to opcode mode */
542				sc->nvm_opaddr = 0;
543				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
544				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
545			}
546		}
547	} else {
548		DPRINTF("eeprom state machine wrong state! "
549			"0x%x 0x%x 0x%x 0x%x",
550			sc->nvm_mode, sc->nvm_bits,
551			sc->nvm_opaddr, sc->nvm_data);
552	}
553}
554
555static void
556e82545_itr_callback(int fd, enum ev_type type, void *param)
557{
558	uint32_t new;
559	struct e82545_softc *sc = param;
560
561	pthread_mutex_lock(&sc->esc_mtx);
562	new = sc->esc_ICR & sc->esc_IMS;
563	if (new && !sc->esc_irq_asserted) {
564		DPRINTF("itr callback: lintr assert %x", new);
565		sc->esc_irq_asserted = 1;
566		pci_lintr_assert(sc->esc_pi);
567	} else {
568		mevent_delete(sc->esc_mevpitr);
569		sc->esc_mevpitr = NULL;
570	}
571	pthread_mutex_unlock(&sc->esc_mtx);
572}
573
574static void
575e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
576{
577	uint32_t new;
578
579	DPRINTF("icr assert: 0x%x", bits);
580
581	/*
582	 * An interrupt is only generated if bits are set that
583	 * aren't already in the ICR, these bits are unmasked,
584	 * and there isn't an interrupt already pending.
585	 */
586	new = bits & ~sc->esc_ICR & sc->esc_IMS;
587	sc->esc_ICR |= bits;
588
589	if (new == 0) {
590		DPRINTF("icr assert: masked %x, ims %x", new, sc->esc_IMS);
591	} else if (sc->esc_mevpitr != NULL) {
592		DPRINTF("icr assert: throttled %x, ims %x", new, sc->esc_IMS);
593	} else if (!sc->esc_irq_asserted) {
594		DPRINTF("icr assert: lintr assert %x", new);
595		sc->esc_irq_asserted = 1;
596		pci_lintr_assert(sc->esc_pi);
597		if (sc->esc_ITR != 0) {
598			sc->esc_mevpitr = mevent_add(
599			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
600			    EVF_TIMER, e82545_itr_callback, sc);
601		}
602	}
603}
604
605static void
606e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
607{
608	uint32_t new;
609
610	/*
611	 * Changing the mask may allow previously asserted
612	 * but masked interrupt requests to generate an interrupt.
613	 */
614	new = bits & sc->esc_ICR & ~sc->esc_IMS;
615	sc->esc_IMS |= bits;
616
617	if (new == 0) {
618		DPRINTF("ims change: masked %x, ims %x", new, sc->esc_IMS);
619	} else if (sc->esc_mevpitr != NULL) {
620		DPRINTF("ims change: throttled %x, ims %x", new, sc->esc_IMS);
621	} else if (!sc->esc_irq_asserted) {
622		DPRINTF("ims change: lintr assert %x", new);
623		sc->esc_irq_asserted = 1;
624		pci_lintr_assert(sc->esc_pi);
625		if (sc->esc_ITR != 0) {
626			sc->esc_mevpitr = mevent_add(
627			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
628			    EVF_TIMER, e82545_itr_callback, sc);
629		}
630	}
631}
632
633static void
634e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
635{
636
637	DPRINTF("icr deassert: 0x%x", bits);
638	sc->esc_ICR &= ~bits;
639
640	/*
641	 * If there are no longer any interrupt sources and there
642	 * was an asserted interrupt, clear it
643	 */
644	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
645		DPRINTF("icr deassert: lintr deassert %x", bits);
646		pci_lintr_deassert(sc->esc_pi);
647		sc->esc_irq_asserted = 0;
648	}
649}
650
651static void
652e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
653{
654
655	DPRINTF("intr_write: off %x, val %x", offset, value);
656
657	switch (offset) {
658	case E1000_ICR:
659		e82545_icr_deassert(sc, value);
660		break;
661	case E1000_ITR:
662		sc->esc_ITR = value;
663		break;
664	case E1000_ICS:
665		sc->esc_ICS = value;	/* not used: store for debug */
666		e82545_icr_assert(sc, value);
667		break;
668	case E1000_IMS:
669		e82545_ims_change(sc, value);
670		break;
671	case E1000_IMC:
672		sc->esc_IMC = value;	/* for debug */
673		sc->esc_IMS &= ~value;
674		// XXX clear interrupts if all ICR bits now masked
675		// and interrupt was pending ?
676		break;
677	default:
678		break;
679	}
680}
681
682static uint32_t
683e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
684{
685	uint32_t retval;
686
687	retval = 0;
688
689	DPRINTF("intr_read: off %x", offset);
690
691	switch (offset) {
692	case E1000_ICR:
693		retval = sc->esc_ICR;
694		sc->esc_ICR = 0;
695		e82545_icr_deassert(sc, ~0);
696		break;
697	case E1000_ITR:
698		retval = sc->esc_ITR;
699		break;
700	case E1000_ICS:
701		/* write-only register */
702		break;
703	case E1000_IMS:
704		retval = sc->esc_IMS;
705		break;
706	case E1000_IMC:
707		/* write-only register */
708		break;
709	default:
710		break;
711	}
712
713	return (retval);
714}
715
716static void
717e82545_devctl(struct e82545_softc *sc, uint32_t val)
718{
719
720	sc->esc_CTRL = val & ~E1000_CTRL_RST;
721
722	if (val & E1000_CTRL_RST) {
723		DPRINTF("e1k: s/w reset, ctl %x", val);
724		e82545_reset(sc, 1);
725	}
726	/* XXX check for phy reset ? */
727}
728
729static void
730e82545_rx_update_rdba(struct e82545_softc *sc)
731{
732
733	/* XXX verify desc base/len within phys mem range */
734	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
735	    sc->esc_RDBAL;
736
737	/* Cache host mapping of guest descriptor array */
738	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
739	    sc->esc_rdba, sc->esc_RDLEN);
740}
741
742static void
743e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
744{
745	int on;
746
747	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
748
749	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
750	sc->esc_RCTL = val & ~0xF9204c01;
751
752	DPRINTF("rx_ctl - %s RCTL %x, val %x",
753		on ? "on" : "off", sc->esc_RCTL, val);
754
755	/* state change requested */
756	if (on != sc->esc_rx_enabled) {
757		if (on) {
758			/* Catch disallowed/unimplemented settings */
759			//assert(!(val & E1000_RCTL_LBM_TCVR));
760
761			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
762				sc->esc_rx_loopback = 1;
763			} else {
764				sc->esc_rx_loopback = 0;
765			}
766
767			e82545_rx_update_rdba(sc);
768			e82545_rx_enable(sc);
769		} else {
770			e82545_rx_disable(sc);
771			sc->esc_rx_loopback = 0;
772			sc->esc_rdba = 0;
773			sc->esc_rxdesc = NULL;
774		}
775	}
776}
777
778static void
779e82545_tx_update_tdba(struct e82545_softc *sc)
780{
781
782	/* XXX verify desc base/len within phys mem range */
783	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
784
785	/* Cache host mapping of guest descriptor array */
786	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
787            sc->esc_TDLEN);
788}
789
790static void
791e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
792{
793	int on;
794
795	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
796
797	/* ignore TCTL_EN settings that don't change state */
798	if (on == sc->esc_tx_enabled)
799		return;
800
801	if (on) {
802		e82545_tx_update_tdba(sc);
803		e82545_tx_enable(sc);
804	} else {
805		e82545_tx_disable(sc);
806		sc->esc_tdba = 0;
807		sc->esc_txdesc = NULL;
808	}
809
810	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
811	sc->esc_TCTL = val & ~0xFE800005;
812}
813
814int
815e82545_bufsz(uint32_t rctl)
816{
817
818	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
819	case (E1000_RCTL_SZ_2048): return (2048);
820	case (E1000_RCTL_SZ_1024): return (1024);
821	case (E1000_RCTL_SZ_512): return (512);
822	case (E1000_RCTL_SZ_256): return (256);
823	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
824	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
825	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
826	}
827	return (256);	/* Forbidden value. */
828}
829
830/* XXX one packet at a time until this is debugged */
831static void
832e82545_rx_callback(int fd, enum ev_type type, void *param)
833{
834	struct e82545_softc *sc = param;
835	struct e1000_rx_desc *rxd;
836	struct iovec vec[64];
837	int left, len, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
838	uint32_t cause = 0;
839	uint16_t *tp, tag, head;
840
841	pthread_mutex_lock(&sc->esc_mtx);
842	DPRINTF("rx_run: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
843
844	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
845		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped",
846		    sc->esc_rx_enabled, sc->esc_rx_loopback);
847		while (netbe_rx_discard(sc->esc_be) > 0) {
848		}
849		goto done1;
850	}
851	bufsz = e82545_bufsz(sc->esc_RCTL);
852	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
853	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
854	size = sc->esc_RDLEN / 16;
855	head = sc->esc_RDH;
856	left = (size + sc->esc_RDT - head) % size;
857	if (left < maxpktdesc) {
858		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped",
859		    left, maxpktdesc);
860		while (netbe_rx_discard(sc->esc_be) > 0) {
861		}
862		goto done1;
863	}
864
865	sc->esc_rx_active = 1;
866	pthread_mutex_unlock(&sc->esc_mtx);
867
868	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
869
870		/* Grab rx descriptor pointed to by the head pointer */
871		for (i = 0; i < maxpktdesc; i++) {
872			rxd = &sc->esc_rxdesc[(head + i) % size];
873			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
874			    rxd->buffer_addr, bufsz);
875			vec[i].iov_len = bufsz;
876		}
877		len = netbe_recv(sc->esc_be, vec, maxpktdesc);
878		if (len <= 0) {
879			DPRINTF("netbe_recv() returned %d", len);
880			goto done;
881		}
882
883		/*
884		 * Adjust the packet length based on whether the CRC needs
885		 * to be stripped or if the packet is less than the minimum
886		 * eth packet size.
887		 */
888		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
889			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
890		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
891			len += ETHER_CRC_LEN;
892		n = (len + bufsz - 1) / bufsz;
893
894		DPRINTF("packet read %d bytes, %d segs, head %d",
895		    len, n, head);
896
897		/* Apply VLAN filter. */
898		tp = (uint16_t *)vec[0].iov_base + 6;
899		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
900		    (ntohs(tp[0]) == sc->esc_VET)) {
901			tag = ntohs(tp[1]) & 0x0fff;
902			if ((sc->esc_fvlan[tag >> 5] &
903			    (1 << (tag & 0x1f))) != 0) {
904				DPRINTF("known VLAN %d", tag);
905			} else {
906				DPRINTF("unknown VLAN %d", tag);
907				n = 0;
908				continue;
909			}
910		}
911
912		/* Update all consumed descriptors. */
913		for (i = 0; i < n - 1; i++) {
914			rxd = &sc->esc_rxdesc[(head + i) % size];
915			rxd->length = bufsz;
916			rxd->csum = 0;
917			rxd->errors = 0;
918			rxd->special = 0;
919			rxd->status = E1000_RXD_STAT_DD;
920		}
921		rxd = &sc->esc_rxdesc[(head + i) % size];
922		rxd->length = len % bufsz;
923		rxd->csum = 0;
924		rxd->errors = 0;
925		rxd->special = 0;
926		/* XXX signal no checksum for now */
927		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
928		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
929
930		/* Schedule receive interrupts. */
931		if (len <= sc->esc_RSRPD) {
932			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
933		} else {
934			/* XXX: RDRT and RADV timers should be here. */
935			cause |= E1000_ICR_RXT0;
936		}
937
938		head = (head + n) % size;
939		left -= n;
940	}
941
942done:
943	pthread_mutex_lock(&sc->esc_mtx);
944	sc->esc_rx_active = 0;
945	if (sc->esc_rx_enabled == 0)
946		pthread_cond_signal(&sc->esc_rx_cond);
947
948	sc->esc_RDH = head;
949	/* Respect E1000_RCTL_RDMTS */
950	left = (size + sc->esc_RDT - head) % size;
951	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
952		cause |= E1000_ICR_RXDMT0;
953	/* Assert all accumulated interrupts. */
954	if (cause != 0)
955		e82545_icr_assert(sc, cause);
956done1:
957	DPRINTF("rx_run done: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
958	pthread_mutex_unlock(&sc->esc_mtx);
959}
960
961static uint16_t
962e82545_carry(uint32_t sum)
963{
964
965	sum = (sum & 0xFFFF) + (sum >> 16);
966	if (sum > 0xFFFF)
967		sum -= 0xFFFF;
968	return (sum);
969}
970
971static uint16_t
972e82545_buf_checksum(uint8_t *buf, int len)
973{
974	int i;
975	uint32_t sum = 0;
976
977	/* Checksum all the pairs of bytes first... */
978	for (i = 0; i < (len & ~1U); i += 2)
979		sum += *((u_int16_t *)(buf + i));
980
981	/*
982	 * If there's a single byte left over, checksum it, too.
983	 * Network byte order is big-endian, so the remaining byte is
984	 * the high byte.
985	 */
986	if (i < len)
987		sum += htons(buf[i] << 8);
988
989	return (e82545_carry(sum));
990}
991
992static uint16_t
993e82545_iov_checksum(struct iovec *iov, int iovcnt, int off, int len)
994{
995	int now, odd;
996	uint32_t sum = 0, s;
997
998	/* Skip completely unneeded vectors. */
999	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
1000		off -= iov->iov_len;
1001		iov++;
1002		iovcnt--;
1003	}
1004
1005	/* Calculate checksum of requested range. */
1006	odd = 0;
1007	while (len > 0 && iovcnt > 0) {
1008		now = MIN(len, iov->iov_len - off);
1009		s = e82545_buf_checksum(iov->iov_base + off, now);
1010		sum += odd ? (s << 8) : s;
1011		odd ^= (now & 1);
1012		len -= now;
1013		off = 0;
1014		iov++;
1015		iovcnt--;
1016	}
1017
1018	return (e82545_carry(sum));
1019}
1020
1021/*
1022 * Return the transmit descriptor type.
1023 */
1024int
1025e82545_txdesc_type(uint32_t lower)
1026{
1027	int type;
1028
1029	type = 0;
1030
1031	if (lower & E1000_TXD_CMD_DEXT)
1032		type = lower & E1000_TXD_MASK;
1033
1034	return (type);
1035}
1036
1037static void
1038e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
1039{
1040	uint16_t cksum;
1041	int cklen;
1042
1043	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d",
1044	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
1045	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1 : INT_MAX;
1046	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
1047	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
1048}
1049
1050static void
1051e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
1052{
1053
1054	if (sc->esc_be == NULL)
1055		return;
1056
1057	(void) netbe_send(sc->esc_be, iov, iovcnt);
1058}
1059
1060static void
1061e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1062    uint16_t dsize, int *tdwb)
1063{
1064	union e1000_tx_udesc *dsc;
1065
1066	for ( ; head != tail; head = (head + 1) % dsize) {
1067		dsc = &sc->esc_txdesc[head];
1068		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
1069			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1070			*tdwb = 1;
1071		}
1072	}
1073}
1074
1075static int
1076e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1077    uint16_t dsize, uint16_t *rhead, int *tdwb)
1078{
1079	uint8_t *hdr, *hdrp;
1080	struct iovec iovb[I82545_MAX_TXSEGS + 2];
1081	struct iovec tiov[I82545_MAX_TXSEGS + 2];
1082	struct e1000_context_desc *cd;
1083	struct ck_info ckinfo[2];
1084	struct iovec *iov;
1085	union  e1000_tx_udesc *dsc;
1086	int desc, dtype, len, ntype, iovcnt, tlen, tcp, tso;
1087	int mss, paylen, seg, tiovcnt, left, now, nleft, nnow, pv, pvoff;
1088	unsigned hdrlen, vlen;
1089	uint32_t tcpsum, tcpseq;
1090	uint16_t ipcs, tcpcs, ipid, ohead;
1091
1092	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1093	iovcnt = 0;
1094	tlen = 0;
1095	ntype = 0;
1096	tso = 0;
1097	ohead = head;
1098
1099	/* iovb[0/1] may be used for writable copy of headers. */
1100	iov = &iovb[2];
1101
1102	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1103		if (head == tail) {
1104			*rhead = head;
1105			return (0);
1106		}
1107		dsc = &sc->esc_txdesc[head];
1108		dtype = e82545_txdesc_type(dsc->td.lower.data);
1109
1110		if (desc == 0) {
1111			switch (dtype) {
1112			case E1000_TXD_TYP_C:
1113				DPRINTF("tx ctxt desc idx %d: %016jx "
1114				    "%08x%08x",
1115				    head, dsc->td.buffer_addr,
1116				    dsc->td.upper.data, dsc->td.lower.data);
1117				/* Save context and return */
1118				sc->esc_txctx = dsc->cd;
1119				goto done;
1120			case E1000_TXD_TYP_L:
1121				DPRINTF("tx legacy desc idx %d: %08x%08x",
1122				    head, dsc->td.upper.data, dsc->td.lower.data);
1123				/*
1124				 * legacy cksum start valid in first descriptor
1125				 */
1126				ntype = dtype;
1127				ckinfo[0].ck_start = dsc->td.upper.fields.css;
1128				break;
1129			case E1000_TXD_TYP_D:
1130				DPRINTF("tx data desc idx %d: %08x%08x",
1131				    head, dsc->td.upper.data, dsc->td.lower.data);
1132				ntype = dtype;
1133				break;
1134			default:
1135				break;
1136			}
1137		} else {
1138			/* Descriptor type must be consistent */
1139			assert(dtype == ntype);
1140			DPRINTF("tx next desc idx %d: %08x%08x",
1141			    head, dsc->td.upper.data, dsc->td.lower.data);
1142		}
1143
1144		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1145		    dsc->dd.lower.data & 0xFFFFF;
1146
1147		if (len > 0) {
1148			/* Strip checksum supplied by guest. */
1149			if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1150			    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0)
1151				len -= 2;
1152			tlen += len;
1153			if (iovcnt < I82545_MAX_TXSEGS) {
1154				iov[iovcnt].iov_base = paddr_guest2host(
1155				    sc->esc_ctx, dsc->td.buffer_addr, len);
1156				iov[iovcnt].iov_len = len;
1157			}
1158			iovcnt++;
1159		}
1160
1161		/*
1162		 * Pull out info that is valid in the final descriptor
1163		 * and exit descriptor loop.
1164		 */
1165		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1166			if (dtype == E1000_TXD_TYP_L) {
1167				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1168					ckinfo[0].ck_valid = 1;
1169					ckinfo[0].ck_off =
1170					    dsc->td.lower.flags.cso;
1171					ckinfo[0].ck_len = 0;
1172				}
1173			} else {
1174				cd = &sc->esc_txctx;
1175				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1176					tso = 1;
1177				if (dsc->dd.upper.fields.popts &
1178				    E1000_TXD_POPTS_IXSM)
1179					ckinfo[0].ck_valid = 1;
1180				if (dsc->dd.upper.fields.popts &
1181				    E1000_TXD_POPTS_IXSM || tso) {
1182					ckinfo[0].ck_start =
1183					    cd->lower_setup.ip_fields.ipcss;
1184					ckinfo[0].ck_off =
1185					    cd->lower_setup.ip_fields.ipcso;
1186					ckinfo[0].ck_len =
1187					    cd->lower_setup.ip_fields.ipcse;
1188				}
1189				if (dsc->dd.upper.fields.popts &
1190				    E1000_TXD_POPTS_TXSM)
1191					ckinfo[1].ck_valid = 1;
1192				if (dsc->dd.upper.fields.popts &
1193				    E1000_TXD_POPTS_TXSM || tso) {
1194					ckinfo[1].ck_start =
1195					    cd->upper_setup.tcp_fields.tucss;
1196					ckinfo[1].ck_off =
1197					    cd->upper_setup.tcp_fields.tucso;
1198					ckinfo[1].ck_len =
1199					    cd->upper_setup.tcp_fields.tucse;
1200				}
1201			}
1202			break;
1203		}
1204	}
1205
1206	if (iovcnt > I82545_MAX_TXSEGS) {
1207		WPRINTF("tx too many descriptors (%d > %d) -- dropped",
1208		    iovcnt, I82545_MAX_TXSEGS);
1209		goto done;
1210	}
1211
1212	hdrlen = vlen = 0;
1213	/* Estimate writable space for VLAN header insertion. */
1214	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1215	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1216		hdrlen = ETHER_ADDR_LEN*2;
1217		vlen = ETHER_VLAN_ENCAP_LEN;
1218	}
1219	if (!tso) {
1220		/* Estimate required writable space for checksums. */
1221		if (ckinfo[0].ck_valid)
1222			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2);
1223		if (ckinfo[1].ck_valid)
1224			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2);
1225		/* Round up writable space to the first vector. */
1226		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1227		    iov[0].iov_len < hdrlen + 100)
1228			hdrlen = iov[0].iov_len;
1229	} else {
1230		/* In case of TSO header length provided by software. */
1231		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1232
1233		/*
1234		 * Cap the header length at 240 based on 7.2.4.5 of
1235		 * the Intel 82576EB (Rev 2.63) datasheet.
1236		 */
1237		if (hdrlen > 240) {
1238			WPRINTF("TSO hdrlen too large: %d", hdrlen);
1239			goto done;
1240		}
1241
1242		/*
1243		 * If VLAN insertion is requested, ensure the header
1244		 * at least holds the amount of data copied during
1245		 * VLAN insertion below.
1246		 *
1247		 * XXX: Realistic packets will include a full Ethernet
1248		 * header before the IP header at ckinfo[0].ck_start,
1249		 * but this check is sufficient to prevent
1250		 * out-of-bounds access below.
1251		 */
1252		if (vlen != 0 && hdrlen < ETHER_ADDR_LEN*2) {
1253			WPRINTF("TSO hdrlen too small for vlan insertion "
1254			    "(%d vs %d) -- dropped", hdrlen,
1255			    ETHER_ADDR_LEN*2);
1256			goto done;
1257		}
1258
1259		/*
1260		 * Ensure that the header length covers the used fields
1261		 * in the IP and TCP headers as well as the IP and TCP
1262		 * checksums.  The following fields are accessed below:
1263		 *
1264		 * Header | Field | Offset | Length
1265		 * -------+-------+--------+-------
1266		 * IPv4   | len   | 2      | 2
1267		 * IPv4   | ID    | 4      | 2
1268		 * IPv6   | len   | 4      | 2
1269		 * TCP    | seq # | 4      | 4
1270		 * TCP    | flags | 13     | 1
1271		 * UDP    | len   | 4      | 4
1272		 */
1273		if (hdrlen < ckinfo[0].ck_start + 6 ||
1274		    hdrlen < ckinfo[0].ck_off + 2) {
1275			WPRINTF("TSO hdrlen too small for IP fields (%d) "
1276			    "-- dropped", hdrlen);
1277			goto done;
1278		}
1279		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) {
1280			if (hdrlen < ckinfo[1].ck_start + 14 ||
1281			    (ckinfo[1].ck_valid &&
1282			    hdrlen < ckinfo[1].ck_off + 2)) {
1283				WPRINTF("TSO hdrlen too small for TCP fields "
1284				    "(%d) -- dropped", hdrlen);
1285				goto done;
1286			}
1287		} else {
1288			if (hdrlen < ckinfo[1].ck_start + 8) {
1289				WPRINTF("TSO hdrlen too small for UDP fields "
1290				    "(%d) -- dropped", hdrlen);
1291				goto done;
1292			}
1293		}
1294	}
1295
1296	/* Allocate, fill and prepend writable header vector. */
1297	if (hdrlen != 0) {
1298		hdr = __builtin_alloca(hdrlen + vlen);
1299		hdr += vlen;
1300		for (left = hdrlen, hdrp = hdr; left > 0;
1301		    left -= now, hdrp += now) {
1302			now = MIN(left, iov->iov_len);
1303			memcpy(hdrp, iov->iov_base, now);
1304			iov->iov_base += now;
1305			iov->iov_len -= now;
1306			if (iov->iov_len == 0) {
1307				iov++;
1308				iovcnt--;
1309			}
1310		}
1311		iov--;
1312		iovcnt++;
1313		iov->iov_base = hdr;
1314		iov->iov_len = hdrlen;
1315	} else
1316		hdr = NULL;
1317
1318	/* Insert VLAN tag. */
1319	if (vlen != 0) {
1320		hdr -= ETHER_VLAN_ENCAP_LEN;
1321		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
1322		hdrlen += ETHER_VLAN_ENCAP_LEN;
1323		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
1324		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
1325		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
1326		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
1327		iov->iov_base = hdr;
1328		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
1329		/* Correct checksum offsets after VLAN tag insertion. */
1330		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
1331		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
1332		if (ckinfo[0].ck_len != 0)
1333			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
1334		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
1335		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
1336		if (ckinfo[1].ck_len != 0)
1337			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
1338	}
1339
1340	/* Simple non-TSO case. */
1341	if (!tso) {
1342		/* Calculate checksums and transmit. */
1343		if (ckinfo[0].ck_valid)
1344			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
1345		if (ckinfo[1].ck_valid)
1346			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
1347		e82545_transmit_backend(sc, iov, iovcnt);
1348		goto done;
1349	}
1350
1351	/* Doing TSO. */
1352	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
1353	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
1354	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
1355	DPRINTF("tx %s segmentation offload %d+%d/%d bytes %d iovs",
1356	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
1357	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
1358	tcpseq = 0;
1359	if (tcp)
1360		tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
1361	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
1362	tcpcs = 0;
1363	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
1364		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
1365	pv = 1;
1366	pvoff = 0;
1367	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
1368		now = MIN(left, mss);
1369
1370		/* Construct IOVs for the segment. */
1371		/* Include whole original header. */
1372		tiov[0].iov_base = hdr;
1373		tiov[0].iov_len = hdrlen;
1374		tiovcnt = 1;
1375		/* Include respective part of payload IOV. */
1376		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
1377			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
1378			tiov[tiovcnt].iov_base = iov[pv].iov_base + pvoff;
1379			tiov[tiovcnt++].iov_len = nnow;
1380			if (pvoff + nnow == iov[pv].iov_len) {
1381				pv++;
1382				pvoff = 0;
1383			} else
1384				pvoff += nnow;
1385		}
1386		DPRINTF("tx segment %d %d+%d bytes %d iovs",
1387		    seg, hdrlen, now, tiovcnt);
1388
1389		/* Update IP header. */
1390		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
1391			/* IPv4 -- set length and ID */
1392			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
1393			    htons(hdrlen - ckinfo[0].ck_start + now);
1394			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1395			    htons(ipid + seg);
1396		} else {
1397			/* IPv6 -- set length */
1398			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1399			    htons(hdrlen - ckinfo[0].ck_start - 40 +
1400				  now);
1401		}
1402
1403		/* Update pseudo-header checksum. */
1404		tcpsum = tcpcs;
1405		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
1406
1407		/* Update TCP/UDP headers. */
1408		if (tcp) {
1409			/* Update sequence number and FIN/PUSH flags. */
1410			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1411			    htonl(tcpseq + paylen - left);
1412			if (now < left) {
1413				hdr[ckinfo[1].ck_start + 13] &=
1414				    ~(TH_FIN | TH_PUSH);
1415			}
1416		} else {
1417			/* Update payload length. */
1418			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1419			    hdrlen - ckinfo[1].ck_start + now;
1420		}
1421
1422		/* Calculate checksums and transmit. */
1423		if (ckinfo[0].ck_valid) {
1424			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
1425			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
1426		}
1427		if (ckinfo[1].ck_valid) {
1428			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
1429			    e82545_carry(tcpsum);
1430			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
1431		}
1432		e82545_transmit_backend(sc, tiov, tiovcnt);
1433	}
1434
1435done:
1436	head = (head + 1) % dsize;
1437	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
1438
1439	*rhead = head;
1440	return (desc + 1);
1441}
1442
1443static void
1444e82545_tx_run(struct e82545_softc *sc)
1445{
1446	uint32_t cause;
1447	uint16_t head, rhead, tail, size;
1448	int lim, tdwb, sent;
1449
1450	head = sc->esc_TDH;
1451	tail = sc->esc_TDT;
1452	size = sc->esc_TDLEN / 16;
1453	DPRINTF("tx_run: head %x, rhead %x, tail %x",
1454	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1455
1456	pthread_mutex_unlock(&sc->esc_mtx);
1457	rhead = head;
1458	tdwb = 0;
1459	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
1460		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
1461		if (sent == 0)
1462			break;
1463		head = rhead;
1464	}
1465	pthread_mutex_lock(&sc->esc_mtx);
1466
1467	sc->esc_TDH = head;
1468	sc->esc_TDHr = rhead;
1469	cause = 0;
1470	if (tdwb)
1471		cause |= E1000_ICR_TXDW;
1472	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
1473		cause |= E1000_ICR_TXQE;
1474	if (cause)
1475		e82545_icr_assert(sc, cause);
1476
1477	DPRINTF("tx_run done: head %x, rhead %x, tail %x",
1478	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1479}
1480
1481static _Noreturn void *
1482e82545_tx_thread(void *param)
1483{
1484	struct e82545_softc *sc = param;
1485
1486	pthread_mutex_lock(&sc->esc_mtx);
1487	for (;;) {
1488		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
1489			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
1490				break;
1491			sc->esc_tx_active = 0;
1492			if (sc->esc_tx_enabled == 0)
1493				pthread_cond_signal(&sc->esc_tx_cond);
1494			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1495		}
1496		sc->esc_tx_active = 1;
1497
1498		/* Process some tx descriptors.  Lock dropped inside. */
1499		e82545_tx_run(sc);
1500	}
1501}
1502
1503static void
1504e82545_tx_start(struct e82545_softc *sc)
1505{
1506
1507	if (sc->esc_tx_active == 0)
1508		pthread_cond_signal(&sc->esc_tx_cond);
1509}
1510
1511static void
1512e82545_tx_enable(struct e82545_softc *sc)
1513{
1514
1515	sc->esc_tx_enabled = 1;
1516}
1517
1518static void
1519e82545_tx_disable(struct e82545_softc *sc)
1520{
1521
1522	sc->esc_tx_enabled = 0;
1523	while (sc->esc_tx_active)
1524		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1525}
1526
1527static void
1528e82545_rx_enable(struct e82545_softc *sc)
1529{
1530
1531	sc->esc_rx_enabled = 1;
1532}
1533
1534static void
1535e82545_rx_disable(struct e82545_softc *sc)
1536{
1537
1538	sc->esc_rx_enabled = 0;
1539	while (sc->esc_rx_active)
1540		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
1541}
1542
1543static void
1544e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
1545{
1546	struct eth_uni *eu;
1547	int idx;
1548
1549	idx = reg >> 1;
1550	assert(idx < 15);
1551
1552	eu = &sc->esc_uni[idx];
1553
1554	if (reg & 0x1) {
1555		/* RAH */
1556		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
1557		eu->eu_addrsel = (wval >> 16) & 0x3;
1558		eu->eu_eth.octet[5] = wval >> 8;
1559		eu->eu_eth.octet[4] = wval;
1560	} else {
1561		/* RAL */
1562		eu->eu_eth.octet[3] = wval >> 24;
1563		eu->eu_eth.octet[2] = wval >> 16;
1564		eu->eu_eth.octet[1] = wval >> 8;
1565		eu->eu_eth.octet[0] = wval;
1566	}
1567}
1568
1569static uint32_t
1570e82545_read_ra(struct e82545_softc *sc, int reg)
1571{
1572	struct eth_uni *eu;
1573	uint32_t retval;
1574	int idx;
1575
1576	idx = reg >> 1;
1577	assert(idx < 15);
1578
1579	eu = &sc->esc_uni[idx];
1580
1581	if (reg & 0x1) {
1582		/* RAH */
1583		retval = (eu->eu_valid << 31) |
1584			 (eu->eu_addrsel << 16) |
1585			 (eu->eu_eth.octet[5] << 8) |
1586			 eu->eu_eth.octet[4];
1587	} else {
1588		/* RAL */
1589		retval = (eu->eu_eth.octet[3] << 24) |
1590			 (eu->eu_eth.octet[2] << 16) |
1591			 (eu->eu_eth.octet[1] << 8) |
1592			 eu->eu_eth.octet[0];
1593	}
1594
1595	return (retval);
1596}
1597
1598static void
1599e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
1600{
1601	int ridx;
1602
1603	if (offset & 0x3) {
1604		DPRINTF("Unaligned register write offset:0x%x value:0x%x", offset, value);
1605		return;
1606	}
1607	DPRINTF("Register write: 0x%x value: 0x%x", offset, value);
1608
1609	switch (offset) {
1610	case E1000_CTRL:
1611	case E1000_CTRL_DUP:
1612		e82545_devctl(sc, value);
1613		break;
1614	case E1000_FCAL:
1615		sc->esc_FCAL = value;
1616		break;
1617	case E1000_FCAH:
1618		sc->esc_FCAH = value & ~0xFFFF0000;
1619		break;
1620	case E1000_FCT:
1621		sc->esc_FCT = value & ~0xFFFF0000;
1622		break;
1623	case E1000_VET:
1624		sc->esc_VET = value & ~0xFFFF0000;
1625		break;
1626	case E1000_FCTTV:
1627		sc->esc_FCTTV = value & ~0xFFFF0000;
1628		break;
1629	case E1000_LEDCTL:
1630		sc->esc_LEDCTL = value & ~0x30303000;
1631		break;
1632	case E1000_PBA:
1633		sc->esc_PBA = value & 0x0000FF80;
1634		break;
1635	case E1000_ICR:
1636	case E1000_ITR:
1637	case E1000_ICS:
1638	case E1000_IMS:
1639	case E1000_IMC:
1640		e82545_intr_write(sc, offset, value);
1641		break;
1642	case E1000_RCTL:
1643		e82545_rx_ctl(sc, value);
1644		break;
1645	case E1000_FCRTL:
1646		sc->esc_FCRTL = value & ~0xFFFF0007;
1647		break;
1648	case E1000_FCRTH:
1649		sc->esc_FCRTH = value & ~0xFFFF0007;
1650		break;
1651	case E1000_RDBAL(0):
1652		sc->esc_RDBAL = value & ~0xF;
1653		if (sc->esc_rx_enabled) {
1654			/* Apparently legal: update cached address */
1655			e82545_rx_update_rdba(sc);
1656		}
1657		break;
1658	case E1000_RDBAH(0):
1659		assert(!sc->esc_rx_enabled);
1660		sc->esc_RDBAH = value;
1661		break;
1662	case E1000_RDLEN(0):
1663		assert(!sc->esc_rx_enabled);
1664		sc->esc_RDLEN = value & ~0xFFF0007F;
1665		break;
1666	case E1000_RDH(0):
1667		/* XXX should only ever be zero ? Range check ? */
1668		sc->esc_RDH = value;
1669		break;
1670	case E1000_RDT(0):
1671		/* XXX if this opens up the rx ring, do something ? */
1672		sc->esc_RDT = value;
1673		break;
1674	case E1000_RDTR:
1675		/* ignore FPD bit 31 */
1676		sc->esc_RDTR = value & ~0xFFFF0000;
1677		break;
1678	case E1000_RXDCTL(0):
1679		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
1680		break;
1681	case E1000_RADV:
1682		sc->esc_RADV = value & ~0xFFFF0000;
1683		break;
1684	case E1000_RSRPD:
1685		sc->esc_RSRPD = value & ~0xFFFFF000;
1686		break;
1687	case E1000_RXCSUM:
1688		sc->esc_RXCSUM = value & ~0xFFFFF800;
1689		break;
1690	case E1000_TXCW:
1691		sc->esc_TXCW = value & ~0x3FFF0000;
1692		break;
1693	case E1000_TCTL:
1694		e82545_tx_ctl(sc, value);
1695		break;
1696	case E1000_TIPG:
1697		sc->esc_TIPG = value;
1698		break;
1699	case E1000_AIT:
1700		sc->esc_AIT = value;
1701		break;
1702	case E1000_TDBAL(0):
1703		sc->esc_TDBAL = value & ~0xF;
1704		if (sc->esc_tx_enabled)
1705			e82545_tx_update_tdba(sc);
1706		break;
1707	case E1000_TDBAH(0):
1708		sc->esc_TDBAH = value;
1709		if (sc->esc_tx_enabled)
1710			e82545_tx_update_tdba(sc);
1711		break;
1712	case E1000_TDLEN(0):
1713		sc->esc_TDLEN = value & ~0xFFF0007F;
1714		if (sc->esc_tx_enabled)
1715			e82545_tx_update_tdba(sc);
1716		break;
1717	case E1000_TDH(0):
1718		//assert(!sc->esc_tx_enabled);
1719		/* XXX should only ever be zero ? Range check ? */
1720		sc->esc_TDHr = sc->esc_TDH = value;
1721		break;
1722	case E1000_TDT(0):
1723		/* XXX range check ? */
1724		sc->esc_TDT = value;
1725		if (sc->esc_tx_enabled)
1726			e82545_tx_start(sc);
1727		break;
1728	case E1000_TIDV:
1729		sc->esc_TIDV = value & ~0xFFFF0000;
1730		break;
1731	case E1000_TXDCTL(0):
1732		//assert(!sc->esc_tx_enabled);
1733		sc->esc_TXDCTL = value & ~0xC0C0C0;
1734		break;
1735	case E1000_TADV:
1736		sc->esc_TADV = value & ~0xFFFF0000;
1737		break;
1738	case E1000_RAL(0) ... E1000_RAH(15):
1739		/* convert to u32 offset */
1740		ridx = (offset - E1000_RAL(0)) >> 2;
1741		e82545_write_ra(sc, ridx, value);
1742		break;
1743	case E1000_MTA ... (E1000_MTA + (127*4)):
1744		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
1745		break;
1746	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1747		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
1748		break;
1749	case E1000_EECD:
1750	{
1751		//DPRINTF("EECD write 0x%x -> 0x%x", sc->eeprom_control, value);
1752		/* edge triggered low->high */
1753		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
1754			0 : (value & E1000_EECD_SK));
1755		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
1756					E1000_EECD_DI|E1000_EECD_REQ);
1757		sc->eeprom_control &= ~eecd_mask;
1758		sc->eeprom_control |= (value & eecd_mask);
1759		/* grant/revoke immediately */
1760		if (value & E1000_EECD_REQ) {
1761			sc->eeprom_control |= E1000_EECD_GNT;
1762		} else {
1763                        sc->eeprom_control &= ~E1000_EECD_GNT;
1764		}
1765		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
1766			e82545_eecd_strobe(sc);
1767		}
1768		return;
1769	}
1770	case E1000_MDIC:
1771	{
1772		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
1773						E1000_MDIC_REG_SHIFT);
1774		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
1775						E1000_MDIC_PHY_SHIFT);
1776		sc->mdi_control =
1777			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
1778		if ((value & E1000_MDIC_READY) != 0) {
1779			DPRINTF("Incorrect MDIC ready bit: 0x%x", value);
1780			return;
1781		}
1782		switch (value & E82545_MDIC_OP_MASK) {
1783		case E1000_MDIC_OP_READ:
1784			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
1785			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
1786			break;
1787		case E1000_MDIC_OP_WRITE:
1788			e82545_write_mdi(sc, reg_addr, phy_addr,
1789				value & E82545_MDIC_DATA_MASK);
1790			break;
1791		default:
1792			DPRINTF("Unknown MDIC op: 0x%x", value);
1793			return;
1794		}
1795		/* TODO: barrier? */
1796		sc->mdi_control |= E1000_MDIC_READY;
1797		if (value & E82545_MDIC_IE) {
1798			// TODO: generate interrupt
1799		}
1800		return;
1801	}
1802	case E1000_MANC:
1803	case E1000_STATUS:
1804		return;
1805	default:
1806		DPRINTF("Unknown write register: 0x%x value:%x", offset, value);
1807		return;
1808	}
1809}
1810
1811static uint32_t
1812e82545_read_register(struct e82545_softc *sc, uint32_t offset)
1813{
1814	uint32_t retval;
1815	int ridx;
1816
1817	if (offset & 0x3) {
1818		DPRINTF("Unaligned register read offset:0x%x", offset);
1819		return 0;
1820	}
1821
1822	DPRINTF("Register read: 0x%x", offset);
1823
1824	switch (offset) {
1825	case E1000_CTRL:
1826		retval = sc->esc_CTRL;
1827		break;
1828	case E1000_STATUS:
1829		retval = E1000_STATUS_FD | E1000_STATUS_LU |
1830		    E1000_STATUS_SPEED_1000;
1831		break;
1832	case E1000_FCAL:
1833		retval = sc->esc_FCAL;
1834		break;
1835	case E1000_FCAH:
1836		retval = sc->esc_FCAH;
1837		break;
1838	case E1000_FCT:
1839		retval = sc->esc_FCT;
1840		break;
1841	case E1000_VET:
1842		retval = sc->esc_VET;
1843		break;
1844	case E1000_FCTTV:
1845		retval = sc->esc_FCTTV;
1846		break;
1847	case E1000_LEDCTL:
1848		retval = sc->esc_LEDCTL;
1849		break;
1850	case E1000_PBA:
1851		retval = sc->esc_PBA;
1852		break;
1853	case E1000_ICR:
1854	case E1000_ITR:
1855	case E1000_ICS:
1856	case E1000_IMS:
1857	case E1000_IMC:
1858		retval = e82545_intr_read(sc, offset);
1859		break;
1860	case E1000_RCTL:
1861		retval = sc->esc_RCTL;
1862		break;
1863	case E1000_FCRTL:
1864		retval = sc->esc_FCRTL;
1865		break;
1866	case E1000_FCRTH:
1867		retval = sc->esc_FCRTH;
1868		break;
1869	case E1000_RDBAL(0):
1870		retval = sc->esc_RDBAL;
1871		break;
1872	case E1000_RDBAH(0):
1873		retval = sc->esc_RDBAH;
1874		break;
1875	case E1000_RDLEN(0):
1876		retval = sc->esc_RDLEN;
1877		break;
1878	case E1000_RDH(0):
1879		retval = sc->esc_RDH;
1880		break;
1881	case E1000_RDT(0):
1882		retval = sc->esc_RDT;
1883		break;
1884	case E1000_RDTR:
1885		retval = sc->esc_RDTR;
1886		break;
1887	case E1000_RXDCTL(0):
1888		retval = sc->esc_RXDCTL;
1889		break;
1890	case E1000_RADV:
1891		retval = sc->esc_RADV;
1892		break;
1893	case E1000_RSRPD:
1894		retval = sc->esc_RSRPD;
1895		break;
1896	case E1000_RXCSUM:
1897		retval = sc->esc_RXCSUM;
1898		break;
1899	case E1000_TXCW:
1900		retval = sc->esc_TXCW;
1901		break;
1902	case E1000_TCTL:
1903		retval = sc->esc_TCTL;
1904		break;
1905	case E1000_TIPG:
1906		retval = sc->esc_TIPG;
1907		break;
1908	case E1000_AIT:
1909		retval = sc->esc_AIT;
1910		break;
1911	case E1000_TDBAL(0):
1912		retval = sc->esc_TDBAL;
1913		break;
1914	case E1000_TDBAH(0):
1915		retval = sc->esc_TDBAH;
1916		break;
1917	case E1000_TDLEN(0):
1918		retval = sc->esc_TDLEN;
1919		break;
1920	case E1000_TDH(0):
1921		retval = sc->esc_TDH;
1922		break;
1923	case E1000_TDT(0):
1924		retval = sc->esc_TDT;
1925		break;
1926	case E1000_TIDV:
1927		retval = sc->esc_TIDV;
1928		break;
1929	case E1000_TXDCTL(0):
1930		retval = sc->esc_TXDCTL;
1931		break;
1932	case E1000_TADV:
1933		retval = sc->esc_TADV;
1934		break;
1935	case E1000_RAL(0) ... E1000_RAH(15):
1936		/* convert to u32 offset */
1937		ridx = (offset - E1000_RAL(0)) >> 2;
1938		retval = e82545_read_ra(sc, ridx);
1939		break;
1940	case E1000_MTA ... (E1000_MTA + (127*4)):
1941		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
1942		break;
1943	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1944		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
1945		break;
1946	case E1000_EECD:
1947		//DPRINTF("EECD read %x", sc->eeprom_control);
1948		retval = sc->eeprom_control;
1949		break;
1950	case E1000_MDIC:
1951		retval = sc->mdi_control;
1952		break;
1953	case E1000_MANC:
1954		retval = 0;
1955		break;
1956	/* stats that we emulate. */
1957	case E1000_MPC:
1958		retval = sc->missed_pkt_count;
1959		break;
1960	case E1000_PRC64:
1961		retval = sc->pkt_rx_by_size[0];
1962		break;
1963	case E1000_PRC127:
1964		retval = sc->pkt_rx_by_size[1];
1965		break;
1966	case E1000_PRC255:
1967		retval = sc->pkt_rx_by_size[2];
1968		break;
1969	case E1000_PRC511:
1970		retval = sc->pkt_rx_by_size[3];
1971		break;
1972	case E1000_PRC1023:
1973		retval = sc->pkt_rx_by_size[4];
1974		break;
1975	case E1000_PRC1522:
1976		retval = sc->pkt_rx_by_size[5];
1977		break;
1978	case E1000_GPRC:
1979		retval = sc->good_pkt_rx_count;
1980		break;
1981	case E1000_BPRC:
1982		retval = sc->bcast_pkt_rx_count;
1983		break;
1984	case E1000_MPRC:
1985		retval = sc->mcast_pkt_rx_count;
1986		break;
1987	case E1000_GPTC:
1988	case E1000_TPT:
1989		retval = sc->good_pkt_tx_count;
1990		break;
1991	case E1000_GORCL:
1992		retval = (uint32_t)sc->good_octets_rx;
1993		break;
1994	case E1000_GORCH:
1995		retval = (uint32_t)(sc->good_octets_rx >> 32);
1996		break;
1997	case E1000_TOTL:
1998	case E1000_GOTCL:
1999		retval = (uint32_t)sc->good_octets_tx;
2000		break;
2001	case E1000_TOTH:
2002	case E1000_GOTCH:
2003		retval = (uint32_t)(sc->good_octets_tx >> 32);
2004		break;
2005	case E1000_ROC:
2006		retval = sc->oversize_rx_count;
2007		break;
2008	case E1000_TORL:
2009		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
2010		break;
2011	case E1000_TORH:
2012		retval = (uint32_t)((sc->good_octets_rx +
2013		    sc->missed_octets) >> 32);
2014		break;
2015	case E1000_TPR:
2016		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
2017		    sc->oversize_rx_count;
2018		break;
2019	case E1000_PTC64:
2020		retval = sc->pkt_tx_by_size[0];
2021		break;
2022	case E1000_PTC127:
2023		retval = sc->pkt_tx_by_size[1];
2024		break;
2025	case E1000_PTC255:
2026		retval = sc->pkt_tx_by_size[2];
2027		break;
2028	case E1000_PTC511:
2029		retval = sc->pkt_tx_by_size[3];
2030		break;
2031	case E1000_PTC1023:
2032		retval = sc->pkt_tx_by_size[4];
2033		break;
2034	case E1000_PTC1522:
2035		retval = sc->pkt_tx_by_size[5];
2036		break;
2037	case E1000_MPTC:
2038		retval = sc->mcast_pkt_tx_count;
2039		break;
2040	case E1000_BPTC:
2041		retval = sc->bcast_pkt_tx_count;
2042		break;
2043	case E1000_TSCTC:
2044		retval = sc->tso_tx_count;
2045		break;
2046	/* stats that are always 0. */
2047	case E1000_CRCERRS:
2048	case E1000_ALGNERRC:
2049	case E1000_SYMERRS:
2050	case E1000_RXERRC:
2051	case E1000_SCC:
2052	case E1000_ECOL:
2053	case E1000_MCC:
2054	case E1000_LATECOL:
2055	case E1000_COLC:
2056	case E1000_DC:
2057	case E1000_TNCRS:
2058	case E1000_SEC:
2059	case E1000_CEXTERR:
2060	case E1000_RLEC:
2061	case E1000_XONRXC:
2062	case E1000_XONTXC:
2063	case E1000_XOFFRXC:
2064	case E1000_XOFFTXC:
2065	case E1000_FCRUC:
2066	case E1000_RNBC:
2067	case E1000_RUC:
2068	case E1000_RFC:
2069	case E1000_RJC:
2070	case E1000_MGTPRC:
2071	case E1000_MGTPDC:
2072	case E1000_MGTPTC:
2073	case E1000_TSCTFC:
2074		retval = 0;
2075		break;
2076	default:
2077		DPRINTF("Unknown read register: 0x%x", offset);
2078		retval = 0;
2079		break;
2080	}
2081
2082	return (retval);
2083}
2084
2085static void
2086e82545_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2087	     uint64_t offset, int size, uint64_t value)
2088{
2089	struct e82545_softc *sc;
2090
2091	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d", baridx, offset, value, size);
2092
2093	sc = pi->pi_arg;
2094
2095	pthread_mutex_lock(&sc->esc_mtx);
2096
2097	switch (baridx) {
2098	case E82545_BAR_IO:
2099		switch (offset) {
2100		case E82545_IOADDR:
2101			if (size != 4) {
2102				DPRINTF("Wrong io addr write sz:%d value:0x%lx", size, value);
2103			} else
2104				sc->io_addr = (uint32_t)value;
2105			break;
2106		case E82545_IODATA:
2107			if (size != 4) {
2108				DPRINTF("Wrong io data write size:%d value:0x%lx", size, value);
2109			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2110				DPRINTF("Non-register io write addr:0x%x value:0x%lx", sc->io_addr, value);
2111			} else
2112				e82545_write_register(sc, sc->io_addr,
2113						      (uint32_t)value);
2114			break;
2115		default:
2116			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d", offset, value, size);
2117			break;
2118		}
2119		break;
2120	case E82545_BAR_REGISTER:
2121		if (size != 4) {
2122			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx", size, offset, value);
2123		} else
2124			e82545_write_register(sc, (uint32_t)offset,
2125					      (uint32_t)value);
2126		break;
2127	default:
2128		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d",
2129			baridx, offset, value, size);
2130	}
2131
2132	pthread_mutex_unlock(&sc->esc_mtx);
2133}
2134
2135static uint64_t
2136e82545_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2137	    uint64_t offset, int size)
2138{
2139	struct e82545_softc *sc;
2140	uint64_t retval;
2141
2142	//DPRINTF("Read  bar:%d offset:0x%lx size:%d", baridx, offset, size);
2143	sc = pi->pi_arg;
2144	retval = 0;
2145
2146	pthread_mutex_lock(&sc->esc_mtx);
2147
2148	switch (baridx) {
2149	case E82545_BAR_IO:
2150		switch (offset) {
2151		case E82545_IOADDR:
2152			if (size != 4) {
2153				DPRINTF("Wrong io addr read sz:%d", size);
2154			} else
2155				retval = sc->io_addr;
2156			break;
2157		case E82545_IODATA:
2158			if (size != 4) {
2159				DPRINTF("Wrong io data read sz:%d", size);
2160			}
2161			if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2162				DPRINTF("Non-register io read addr:0x%x",
2163					sc->io_addr);
2164			} else
2165				retval = e82545_read_register(sc, sc->io_addr);
2166			break;
2167		default:
2168			DPRINTF("Unknown io bar read offset:0x%lx size:%d",
2169				offset, size);
2170			break;
2171		}
2172		break;
2173	case E82545_BAR_REGISTER:
2174		if (size != 4) {
2175			DPRINTF("Wrong register read size:%d offset:0x%lx",
2176				size, offset);
2177		} else
2178			retval = e82545_read_register(sc, (uint32_t)offset);
2179		break;
2180	default:
2181		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d",
2182			baridx, offset, size);
2183		break;
2184	}
2185
2186	pthread_mutex_unlock(&sc->esc_mtx);
2187
2188	return (retval);
2189}
2190
2191static void
2192e82545_reset(struct e82545_softc *sc, int drvr)
2193{
2194	int i;
2195
2196	e82545_rx_disable(sc);
2197	e82545_tx_disable(sc);
2198
2199	/* clear outstanding interrupts */
2200	if (sc->esc_irq_asserted)
2201		pci_lintr_deassert(sc->esc_pi);
2202
2203	/* misc */
2204	if (!drvr) {
2205		sc->esc_FCAL = 0;
2206		sc->esc_FCAH = 0;
2207		sc->esc_FCT = 0;
2208		sc->esc_VET = 0;
2209		sc->esc_FCTTV = 0;
2210	}
2211	sc->esc_LEDCTL = 0x07061302;
2212	sc->esc_PBA = 0x00100030;
2213
2214	/* start nvm in opcode mode. */
2215	sc->nvm_opaddr = 0;
2216	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
2217	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
2218	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
2219	e82545_init_eeprom(sc);
2220
2221	/* interrupt */
2222	sc->esc_ICR = 0;
2223	sc->esc_ITR = 250;
2224	sc->esc_ICS = 0;
2225	sc->esc_IMS = 0;
2226	sc->esc_IMC = 0;
2227
2228	/* L2 filters */
2229	if (!drvr) {
2230		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
2231		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
2232		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
2233
2234		/* XXX not necessary on 82545 ?? */
2235		sc->esc_uni[0].eu_valid = 1;
2236		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
2237		    ETHER_ADDR_LEN);
2238	} else {
2239		/* Clear RAH valid bits */
2240		for (i = 0; i < 16; i++)
2241			sc->esc_uni[i].eu_valid = 0;
2242	}
2243
2244	/* receive */
2245	if (!drvr) {
2246		sc->esc_RDBAL = 0;
2247		sc->esc_RDBAH = 0;
2248	}
2249	sc->esc_RCTL = 0;
2250	sc->esc_FCRTL = 0;
2251	sc->esc_FCRTH = 0;
2252	sc->esc_RDLEN = 0;
2253	sc->esc_RDH = 0;
2254	sc->esc_RDT = 0;
2255	sc->esc_RDTR = 0;
2256	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
2257	sc->esc_RADV = 0;
2258	sc->esc_RXCSUM = 0;
2259
2260	/* transmit */
2261	if (!drvr) {
2262		sc->esc_TDBAL = 0;
2263		sc->esc_TDBAH = 0;
2264		sc->esc_TIPG = 0;
2265		sc->esc_AIT = 0;
2266		sc->esc_TIDV = 0;
2267		sc->esc_TADV = 0;
2268	}
2269	sc->esc_tdba = 0;
2270	sc->esc_txdesc = NULL;
2271	sc->esc_TXCW = 0;
2272	sc->esc_TCTL = 0;
2273	sc->esc_TDLEN = 0;
2274	sc->esc_TDT = 0;
2275	sc->esc_TDHr = sc->esc_TDH = 0;
2276	sc->esc_TXDCTL = 0;
2277}
2278
2279static int
2280e82545_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2281{
2282	char nstr[80];
2283	struct e82545_softc *sc;
2284	char *optscopy;
2285	char *vtopts;
2286	int mac_provided;
2287
2288	DPRINTF("Loading with options: %s", opts);
2289
2290	/* Setup our softc */
2291	sc = calloc(1, sizeof(*sc));
2292
2293	pi->pi_arg = sc;
2294	sc->esc_pi = pi;
2295	sc->esc_ctx = ctx;
2296
2297	pthread_mutex_init(&sc->esc_mtx, NULL);
2298	pthread_cond_init(&sc->esc_rx_cond, NULL);
2299	pthread_cond_init(&sc->esc_tx_cond, NULL);
2300	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
2301	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
2302	    pi->pi_func);
2303        pthread_set_name_np(sc->esc_tx_tid, nstr);
2304
2305	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
2306	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
2307	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
2308	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
2309	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
2310	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
2311
2312	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
2313	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
2314
2315	/* TODO: this card also supports msi, but the freebsd driver for it
2316	 * does not, so I have not implemented it. */
2317	pci_lintr_request(pi);
2318
2319	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
2320		E82545_BAR_REGISTER_LEN);
2321	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
2322		E82545_BAR_FLASH_LEN);
2323	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
2324		E82545_BAR_IO_LEN);
2325
2326	/*
2327	 * Attempt to open the net backend and read the MAC address
2328	 * if specified.  Copied from virtio-net, slightly modified.
2329	 */
2330	mac_provided = 0;
2331	sc->esc_be = NULL;
2332	if (opts != NULL) {
2333		int err = 0;
2334
2335		optscopy = vtopts = strdup(opts);
2336		(void) strsep(&vtopts, ",");
2337
2338		/*
2339		 * Parse the list of options in the form
2340		 *     key1=value1,...,keyN=valueN.
2341		 */
2342		while (vtopts != NULL) {
2343			char *value = vtopts;
2344			char *key;
2345
2346			key = strsep(&value, "=");
2347			if (value == NULL)
2348				break;
2349			vtopts = value;
2350			(void) strsep(&vtopts, ",");
2351
2352			if (strcmp(key, "mac") == 0) {
2353				err = net_parsemac(value, sc->esc_mac.octet);
2354				if (err)
2355					break;
2356				mac_provided = 1;
2357			}
2358		}
2359
2360		free(optscopy);
2361
2362		if (err) {
2363			free(sc);
2364			return (err);
2365		}
2366
2367		err = netbe_init(&sc->esc_be, opts, e82545_rx_callback, sc);
2368		if (err) {
2369			free(sc);
2370			return (err);
2371		}
2372	}
2373
2374	if (!mac_provided) {
2375		net_genmac(pi, sc->esc_mac.octet);
2376	}
2377
2378	netbe_rx_enable(sc->esc_be);
2379
2380	/* H/w initiated reset */
2381	e82545_reset(sc, 0);
2382
2383	return (0);
2384}
2385
2386#ifdef BHYVE_SNAPSHOT
2387static int
2388e82545_snapshot(struct vm_snapshot_meta *meta)
2389{
2390	int i;
2391	int ret;
2392	struct e82545_softc *sc;
2393	struct pci_devinst *pi;
2394	uint64_t bitmap_value;
2395
2396	pi = meta->dev_data;
2397	sc = pi->pi_arg;
2398
2399	/* esc_mevp and esc_mevpitr should be reinitiated at init. */
2400	SNAPSHOT_VAR_OR_LEAVE(sc->esc_mac, meta, ret, done);
2401
2402	/* General */
2403	SNAPSHOT_VAR_OR_LEAVE(sc->esc_CTRL, meta, ret, done);
2404	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCAL, meta, ret, done);
2405	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCAH, meta, ret, done);
2406	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCT, meta, ret, done);
2407	SNAPSHOT_VAR_OR_LEAVE(sc->esc_VET, meta, ret, done);
2408	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCTTV, meta, ret, done);
2409	SNAPSHOT_VAR_OR_LEAVE(sc->esc_LEDCTL, meta, ret, done);
2410	SNAPSHOT_VAR_OR_LEAVE(sc->esc_PBA, meta, ret, done);
2411
2412	/* Interrupt control */
2413	SNAPSHOT_VAR_OR_LEAVE(sc->esc_irq_asserted, meta, ret, done);
2414	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ICR, meta, ret, done);
2415	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ITR, meta, ret, done);
2416	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ICS, meta, ret, done);
2417	SNAPSHOT_VAR_OR_LEAVE(sc->esc_IMS, meta, ret, done);
2418	SNAPSHOT_VAR_OR_LEAVE(sc->esc_IMC, meta, ret, done);
2419
2420	/*
2421	 * Transmit
2422	 *
2423	 * The fields in the unions are in superposition to access certain
2424	 * bytes in the larger uint variables.
2425	 * e.g., ip_config = [ipcss|ipcso|ipcse0|ipcse1]
2426	 */
2427	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.lower_setup.ip_config, meta, ret, done);
2428	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.upper_setup.tcp_config, meta, ret, done);
2429	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.cmd_and_length, meta, ret, done);
2430	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.tcp_seg_setup.data, meta, ret, done);
2431
2432	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tx_enabled, meta, ret, done);
2433	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tx_active, meta, ret, done);
2434	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TXCW, meta, ret, done);
2435	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TCTL, meta, ret, done);
2436	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TIPG, meta, ret, done);
2437	SNAPSHOT_VAR_OR_LEAVE(sc->esc_AIT, meta, ret, done);
2438	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tdba, meta, ret, done);
2439	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDBAL, meta, ret, done);
2440	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDBAH, meta, ret, done);
2441	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDLEN, meta, ret, done);
2442	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDH, meta, ret, done);
2443	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDHr, meta, ret, done);
2444	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDT, meta, ret, done);
2445	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TIDV, meta, ret, done);
2446	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TXDCTL, meta, ret, done);
2447	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TADV, meta, ret, done);
2448
2449	/* Has dependency on esc_TDLEN; reoreder of fields from struct. */
2450	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->esc_txdesc, sc->esc_TDLEN,
2451		true, meta, ret, done);
2452
2453	/* L2 frame acceptance */
2454	for (i = 0; i < nitems(sc->esc_uni); i++) {
2455		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_valid, meta, ret, done);
2456		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_addrsel, meta, ret, done);
2457		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_eth, meta, ret, done);
2458	}
2459
2460	SNAPSHOT_BUF_OR_LEAVE(sc->esc_fmcast, sizeof(sc->esc_fmcast),
2461			      meta, ret, done);
2462	SNAPSHOT_BUF_OR_LEAVE(sc->esc_fvlan, sizeof(sc->esc_fvlan),
2463			      meta, ret, done);
2464
2465	/* Receive */
2466	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_enabled, meta, ret, done);
2467	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_active, meta, ret, done);
2468	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_loopback, meta, ret, done);
2469	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RCTL, meta, ret, done);
2470	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCRTL, meta, ret, done);
2471	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCRTH, meta, ret, done);
2472	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rdba, meta, ret, done);
2473	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDBAL, meta, ret, done);
2474	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDBAH, meta, ret, done);
2475	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDLEN, meta, ret, done);
2476	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDH, meta, ret, done);
2477	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDT, meta, ret, done);
2478	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDTR, meta, ret, done);
2479	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RXDCTL, meta, ret, done);
2480	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RADV, meta, ret, done);
2481	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RSRPD, meta, ret, done);
2482	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RXCSUM, meta, ret, done);
2483
2484	/* Has dependency on esc_RDLEN; reoreder of fields from struct. */
2485	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->esc_rxdesc, sc->esc_TDLEN,
2486		true, meta, ret, done);
2487
2488	/* IO Port register access */
2489	SNAPSHOT_VAR_OR_LEAVE(sc->io_addr, meta, ret, done);
2490
2491	/* Shadow copy of MDIC */
2492	SNAPSHOT_VAR_OR_LEAVE(sc->mdi_control, meta, ret, done);
2493
2494	/* Shadow copy of EECD */
2495	SNAPSHOT_VAR_OR_LEAVE(sc->eeprom_control, meta, ret, done);
2496
2497	/* Latest NVM in/out */
2498	SNAPSHOT_VAR_OR_LEAVE(sc->nvm_data, meta, ret, done);
2499	SNAPSHOT_VAR_OR_LEAVE(sc->nvm_opaddr, meta, ret, done);
2500
2501	/* Stats */
2502	SNAPSHOT_VAR_OR_LEAVE(sc->missed_pkt_count, meta, ret, done);
2503	SNAPSHOT_BUF_OR_LEAVE(sc->pkt_rx_by_size, sizeof(sc->pkt_rx_by_size),
2504			      meta, ret, done);
2505	SNAPSHOT_BUF_OR_LEAVE(sc->pkt_tx_by_size, sizeof(sc->pkt_tx_by_size),
2506			      meta, ret, done);
2507	SNAPSHOT_VAR_OR_LEAVE(sc->good_pkt_rx_count, meta, ret, done);
2508	SNAPSHOT_VAR_OR_LEAVE(sc->bcast_pkt_rx_count, meta, ret, done);
2509	SNAPSHOT_VAR_OR_LEAVE(sc->mcast_pkt_rx_count, meta, ret, done);
2510	SNAPSHOT_VAR_OR_LEAVE(sc->good_pkt_tx_count, meta, ret, done);
2511	SNAPSHOT_VAR_OR_LEAVE(sc->bcast_pkt_tx_count, meta, ret, done);
2512	SNAPSHOT_VAR_OR_LEAVE(sc->mcast_pkt_tx_count, meta, ret, done);
2513	SNAPSHOT_VAR_OR_LEAVE(sc->oversize_rx_count, meta, ret, done);
2514	SNAPSHOT_VAR_OR_LEAVE(sc->tso_tx_count, meta, ret, done);
2515	SNAPSHOT_VAR_OR_LEAVE(sc->good_octets_rx, meta, ret, done);
2516	SNAPSHOT_VAR_OR_LEAVE(sc->good_octets_tx, meta, ret, done);
2517	SNAPSHOT_VAR_OR_LEAVE(sc->missed_octets, meta, ret, done);
2518
2519	if (meta->op == VM_SNAPSHOT_SAVE)
2520		bitmap_value = sc->nvm_bits;
2521	SNAPSHOT_VAR_OR_LEAVE(bitmap_value, meta, ret, done);
2522	if (meta->op == VM_SNAPSHOT_RESTORE)
2523		sc->nvm_bits = bitmap_value;
2524
2525	if (meta->op == VM_SNAPSHOT_SAVE)
2526		bitmap_value = sc->nvm_bits;
2527	SNAPSHOT_VAR_OR_LEAVE(bitmap_value, meta, ret, done);
2528	if (meta->op == VM_SNAPSHOT_RESTORE)
2529		sc->nvm_bits = bitmap_value;
2530
2531	/* EEPROM data */
2532	SNAPSHOT_BUF_OR_LEAVE(sc->eeprom_data, sizeof(sc->eeprom_data),
2533			      meta, ret, done);
2534
2535done:
2536	return (ret);
2537}
2538#endif
2539
2540struct pci_devemu pci_de_e82545 = {
2541	.pe_emu = 	"e1000",
2542	.pe_init =	e82545_init,
2543	.pe_barwrite =	e82545_write,
2544	.pe_barread =	e82545_read,
2545#ifdef BHYVE_SNAPSHOT
2546	.pe_snapshot =	e82545_snapshot,
2547#endif
2548};
2549PCI_EMUL_SET(pci_de_e82545);
2550
2551