1/*
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6 * Copyright (c) 2013 Jeremiah Lott, Avere Systems
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer
14 *    in this position and unchanged.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35#include <sys/types.h>
36#ifndef WITHOUT_CAPSICUM
37#include <sys/capsicum.h>
38#endif
39#include <sys/limits.h>
40#include <sys/ioctl.h>
41#include <sys/uio.h>
42#include <net/ethernet.h>
43#include <netinet/in.h>
44#include <netinet/tcp.h>
45
46#ifndef WITHOUT_CAPSICUM
47#include <capsicum_helpers.h>
48#endif
49#include <err.h>
50#include <errno.h>
51#include <fcntl.h>
52#include <md5.h>
53#include <stdio.h>
54#include <stdlib.h>
#include <string.h>
#include <strings.h>
56#include <sysexits.h>
57#include <unistd.h>
58#include <pthread.h>
59#include <pthread_np.h>
60
61#include "e1000_regs.h"
62#include "e1000_defines.h"
63#include "mii.h"
64
65#include "bhyverun.h"
66#include "debug.h"
67#include "pci_emul.h"
68#include "mevent.h"
69#include "net_utils.h"
70#include "net_backends.h"
71
72/* Hardware/register definitions XXX: move some to common code. */
73#define E82545_VENDOR_ID_INTEL			0x8086
74#define E82545_DEV_ID_82545EM_COPPER		0x100F
75#define E82545_SUBDEV_ID			0x1008
76
77#define E82545_REVISION_4			4
78
79#define E82545_MDIC_DATA_MASK			0x0000FFFF
80#define E82545_MDIC_OP_MASK			0x0c000000
81#define E82545_MDIC_IE				0x20000000
82
83#define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
84#define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
85#define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
86
87#define E82545_BAR_REGISTER			0
88#define E82545_BAR_REGISTER_LEN			(128*1024)
89#define E82545_BAR_FLASH			1
90#define E82545_BAR_FLASH_LEN			(64*1024)
91#define E82545_BAR_IO				2
92#define E82545_BAR_IO_LEN			8
93
94#define E82545_IOADDR				0x00000000
95#define E82545_IODATA				0x00000004
96#define E82545_IO_REGISTER_MAX			0x0001FFFF
97#define E82545_IO_FLASH_BASE			0x00080000
98#define E82545_IO_FLASH_MAX			0x000FFFFF
99
#define E82545_ARRAY_ENTRY(reg, offset)		((reg) + ((offset) << 2))
101#define E82545_RAR_MAX				15
102#define E82545_MTA_MAX				127
103#define E82545_VFTA_MAX				127
104
105/* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
106 * followed by 6 address bits.
107 * TODO: make opcode bits and addr bits configurable?
108 * NVM Commands - Microwire */
109#define E82545_NVM_OPCODE_BITS	3
110#define E82545_NVM_ADDR_BITS	6
111#define E82545_NVM_DATA_BITS	16
112#define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
113#define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
114#define E82545_NVM_OPCODE_MASK	\
115    (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
116#define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
117#define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
118#define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
119#define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
120
#define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 128 bytes */
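
/*
 * Example, using the values above: a Microwire READ of word 0x12 shifts in
 * 9 bits, the 3-bit opcode 0b110 followed by the 6-bit address 0b010010,
 * so nvm_opaddr ends up as E82545_NVM_OPCODE_READ | 0x12 == 0x192, after
 * which the 16 bits of eeprom_data[0x12] are shifted back out.
 */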
122
123#define E1000_ICR_SRPD		0x00010000
124
125/* This is an arbitrary number.  There is no hard limit on the chip. */
126#define I82545_MAX_TXSEGS	64
127
128/* Legacy receive descriptor */
129struct e1000_rx_desc {
130	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
131	uint16_t length;	/* Length of data DMAed into data buffer */
132	uint16_t csum;		/* Packet checksum */
133	uint8_t	 status;       	/* Descriptor status */
134	uint8_t  errors;	/* Descriptor Errors */
135	uint16_t special;
136};
137
138/* Transmit descriptor types */
139#define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
140#define E1000_TXD_TYP_L		(0)
141#define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
142#define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
143
144/* Legacy transmit descriptor */
145struct e1000_tx_desc {
146	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
147	union {
148		uint32_t data;
149		struct {
150			uint16_t length;  /* Data buffer length */
151			uint8_t  cso;  /* Checksum offset */
152			uint8_t  cmd;  /* Descriptor control */
153		} flags;
154	} lower;
155	union {
156		uint32_t data;
157		struct {
158			uint8_t status; /* Descriptor status */
159			uint8_t css;  /* Checksum start */
160			uint16_t special;
161		} fields;
162	} upper;
163};
164
165/* Context descriptor */
166struct e1000_context_desc {
167	union {
168		uint32_t ip_config;
169		struct {
170			uint8_t ipcss;  /* IP checksum start */
171			uint8_t ipcso;  /* IP checksum offset */
172			uint16_t ipcse;  /* IP checksum end */
173		} ip_fields;
174	} lower_setup;
175	union {
176		uint32_t tcp_config;
177		struct {
178			uint8_t tucss;  /* TCP checksum start */
179			uint8_t tucso;  /* TCP checksum offset */
180			uint16_t tucse;  /* TCP checksum end */
181		} tcp_fields;
182	} upper_setup;
183	uint32_t cmd_and_length;
184	union {
185		uint32_t data;
186		struct {
187			uint8_t status;  /* Descriptor status */
188			uint8_t hdr_len;  /* Header length */
189			uint16_t mss;  /* Maximum segment size */
190		} fields;
191	} tcp_seg_setup;
192};
193
194/* Data descriptor */
195struct e1000_data_desc {
	uint64_t buffer_addr;  /* Address of the descriptor's data buffer */
197	union {
198		uint32_t data;
199		struct {
200			uint16_t length;  /* Data buffer length */
201			uint8_t typ_len_ext;
202			uint8_t cmd;
203		} flags;
204	} lower;
205	union {
206		uint32_t data;
207		struct {
208			uint8_t status;  /* Descriptor status */
209			uint8_t popts;  /* Packet Options */
210			uint16_t special;
211		} fields;
212	} upper;
213};
214
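/*
 * A transmit descriptor is interpreted as a legacy, context or data
 * descriptor based on the DEXT and DTYP bits of its lower dword; see
 * the E1000_TXD_TYP_* values above and e82545_txdesc_type() below.
 */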
215union e1000_tx_udesc {
216	struct e1000_tx_desc td;
217	struct e1000_context_desc cd;
218	struct e1000_data_desc dd;
219};
220
221/* Tx checksum info for a packet. */
222struct ck_info {
223	int	ck_valid;	/* ck_info is valid */
	uint8_t	ck_start;	/* start byte of cksum calculation */
225	uint8_t	ck_off;		/* offset of cksum insertion */
226	uint16_t ck_len;	/* length of cksum calc: 0 is to packet-end */
227};
228
229/*
230 * Debug printf
231 */
232static int e82545_debug = 0;
233#define WPRINTF(msg,params...) PRINTLN("e82545: " msg, params)
#define DPRINTF(msg,params...) do { if (e82545_debug) WPRINTF(msg, params); } while (0)
235
236#define	MIN(a,b) (((a)<(b))?(a):(b))
237#define	MAX(a,b) (((a)>(b))?(a):(b))
238
239/* s/w representation of the RAL/RAH regs */
240struct  eth_uni {
241	int		eu_valid;
242	int		eu_addrsel;
243	struct ether_addr eu_eth;
244};
245
246
247struct e82545_softc {
248	struct pci_devinst *esc_pi;
249	struct vmctx	*esc_ctx;
250	struct mevent   *esc_mevpitr;
251	pthread_mutex_t	esc_mtx;
252	struct ether_addr esc_mac;
253	net_backend_t	*esc_be;
254
255	/* General */
256	uint32_t	esc_CTRL;	/* x0000 device ctl */
257	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
258	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
259	uint32_t	esc_FCT;	/* x0030 flow ctl type */
260	uint32_t	esc_VET;	/* x0038 VLAN eth type */
261	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
262	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
263	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
264
265	/* Interrupt control */
266	int		esc_irq_asserted;
267	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
268	uint32_t	esc_ITR;	/* x00C4 intr throttling */
269	uint32_t	esc_ICS;	/* x00C8 cause set */
270	uint32_t	esc_IMS;	/* x00D0 mask set/read */
271	uint32_t	esc_IMC;	/* x00D8 mask clear */
272
273	/* Transmit */
274	union e1000_tx_udesc *esc_txdesc;
275	struct e1000_context_desc esc_txctx;
276	pthread_t	esc_tx_tid;
277	pthread_cond_t	esc_tx_cond;
278	int		esc_tx_enabled;
279	int		esc_tx_active;
280	uint32_t	esc_TXCW;	/* x0178 transmit config */
281	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
282	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
283	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
284	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
285	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
286	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
287	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
288	uint16_t	esc_TDH;	/* x3810 desc table head idx */
289	uint16_t	esc_TDHr;	/* internal read version of TDH */
290	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
291	uint32_t	esc_TIDV;	/* x3820 intr delay */
292	uint32_t	esc_TXDCTL;	/* x3828 desc control */
293	uint32_t	esc_TADV;	/* x382C intr absolute delay */
294
295	/* L2 frame acceptance */
296	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
297	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
298	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
299
300	/* Receive */
301	struct e1000_rx_desc *esc_rxdesc;
302	pthread_cond_t	esc_rx_cond;
303	int		esc_rx_enabled;
304	int		esc_rx_active;
305	int		esc_rx_loopback;
306	uint32_t	esc_RCTL;	/* x0100 receive ctl */
307	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
308	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
309	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
310	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
311	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
312	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
313	uint16_t	esc_RDH;	/* x2810 desc table head idx */
314	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
315	uint32_t	esc_RDTR;	/* x2820 intr delay */
316	uint32_t	esc_RXDCTL;	/* x2828 desc control */
317	uint32_t	esc_RADV;	/* x282C intr absolute delay */
318	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
319	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
320
321	/* IO Port register access */
322	uint32_t io_addr;
323
324	/* Shadow copy of MDIC */
325	uint32_t mdi_control;
326	/* Shadow copy of EECD */
327	uint32_t eeprom_control;
328	/* Latest NVM in/out */
329	uint16_t nvm_data;
330	uint16_t nvm_opaddr;
331	/* stats */
332	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
333	uint32_t pkt_rx_by_size[6];
334	uint32_t pkt_tx_by_size[6];
335	uint32_t good_pkt_rx_count;
336	uint32_t bcast_pkt_rx_count;
337	uint32_t mcast_pkt_rx_count;
338	uint32_t good_pkt_tx_count;
339	uint32_t bcast_pkt_tx_count;
340	uint32_t mcast_pkt_tx_count;
341	uint32_t oversize_rx_count;
342	uint32_t tso_tx_count;
343	uint64_t good_octets_rx;
344	uint64_t good_octets_tx;
345	uint64_t missed_octets; /* counts missed and oversized */
346
347	uint8_t nvm_bits:6; /* number of bits remaining in/out */
348	uint8_t nvm_mode:2;
349#define E82545_NVM_MODE_OPADDR  0x0
350#define E82545_NVM_MODE_DATAIN  0x1
351#define E82545_NVM_MODE_DATAOUT 0x2
352	/* EEPROM data */
353	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
354};
355
356static void e82545_reset(struct e82545_softc *sc, int dev);
357static void e82545_rx_enable(struct e82545_softc *sc);
358static void e82545_rx_disable(struct e82545_softc *sc);
359static void e82545_rx_callback(int fd, enum ev_type type, void *param);
360static void e82545_tx_start(struct e82545_softc *sc);
361static void e82545_tx_enable(struct e82545_softc *sc);
362static void e82545_tx_disable(struct e82545_softc *sc);
363
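/*
 * Map a packet size to one of the six statistics buckets kept in
 * pkt_rx_by_size[]/pkt_tx_by_size[], matching the PRC64/PTC64 through
 * PRC1522/PTC1522 counters: <=64, 65-127, 128-255, 256-511, 512-1023
 * and >=1024 bytes.
 */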
364static inline int
365e82545_size_stat_index(uint32_t size)
366{
367	if (size <= 64) {
368		return 0;
369	} else if (size >= 1024) {
370		return 5;
	} else {
		/* 65-1023 bytes: buckets 1-4, indexed by the highest set bit */
		return (fls(size) - 6);
	}
375}
376
377static void
378e82545_init_eeprom(struct e82545_softc *sc)
379{
380	uint16_t checksum, i;
381
382        /* mac addr */
383	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
384		(((uint16_t)sc->esc_mac.octet[1]) << 8);
385	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
386		(((uint16_t)sc->esc_mac.octet[3]) << 8);
387	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
388		(((uint16_t)sc->esc_mac.octet[5]) << 8);
389
390	/* pci ids */
391	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
392	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
393	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
394	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
395
396	/* fill in the checksum */
397        checksum = 0;
398	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
399		checksum += sc->eeprom_data[i];
400	}
401	checksum = NVM_SUM - checksum;
402	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
403	DPRINTF("eeprom checksum: 0x%x", checksum);
404}
405
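/*
 * The emulated MDIO/PHY is effectively read-only: register writes are
 * only logged, while reads present a fixed Marvell 88E1011-compatible
 * PHY that always reports link up with autonegotiation complete.
 */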
406static void
407e82545_write_mdi(struct e82545_softc *sc, uint8_t reg_addr,
408			uint8_t phy_addr, uint32_t data)
409{
410	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x", reg_addr, phy_addr, data);
411}
412
413static uint32_t
414e82545_read_mdi(struct e82545_softc *sc, uint8_t reg_addr,
415			uint8_t phy_addr)
416{
417	//DPRINTF("Read mdi reg:0x%x phy:0x%x", reg_addr, phy_addr);
418	switch (reg_addr) {
419	case PHY_STATUS:
420		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
421			MII_SR_AUTONEG_COMPLETE);
422	case PHY_AUTONEG_ADV:
423		return NWAY_AR_SELECTOR_FIELD;
424	case PHY_LP_ABILITY:
425		return 0;
426	case PHY_1000T_STATUS:
427		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
428			SR_1000T_LOCAL_RX_STATUS);
429	case PHY_ID1:
430		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
431	case PHY_ID2:
432		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
433	default:
434		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x", reg_addr, phy_addr);
435		return 0;
436	}
437	/* not reached */
438}
439
440static void
441e82545_eecd_strobe(struct e82545_softc *sc)
442{
443	/* Microwire state machine */
444	/*
	DPRINTF("eeprom state machine strobe "
446		"0x%x 0x%x 0x%x 0x%x",
447		sc->nvm_mode, sc->nvm_bits,
448		sc->nvm_opaddr, sc->nvm_data);*/
449
450	if (sc->nvm_bits == 0) {
451		DPRINTF("eeprom state machine not expecting data! "
452			"0x%x 0x%x 0x%x 0x%x",
453			sc->nvm_mode, sc->nvm_bits,
454			sc->nvm_opaddr, sc->nvm_data);
455		return;
456	}
457	sc->nvm_bits--;
458	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
459		/* shifting out */
460		if (sc->nvm_data & 0x8000) {
461			sc->eeprom_control |= E1000_EECD_DO;
462		} else {
463			sc->eeprom_control &= ~E1000_EECD_DO;
464		}
465		sc->nvm_data <<= 1;
466		if (sc->nvm_bits == 0) {
467			/* read done, back to opcode mode. */
468			sc->nvm_opaddr = 0;
469			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
470			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
471		}
472	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
473		/* shifting in */
474		sc->nvm_data <<= 1;
475		if (sc->eeprom_control & E1000_EECD_DI) {
476			sc->nvm_data |= 1;
477		}
478		if (sc->nvm_bits == 0) {
479			/* eeprom write */
480			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
481			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
482			if (op != E82545_NVM_OPCODE_WRITE) {
483				DPRINTF("Illegal eeprom write op 0x%x",
484					sc->nvm_opaddr);
485			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
486				DPRINTF("Illegal eeprom write addr 0x%x",
487					sc->nvm_opaddr);
488			} else {
489				DPRINTF("eeprom write eeprom[0x%x] = 0x%x",
490				addr, sc->nvm_data);
491				sc->eeprom_data[addr] = sc->nvm_data;
492			}
493			/* back to opcode mode */
494			sc->nvm_opaddr = 0;
495			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
496			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
497		}
498	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
499		sc->nvm_opaddr <<= 1;
500		if (sc->eeprom_control & E1000_EECD_DI) {
501			sc->nvm_opaddr |= 1;
502		}
503		if (sc->nvm_bits == 0) {
504			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
505			switch (op) {
506			case E82545_NVM_OPCODE_EWEN:
507				DPRINTF("eeprom write enable: 0x%x",
508					sc->nvm_opaddr);
509				/* back to opcode mode */
510				sc->nvm_opaddr = 0;
511				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
512				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
513				break;
514			case E82545_NVM_OPCODE_READ:
515			{
516				uint16_t addr = sc->nvm_opaddr &
517					E82545_NVM_ADDR_MASK;
518				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
519				sc->nvm_bits = E82545_NVM_DATA_BITS;
520				if (addr < E82545_NVM_EEPROM_SIZE) {
521					sc->nvm_data = sc->eeprom_data[addr];
522					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x",
523						addr, sc->nvm_data);
524				} else {
525					DPRINTF("eeprom illegal read: 0x%x",
526						sc->nvm_opaddr);
527					sc->nvm_data = 0;
528				}
529				break;
530			}
531			case E82545_NVM_OPCODE_WRITE:
532				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
533				sc->nvm_bits = E82545_NVM_DATA_BITS;
534				sc->nvm_data = 0;
535				break;
536			default:
537				DPRINTF("eeprom unknown op: 0x%x",
538					sc->nvm_opaddr);
539				/* back to opcode mode */
540				sc->nvm_opaddr = 0;
541				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
542				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
543			}
544		}
545	} else {
546		DPRINTF("eeprom state machine wrong state! "
547			"0x%x 0x%x 0x%x 0x%x",
548			sc->nvm_mode, sc->nvm_bits,
549			sc->nvm_opaddr, sc->nvm_data);
550	}
551}
552
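/*
 * Interrupt throttling: when an interrupt is asserted and ITR is non-zero,
 * an mevent timer is armed and further assertions are suppressed while it
 * exists; the timer callback re-asserts the line if unmasked causes are
 * still pending and the line is down, otherwise it removes the timer.
 * ITR counts 256ns units, so (ITR + 3905) / 3906 rounds the interval up
 * to mevent's millisecond granularity, e.g. ITR = 8000 (~2.05ms) -> 3ms.
 */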
553static void
554e82545_itr_callback(int fd, enum ev_type type, void *param)
555{
556	uint32_t new;
557	struct e82545_softc *sc = param;
558
559	pthread_mutex_lock(&sc->esc_mtx);
560	new = sc->esc_ICR & sc->esc_IMS;
561	if (new && !sc->esc_irq_asserted) {
562		DPRINTF("itr callback: lintr assert %x", new);
563		sc->esc_irq_asserted = 1;
564		pci_lintr_assert(sc->esc_pi);
565	} else {
566		mevent_delete(sc->esc_mevpitr);
567		sc->esc_mevpitr = NULL;
568	}
569	pthread_mutex_unlock(&sc->esc_mtx);
570}
571
572static void
573e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
574{
575	uint32_t new;
576
577	DPRINTF("icr assert: 0x%x", bits);
578
579	/*
580	 * An interrupt is only generated if bits are set that
581	 * aren't already in the ICR, these bits are unmasked,
582	 * and there isn't an interrupt already pending.
583	 */
584	new = bits & ~sc->esc_ICR & sc->esc_IMS;
585	sc->esc_ICR |= bits;
586
587	if (new == 0) {
588		DPRINTF("icr assert: masked %x, ims %x", new, sc->esc_IMS);
589	} else if (sc->esc_mevpitr != NULL) {
590		DPRINTF("icr assert: throttled %x, ims %x", new, sc->esc_IMS);
591	} else if (!sc->esc_irq_asserted) {
592		DPRINTF("icr assert: lintr assert %x", new);
593		sc->esc_irq_asserted = 1;
594		pci_lintr_assert(sc->esc_pi);
595		if (sc->esc_ITR != 0) {
596			sc->esc_mevpitr = mevent_add(
597			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
598			    EVF_TIMER, e82545_itr_callback, sc);
599		}
600	}
601}
602
603static void
604e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
605{
606	uint32_t new;
607
608	/*
609	 * Changing the mask may allow previously asserted
610	 * but masked interrupt requests to generate an interrupt.
611	 */
612	new = bits & sc->esc_ICR & ~sc->esc_IMS;
613	sc->esc_IMS |= bits;
614
615	if (new == 0) {
616		DPRINTF("ims change: masked %x, ims %x", new, sc->esc_IMS);
617	} else if (sc->esc_mevpitr != NULL) {
618		DPRINTF("ims change: throttled %x, ims %x", new, sc->esc_IMS);
619	} else if (!sc->esc_irq_asserted) {
620		DPRINTF("ims change: lintr assert %x", new);
621		sc->esc_irq_asserted = 1;
622		pci_lintr_assert(sc->esc_pi);
623		if (sc->esc_ITR != 0) {
624			sc->esc_mevpitr = mevent_add(
625			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
626			    EVF_TIMER, e82545_itr_callback, sc);
627		}
628	}
629}
630
631static void
632e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
633{
634
635	DPRINTF("icr deassert: 0x%x", bits);
636	sc->esc_ICR &= ~bits;
637
638	/*
639	 * If there are no longer any interrupt sources and there
640	 * was an asserted interrupt, clear it
641	 */
642	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
643		DPRINTF("icr deassert: lintr deassert %x", bits);
644		pci_lintr_deassert(sc->esc_pi);
645		sc->esc_irq_asserted = 0;
646	}
647}
648
649static void
650e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
651{
652
653	DPRINTF("intr_write: off %x, val %x", offset, value);
654
655	switch (offset) {
656	case E1000_ICR:
657		e82545_icr_deassert(sc, value);
658		break;
659	case E1000_ITR:
660		sc->esc_ITR = value;
661		break;
662	case E1000_ICS:
663		sc->esc_ICS = value;	/* not used: store for debug */
664		e82545_icr_assert(sc, value);
665		break;
666	case E1000_IMS:
667		e82545_ims_change(sc, value);
668		break;
669	case E1000_IMC:
670		sc->esc_IMC = value;	/* for debug */
671		sc->esc_IMS &= ~value;
672		// XXX clear interrupts if all ICR bits now masked
673		// and interrupt was pending ?
674		break;
675	default:
676		break;
677	}
678}
679
680static uint32_t
681e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
682{
683	uint32_t retval;
684
685	retval = 0;
686
687	DPRINTF("intr_read: off %x", offset);
688
689	switch (offset) {
690	case E1000_ICR:
691		retval = sc->esc_ICR;
692		sc->esc_ICR = 0;
693		e82545_icr_deassert(sc, ~0);
694		break;
695	case E1000_ITR:
696		retval = sc->esc_ITR;
697		break;
698	case E1000_ICS:
699		/* write-only register */
700		break;
701	case E1000_IMS:
702		retval = sc->esc_IMS;
703		break;
704	case E1000_IMC:
705		/* write-only register */
706		break;
707	default:
708		break;
709	}
710
711	return (retval);
712}
713
714static void
715e82545_devctl(struct e82545_softc *sc, uint32_t val)
716{
717
718	sc->esc_CTRL = val & ~E1000_CTRL_RST;
719
720	if (val & E1000_CTRL_RST) {
721		DPRINTF("e1k: s/w reset, ctl %x", val);
722		e82545_reset(sc, 1);
723	}
724	/* XXX check for phy reset ? */
725}
726
727static void
728e82545_rx_update_rdba(struct e82545_softc *sc)
729{
730
731	/* XXX verify desc base/len within phys mem range */
732	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
733	    sc->esc_RDBAL;
734
735	/* Cache host mapping of guest descriptor array */
736	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
737	    sc->esc_rdba, sc->esc_RDLEN);
738}
739
740static void
741e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
742{
743	int on;
744
745	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
746
747	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
748	sc->esc_RCTL = val & ~0xF9204c01;
749
750	DPRINTF("rx_ctl - %s RCTL %x, val %x",
751		on ? "on" : "off", sc->esc_RCTL, val);
752
753	/* state change requested */
754	if (on != sc->esc_rx_enabled) {
755		if (on) {
756			/* Catch disallowed/unimplemented settings */
757			//assert(!(val & E1000_RCTL_LBM_TCVR));
758
759			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
760				sc->esc_rx_loopback = 1;
761			} else {
762				sc->esc_rx_loopback = 0;
763			}
764
765			e82545_rx_update_rdba(sc);
766			e82545_rx_enable(sc);
767		} else {
768			e82545_rx_disable(sc);
769			sc->esc_rx_loopback = 0;
770			sc->esc_rdba = 0;
771			sc->esc_rxdesc = NULL;
772		}
773	}
774}
775
776static void
777e82545_tx_update_tdba(struct e82545_softc *sc)
778{
779
780	/* XXX verify desc base/len within phys mem range */
781	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
782
783	/* Cache host mapping of guest descriptor array */
784	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
785            sc->esc_TDLEN);
786}
787
788static void
789e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
790{
791	int on;
792
793	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
794
795	/* ignore TCTL_EN settings that don't change state */
796	if (on == sc->esc_tx_enabled)
797		return;
798
799	if (on) {
800		e82545_tx_update_tdba(sc);
801		e82545_tx_enable(sc);
802	} else {
803		e82545_tx_disable(sc);
804		sc->esc_tdba = 0;
805		sc->esc_txdesc = NULL;
806	}
807
808	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
809	sc->esc_TCTL = val & ~0xFE800005;
810}
811
static int
813e82545_bufsz(uint32_t rctl)
814{
815
816	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
817	case (E1000_RCTL_SZ_2048): return (2048);
818	case (E1000_RCTL_SZ_1024): return (1024);
819	case (E1000_RCTL_SZ_512): return (512);
820	case (E1000_RCTL_SZ_256): return (256);
821	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
822	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
823	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
824	}
825	return (256);	/* Forbidden value. */
826}
827
828/* XXX one packet at a time until this is debugged */
829static void
830e82545_rx_callback(int fd, enum ev_type type, void *param)
831{
832	struct e82545_softc *sc = param;
833	struct e1000_rx_desc *rxd;
834	struct iovec vec[64];
835	int left, len, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
836	uint32_t cause = 0;
837	uint16_t *tp, tag, head;
838
839	pthread_mutex_lock(&sc->esc_mtx);
840	DPRINTF("rx_run: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
841
842	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
843		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped",
844		    sc->esc_rx_enabled, sc->esc_rx_loopback);
845		while (netbe_rx_discard(sc->esc_be) > 0) {
846		}
847		goto done1;
848	}
849	bufsz = e82545_bufsz(sc->esc_RCTL);
850	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
851	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
852	size = sc->esc_RDLEN / 16;
853	head = sc->esc_RDH;
854	left = (size + sc->esc_RDT - head) % size;
855	if (left < maxpktdesc) {
856		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped",
857		    left, maxpktdesc);
858		while (netbe_rx_discard(sc->esc_be) > 0) {
859		}
860		goto done1;
861	}
862
863	sc->esc_rx_active = 1;
864	pthread_mutex_unlock(&sc->esc_mtx);
865
866	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
867
868		/* Grab rx descriptor pointed to by the head pointer */
869		for (i = 0; i < maxpktdesc; i++) {
870			rxd = &sc->esc_rxdesc[(head + i) % size];
871			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
872			    rxd->buffer_addr, bufsz);
873			vec[i].iov_len = bufsz;
874		}
875		len = netbe_recv(sc->esc_be, vec, maxpktdesc);
876		if (len <= 0) {
877			DPRINTF("netbe_recv() returned %d", len);
878			goto done;
879		}
880
881		/*
882		 * Adjust the packet length based on whether the CRC needs
883		 * to be stripped or if the packet is less than the minimum
884		 * eth packet size.
885		 */
886		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
887			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
888		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
889			len += ETHER_CRC_LEN;
890		n = (len + bufsz - 1) / bufsz;
891
892		DPRINTF("packet read %d bytes, %d segs, head %d",
893		    len, n, head);
894
895		/* Apply VLAN filter. */
896		tp = (uint16_t *)vec[0].iov_base + 6;
897		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
898		    (ntohs(tp[0]) == sc->esc_VET)) {
899			tag = ntohs(tp[1]) & 0x0fff;
900			if ((sc->esc_fvlan[tag >> 5] &
901			    (1 << (tag & 0x1f))) != 0) {
902				DPRINTF("known VLAN %d", tag);
903			} else {
904				DPRINTF("unknown VLAN %d", tag);
905				n = 0;
906				continue;
907			}
908		}
909
910		/* Update all consumed descriptors. */
911		for (i = 0; i < n - 1; i++) {
912			rxd = &sc->esc_rxdesc[(head + i) % size];
913			rxd->length = bufsz;
914			rxd->csum = 0;
915			rxd->errors = 0;
916			rxd->special = 0;
917			rxd->status = E1000_RXD_STAT_DD;
918		}
919		rxd = &sc->esc_rxdesc[(head + i) % size];
920		rxd->length = len % bufsz;
921		rxd->csum = 0;
922		rxd->errors = 0;
923		rxd->special = 0;
924		/* XXX signal no checksum for now */
925		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
926		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
927
928		/* Schedule receive interrupts. */
929		if (len <= sc->esc_RSRPD) {
930			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
931		} else {
932			/* XXX: RDRT and RADV timers should be here. */
933			cause |= E1000_ICR_RXT0;
934		}
935
936		head = (head + n) % size;
937		left -= n;
938	}
939
940done:
941	pthread_mutex_lock(&sc->esc_mtx);
942	sc->esc_rx_active = 0;
943	if (sc->esc_rx_enabled == 0)
944		pthread_cond_signal(&sc->esc_rx_cond);
945
946	sc->esc_RDH = head;
	/*
	 * Respect E1000_RCTL_RDMTS: raise RXDMT0 when the number of free
	 * descriptors falls below 1/2, 1/4 or 1/8 of the ring, as selected
	 * by RCTL bits 9:8.
	 */
948	left = (size + sc->esc_RDT - head) % size;
949	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
950		cause |= E1000_ICR_RXDMT0;
951	/* Assert all accumulated interrupts. */
952	if (cause != 0)
953		e82545_icr_assert(sc, cause);
954done1:
955	DPRINTF("rx_run done: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
956	pthread_mutex_unlock(&sc->esc_mtx);
957}
958
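/*
 * 16-bit one's-complement sum helpers (RFC 1071) used for the IP/TCP/UDP
 * checksum offload on the transmit path.  For example, summing the words
 * 0xFFFF and 0x0001 gives 0x10000, which e82545_carry() folds to 0x0001;
 * e82545_transmit_checksum() stores the complement of the final sum.
 */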
959static uint16_t
960e82545_carry(uint32_t sum)
961{
962
963	sum = (sum & 0xFFFF) + (sum >> 16);
964	if (sum > 0xFFFF)
965		sum -= 0xFFFF;
966	return (sum);
967}
968
969static uint16_t
970e82545_buf_checksum(uint8_t *buf, int len)
971{
972	int i;
973	uint32_t sum = 0;
974
975	/* Checksum all the pairs of bytes first... */
976	for (i = 0; i < (len & ~1U); i += 2)
977		sum += *((u_int16_t *)(buf + i));
978
979	/*
980	 * If there's a single byte left over, checksum it, too.
981	 * Network byte order is big-endian, so the remaining byte is
982	 * the high byte.
983	 */
984	if (i < len)
985		sum += htons(buf[i] << 8);
986
987	return (e82545_carry(sum));
988}
989
990static uint16_t
991e82545_iov_checksum(struct iovec *iov, int iovcnt, int off, int len)
992{
993	int now, odd;
994	uint32_t sum = 0, s;
995
996	/* Skip completely unneeded vectors. */
997	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
998		off -= iov->iov_len;
999		iov++;
1000		iovcnt--;
1001	}
1002
1003	/* Calculate checksum of requested range. */
1004	odd = 0;
1005	while (len > 0 && iovcnt > 0) {
1006		now = MIN(len, iov->iov_len - off);
1007		s = e82545_buf_checksum(iov->iov_base + off, now);
1008		sum += odd ? (s << 8) : s;
1009		odd ^= (now & 1);
1010		len -= now;
1011		off = 0;
1012		iov++;
1013		iovcnt--;
1014	}
1015
1016	return (e82545_carry(sum));
1017}
1018
1019/*
1020 * Return the transmit descriptor type.
1021 */
static int
1023e82545_txdesc_type(uint32_t lower)
1024{
1025	int type;
1026
1027	type = 0;
1028
1029	if (lower & E1000_TXD_CMD_DEXT)
1030		type = lower & E1000_TXD_MASK;
1031
1032	return (type);
1033}
1034
1035static void
1036e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
1037{
1038	uint16_t cksum;
1039	int cklen;
1040
1041	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d",
1042	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
1043	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1 : INT_MAX;
1044	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
1045	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
1046}
1047
1048static void
1049e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
1050{
1051
1052	if (sc->esc_be == NULL)
1053		return;
1054
1055	(void) netbe_send(sc->esc_be, iov, iovcnt);
1056}
1057
1058static void
1059e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1060    uint16_t dsize, int *tdwb)
1061{
1062	union e1000_tx_udesc *dsc;
1063
1064	for ( ; head != tail; head = (head + 1) % dsize) {
1065		dsc = &sc->esc_txdesc[head];
1066		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
1067			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1068			*tdwb = 1;
1069		}
1070	}
1071}
1072
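/*
 * Gather one packet's worth of descriptors starting at head: a context
 * descriptor is saved and consumed on its own, otherwise legacy or data
 * descriptors are collected into an iovec until EOP, checksum offload
 * and optional VLAN insertion or TSO are applied, and the frame(s) are
 * handed to the backend.  Returns the number of descriptors consumed,
 * or 0 if there was no complete packet to process.
 */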
1073static int
1074e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1075    uint16_t dsize, uint16_t *rhead, int *tdwb)
1076{
1077	uint8_t *hdr, *hdrp;
1078	struct iovec iovb[I82545_MAX_TXSEGS + 2];
1079	struct iovec tiov[I82545_MAX_TXSEGS + 2];
1080	struct e1000_context_desc *cd;
1081	struct ck_info ckinfo[2];
1082	struct iovec *iov;
1083	union  e1000_tx_udesc *dsc;
1084	int desc, dtype, len, ntype, iovcnt, tlen, tcp, tso;
1085	int mss, paylen, seg, tiovcnt, left, now, nleft, nnow, pv, pvoff;
1086	unsigned hdrlen, vlen;
1087	uint32_t tcpsum, tcpseq;
1088	uint16_t ipcs, tcpcs, ipid, ohead;
1089
1090	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1091	iovcnt = 0;
1092	tlen = 0;
1093	ntype = 0;
1094	tso = 0;
1095	ohead = head;
1096
1097	/* iovb[0/1] may be used for writable copy of headers. */
1098	iov = &iovb[2];
1099
1100	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1101		if (head == tail) {
1102			*rhead = head;
1103			return (0);
1104		}
1105		dsc = &sc->esc_txdesc[head];
1106		dtype = e82545_txdesc_type(dsc->td.lower.data);
1107
1108		if (desc == 0) {
1109			switch (dtype) {
1110			case E1000_TXD_TYP_C:
1111				DPRINTF("tx ctxt desc idx %d: %016jx "
1112				    "%08x%08x",
1113				    head, dsc->td.buffer_addr,
1114				    dsc->td.upper.data, dsc->td.lower.data);
1115				/* Save context and return */
1116				sc->esc_txctx = dsc->cd;
1117				goto done;
1118			case E1000_TXD_TYP_L:
1119				DPRINTF("tx legacy desc idx %d: %08x%08x",
1120				    head, dsc->td.upper.data, dsc->td.lower.data);
1121				/*
1122				 * legacy cksum start valid in first descriptor
1123				 */
1124				ntype = dtype;
1125				ckinfo[0].ck_start = dsc->td.upper.fields.css;
1126				break;
1127			case E1000_TXD_TYP_D:
1128				DPRINTF("tx data desc idx %d: %08x%08x",
1129				    head, dsc->td.upper.data, dsc->td.lower.data);
1130				ntype = dtype;
1131				break;
1132			default:
1133				break;
1134			}
1135		} else {
1136			/* Descriptor type must be consistent */
1137			assert(dtype == ntype);
1138			DPRINTF("tx next desc idx %d: %08x%08x",
1139			    head, dsc->td.upper.data, dsc->td.lower.data);
1140		}
1141
1142		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1143		    dsc->dd.lower.data & 0xFFFFF;
1144
1145		if (len > 0) {
1146			/* Strip checksum supplied by guest. */
1147			if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1148			    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0)
1149				len -= 2;
1150			tlen += len;
1151			if (iovcnt < I82545_MAX_TXSEGS) {
1152				iov[iovcnt].iov_base = paddr_guest2host(
1153				    sc->esc_ctx, dsc->td.buffer_addr, len);
1154				iov[iovcnt].iov_len = len;
1155			}
1156			iovcnt++;
1157		}
1158
1159		/*
1160		 * Pull out info that is valid in the final descriptor
1161		 * and exit descriptor loop.
1162		 */
1163		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1164			if (dtype == E1000_TXD_TYP_L) {
1165				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1166					ckinfo[0].ck_valid = 1;
1167					ckinfo[0].ck_off =
1168					    dsc->td.lower.flags.cso;
1169					ckinfo[0].ck_len = 0;
1170				}
1171			} else {
1172				cd = &sc->esc_txctx;
1173				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1174					tso = 1;
1175				if (dsc->dd.upper.fields.popts &
1176				    E1000_TXD_POPTS_IXSM)
1177					ckinfo[0].ck_valid = 1;
1178				if (dsc->dd.upper.fields.popts &
1179				    E1000_TXD_POPTS_IXSM || tso) {
1180					ckinfo[0].ck_start =
1181					    cd->lower_setup.ip_fields.ipcss;
1182					ckinfo[0].ck_off =
1183					    cd->lower_setup.ip_fields.ipcso;
1184					ckinfo[0].ck_len =
1185					    cd->lower_setup.ip_fields.ipcse;
1186				}
1187				if (dsc->dd.upper.fields.popts &
1188				    E1000_TXD_POPTS_TXSM)
1189					ckinfo[1].ck_valid = 1;
1190				if (dsc->dd.upper.fields.popts &
1191				    E1000_TXD_POPTS_TXSM || tso) {
1192					ckinfo[1].ck_start =
1193					    cd->upper_setup.tcp_fields.tucss;
1194					ckinfo[1].ck_off =
1195					    cd->upper_setup.tcp_fields.tucso;
1196					ckinfo[1].ck_len =
1197					    cd->upper_setup.tcp_fields.tucse;
1198				}
1199			}
1200			break;
1201		}
1202	}
1203
1204	if (iovcnt > I82545_MAX_TXSEGS) {
1205		WPRINTF("tx too many descriptors (%d > %d) -- dropped",
1206		    iovcnt, I82545_MAX_TXSEGS);
1207		goto done;
1208	}
1209
1210	hdrlen = vlen = 0;
1211	/* Estimate writable space for VLAN header insertion. */
1212	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1213	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1214		hdrlen = ETHER_ADDR_LEN*2;
1215		vlen = ETHER_VLAN_ENCAP_LEN;
1216	}
1217	if (!tso) {
1218		/* Estimate required writable space for checksums. */
1219		if (ckinfo[0].ck_valid)
1220			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2);
1221		if (ckinfo[1].ck_valid)
1222			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2);
1223		/* Round up writable space to the first vector. */
1224		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1225		    iov[0].iov_len < hdrlen + 100)
1226			hdrlen = iov[0].iov_len;
1227	} else {
		/* For TSO, the header length is provided by software. */
1229		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1230
1231		/*
1232		 * Cap the header length at 240 based on 7.2.4.5 of
1233		 * the Intel 82576EB (Rev 2.63) datasheet.
1234		 */
1235		if (hdrlen > 240) {
1236			WPRINTF("TSO hdrlen too large: %d", hdrlen);
1237			goto done;
1238		}
1239
1240		/*
1241		 * If VLAN insertion is requested, ensure the header
1242		 * at least holds the amount of data copied during
1243		 * VLAN insertion below.
1244		 *
1245		 * XXX: Realistic packets will include a full Ethernet
1246		 * header before the IP header at ckinfo[0].ck_start,
1247		 * but this check is sufficient to prevent
1248		 * out-of-bounds access below.
1249		 */
1250		if (vlen != 0 && hdrlen < ETHER_ADDR_LEN*2) {
1251			WPRINTF("TSO hdrlen too small for vlan insertion "
1252			    "(%d vs %d) -- dropped", hdrlen,
1253			    ETHER_ADDR_LEN*2);
1254			goto done;
1255		}
1256
1257		/*
1258		 * Ensure that the header length covers the used fields
1259		 * in the IP and TCP headers as well as the IP and TCP
1260		 * checksums.  The following fields are accessed below:
1261		 *
1262		 * Header | Field | Offset | Length
1263		 * -------+-------+--------+-------
1264		 * IPv4   | len   | 2      | 2
1265		 * IPv4   | ID    | 4      | 2
1266		 * IPv6   | len   | 4      | 2
1267		 * TCP    | seq # | 4      | 4
1268		 * TCP    | flags | 13     | 1
1269		 * UDP    | len   | 4      | 4
1270		 */
1271		if (hdrlen < ckinfo[0].ck_start + 6 ||
1272		    hdrlen < ckinfo[0].ck_off + 2) {
1273			WPRINTF("TSO hdrlen too small for IP fields (%d) "
1274			    "-- dropped", hdrlen);
1275			goto done;
1276		}
1277		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) {
1278			if (hdrlen < ckinfo[1].ck_start + 14 ||
1279			    (ckinfo[1].ck_valid &&
1280			    hdrlen < ckinfo[1].ck_off + 2)) {
1281				WPRINTF("TSO hdrlen too small for TCP fields "
1282				    "(%d) -- dropped", hdrlen);
1283				goto done;
1284			}
1285		} else {
1286			if (hdrlen < ckinfo[1].ck_start + 8) {
1287				WPRINTF("TSO hdrlen too small for UDP fields "
1288				    "(%d) -- dropped", hdrlen);
1289				goto done;
1290			}
1291		}
1292	}
1293
1294	/* Allocate, fill and prepend writable header vector. */
1295	if (hdrlen != 0) {
1296		hdr = __builtin_alloca(hdrlen + vlen);
1297		hdr += vlen;
1298		for (left = hdrlen, hdrp = hdr; left > 0;
1299		    left -= now, hdrp += now) {
1300			now = MIN(left, iov->iov_len);
1301			memcpy(hdrp, iov->iov_base, now);
1302			iov->iov_base += now;
1303			iov->iov_len -= now;
1304			if (iov->iov_len == 0) {
1305				iov++;
1306				iovcnt--;
1307			}
1308		}
1309		iov--;
1310		iovcnt++;
1311		iov->iov_base = hdr;
1312		iov->iov_len = hdrlen;
1313	} else
1314		hdr = NULL;
1315
1316	/* Insert VLAN tag. */
1317	if (vlen != 0) {
1318		hdr -= ETHER_VLAN_ENCAP_LEN;
1319		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
1320		hdrlen += ETHER_VLAN_ENCAP_LEN;
1321		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
1322		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
1323		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
1324		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
1325		iov->iov_base = hdr;
1326		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
1327		/* Correct checksum offsets after VLAN tag insertion. */
1328		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
1329		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
1330		if (ckinfo[0].ck_len != 0)
1331			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
1332		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
1333		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
1334		if (ckinfo[1].ck_len != 0)
1335			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
1336	}
1337
1338	/* Simple non-TSO case. */
1339	if (!tso) {
1340		/* Calculate checksums and transmit. */
1341		if (ckinfo[0].ck_valid)
1342			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
1343		if (ckinfo[1].ck_valid)
1344			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
1345		e82545_transmit_backend(sc, iov, iovcnt);
1346		goto done;
1347	}
1348
1349	/* Doing TSO. */
1350	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
1351	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
1352	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
1353	DPRINTF("tx %s segmentation offload %d+%d/%d bytes %d iovs",
1354	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
1355	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
1356	tcpseq = 0;
1357	if (tcp)
1358		tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
1359	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
1360	tcpcs = 0;
1361	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
1362		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
1363	pv = 1;
1364	pvoff = 0;
1365	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
1366		now = MIN(left, mss);
1367
1368		/* Construct IOVs for the segment. */
1369		/* Include whole original header. */
1370		tiov[0].iov_base = hdr;
1371		tiov[0].iov_len = hdrlen;
1372		tiovcnt = 1;
1373		/* Include respective part of payload IOV. */
1374		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
1375			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
1376			tiov[tiovcnt].iov_base = iov[pv].iov_base + pvoff;
1377			tiov[tiovcnt++].iov_len = nnow;
1378			if (pvoff + nnow == iov[pv].iov_len) {
1379				pv++;
1380				pvoff = 0;
1381			} else
1382				pvoff += nnow;
1383		}
1384		DPRINTF("tx segment %d %d+%d bytes %d iovs",
1385		    seg, hdrlen, now, tiovcnt);
1386
1387		/* Update IP header. */
1388		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
1389			/* IPv4 -- set length and ID */
1390			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
1391			    htons(hdrlen - ckinfo[0].ck_start + now);
1392			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1393			    htons(ipid + seg);
1394		} else {
1395			/* IPv6 -- set length */
1396			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1397			    htons(hdrlen - ckinfo[0].ck_start - 40 +
1398				  now);
1399		}
1400
1401		/* Update pseudo-header checksum. */
1402		tcpsum = tcpcs;
1403		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
1404
1405		/* Update TCP/UDP headers. */
1406		if (tcp) {
1407			/* Update sequence number and FIN/PUSH flags. */
1408			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1409			    htonl(tcpseq + paylen - left);
1410			if (now < left) {
1411				hdr[ckinfo[1].ck_start + 13] &=
1412				    ~(TH_FIN | TH_PUSH);
1413			}
1414		} else {
1415			/* Update payload length. */
1416			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1417			    hdrlen - ckinfo[1].ck_start + now;
1418		}
1419
1420		/* Calculate checksums and transmit. */
1421		if (ckinfo[0].ck_valid) {
1422			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
1423			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
1424		}
1425		if (ckinfo[1].ck_valid) {
1426			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
1427			    e82545_carry(tcpsum);
1428			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
1429		}
1430		e82545_transmit_backend(sc, tiov, tiovcnt);
1431	}
1432
1433done:
1434	head = (head + 1) % dsize;
1435	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
1436
1437	*rhead = head;
1438	return (desc + 1);
1439}
1440
1441static void
1442e82545_tx_run(struct e82545_softc *sc)
1443{
1444	uint32_t cause;
1445	uint16_t head, rhead, tail, size;
1446	int lim, tdwb, sent;
1447
1448	head = sc->esc_TDH;
1449	tail = sc->esc_TDT;
1450	size = sc->esc_TDLEN / 16;
1451	DPRINTF("tx_run: head %x, rhead %x, tail %x",
1452	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1453
1454	pthread_mutex_unlock(&sc->esc_mtx);
1455	rhead = head;
1456	tdwb = 0;
1457	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
1458		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
1459		if (sent == 0)
1460			break;
1461		head = rhead;
1462	}
1463	pthread_mutex_lock(&sc->esc_mtx);
1464
1465	sc->esc_TDH = head;
1466	sc->esc_TDHr = rhead;
1467	cause = 0;
1468	if (tdwb)
1469		cause |= E1000_ICR_TXDW;
1470	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
1471		cause |= E1000_ICR_TXQE;
1472	if (cause)
1473		e82545_icr_assert(sc, cause);
1474
1475	DPRINTF("tx_run done: head %x, rhead %x, tail %x",
1476	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1477}
1478
1479static _Noreturn void *
1480e82545_tx_thread(void *param)
1481{
1482	struct e82545_softc *sc = param;
1483
1484	pthread_mutex_lock(&sc->esc_mtx);
1485	for (;;) {
1486		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
1487			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
1488				break;
1489			sc->esc_tx_active = 0;
1490			if (sc->esc_tx_enabled == 0)
1491				pthread_cond_signal(&sc->esc_tx_cond);
1492			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1493		}
1494		sc->esc_tx_active = 1;
1495
1496		/* Process some tx descriptors.  Lock dropped inside. */
1497		e82545_tx_run(sc);
1498	}
1499}
1500
1501static void
1502e82545_tx_start(struct e82545_softc *sc)
1503{
1504
1505	if (sc->esc_tx_active == 0)
1506		pthread_cond_signal(&sc->esc_tx_cond);
1507}
1508
1509static void
1510e82545_tx_enable(struct e82545_softc *sc)
1511{
1512
1513	sc->esc_tx_enabled = 1;
1514}
1515
1516static void
1517e82545_tx_disable(struct e82545_softc *sc)
1518{
1519
1520	sc->esc_tx_enabled = 0;
1521	while (sc->esc_tx_active)
1522		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1523}
1524
1525static void
1526e82545_rx_enable(struct e82545_softc *sc)
1527{
1528
1529	sc->esc_rx_enabled = 1;
1530}
1531
1532static void
1533e82545_rx_disable(struct e82545_softc *sc)
1534{
1535
1536	sc->esc_rx_enabled = 0;
1537	while (sc->esc_rx_active)
1538		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
1539}
1540
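/*
 * RAL/RAH register pairs: RAL holds octets 0-3 of the station address,
 * RAH holds octets 4-5 together with the address-select field and the
 * Address Valid bit, mirrored here in struct eth_uni.
 */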
1541static void
1542e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
1543{
1544	struct eth_uni *eu;
1545	int idx;
1546
1547	idx = reg >> 1;
1548	assert(idx < 15);
1549
1550	eu = &sc->esc_uni[idx];
1551
1552	if (reg & 0x1) {
1553		/* RAH */
1554		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
1555		eu->eu_addrsel = (wval >> 16) & 0x3;
1556		eu->eu_eth.octet[5] = wval >> 8;
1557		eu->eu_eth.octet[4] = wval;
1558	} else {
1559		/* RAL */
1560		eu->eu_eth.octet[3] = wval >> 24;
1561		eu->eu_eth.octet[2] = wval >> 16;
1562		eu->eu_eth.octet[1] = wval >> 8;
1563		eu->eu_eth.octet[0] = wval;
1564	}
1565}
1566
1567static uint32_t
1568e82545_read_ra(struct e82545_softc *sc, int reg)
1569{
1570	struct eth_uni *eu;
1571	uint32_t retval;
1572	int idx;
1573
1574	idx = reg >> 1;
1575	assert(idx < 15);
1576
1577	eu = &sc->esc_uni[idx];
1578
1579	if (reg & 0x1) {
1580		/* RAH */
1581		retval = (eu->eu_valid << 31) |
1582			 (eu->eu_addrsel << 16) |
1583			 (eu->eu_eth.octet[5] << 8) |
1584			 eu->eu_eth.octet[4];
1585	} else {
1586		/* RAL */
1587		retval = (eu->eu_eth.octet[3] << 24) |
1588			 (eu->eu_eth.octet[2] << 16) |
1589			 (eu->eu_eth.octet[1] << 8) |
1590			 eu->eu_eth.octet[0];
1591	}
1592
1593	return (retval);
1594}
1595
1596static void
1597e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
1598{
1599	int ridx;
1600
1601	if (offset & 0x3) {
1602		DPRINTF("Unaligned register write offset:0x%x value:0x%x", offset, value);
1603		return;
1604	}
1605	DPRINTF("Register write: 0x%x value: 0x%x", offset, value);
1606
1607	switch (offset) {
1608	case E1000_CTRL:
1609	case E1000_CTRL_DUP:
1610		e82545_devctl(sc, value);
1611		break;
1612	case E1000_FCAL:
1613		sc->esc_FCAL = value;
1614		break;
1615	case E1000_FCAH:
1616		sc->esc_FCAH = value & ~0xFFFF0000;
1617		break;
1618	case E1000_FCT:
1619		sc->esc_FCT = value & ~0xFFFF0000;
1620		break;
1621	case E1000_VET:
1622		sc->esc_VET = value & ~0xFFFF0000;
1623		break;
1624	case E1000_FCTTV:
1625		sc->esc_FCTTV = value & ~0xFFFF0000;
1626		break;
1627	case E1000_LEDCTL:
1628		sc->esc_LEDCTL = value & ~0x30303000;
1629		break;
1630	case E1000_PBA:
1631		sc->esc_PBA = value & 0x0000FF80;
1632		break;
1633	case E1000_ICR:
1634	case E1000_ITR:
1635	case E1000_ICS:
1636	case E1000_IMS:
1637	case E1000_IMC:
1638		e82545_intr_write(sc, offset, value);
1639		break;
1640	case E1000_RCTL:
1641		e82545_rx_ctl(sc, value);
1642		break;
1643	case E1000_FCRTL:
1644		sc->esc_FCRTL = value & ~0xFFFF0007;
1645		break;
1646	case E1000_FCRTH:
1647		sc->esc_FCRTH = value & ~0xFFFF0007;
1648		break;
1649	case E1000_RDBAL(0):
1650		sc->esc_RDBAL = value & ~0xF;
1651		if (sc->esc_rx_enabled) {
1652			/* Apparently legal: update cached address */
1653			e82545_rx_update_rdba(sc);
1654		}
1655		break;
1656	case E1000_RDBAH(0):
1657		assert(!sc->esc_rx_enabled);
1658		sc->esc_RDBAH = value;
1659		break;
1660	case E1000_RDLEN(0):
1661		assert(!sc->esc_rx_enabled);
1662		sc->esc_RDLEN = value & ~0xFFF0007F;
1663		break;
1664	case E1000_RDH(0):
1665		/* XXX should only ever be zero ? Range check ? */
1666		sc->esc_RDH = value;
1667		break;
1668	case E1000_RDT(0):
1669		/* XXX if this opens up the rx ring, do something ? */
1670		sc->esc_RDT = value;
1671		break;
1672	case E1000_RDTR:
1673		/* ignore FPD bit 31 */
1674		sc->esc_RDTR = value & ~0xFFFF0000;
1675		break;
1676	case E1000_RXDCTL(0):
1677		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
1678		break;
1679	case E1000_RADV:
1680		sc->esc_RADV = value & ~0xFFFF0000;
1681		break;
1682	case E1000_RSRPD:
1683		sc->esc_RSRPD = value & ~0xFFFFF000;
1684		break;
1685	case E1000_RXCSUM:
1686		sc->esc_RXCSUM = value & ~0xFFFFF800;
1687		break;
1688	case E1000_TXCW:
1689		sc->esc_TXCW = value & ~0x3FFF0000;
1690		break;
1691	case E1000_TCTL:
1692		e82545_tx_ctl(sc, value);
1693		break;
1694	case E1000_TIPG:
1695		sc->esc_TIPG = value;
1696		break;
1697	case E1000_AIT:
1698		sc->esc_AIT = value;
1699		break;
1700	case E1000_TDBAL(0):
1701		sc->esc_TDBAL = value & ~0xF;
1702		if (sc->esc_tx_enabled)
1703			e82545_tx_update_tdba(sc);
1704		break;
1705	case E1000_TDBAH(0):
1706		sc->esc_TDBAH = value;
1707		if (sc->esc_tx_enabled)
1708			e82545_tx_update_tdba(sc);
1709		break;
1710	case E1000_TDLEN(0):
1711		sc->esc_TDLEN = value & ~0xFFF0007F;
1712		if (sc->esc_tx_enabled)
1713			e82545_tx_update_tdba(sc);
1714		break;
1715	case E1000_TDH(0):
1716		//assert(!sc->esc_tx_enabled);
1717		/* XXX should only ever be zero ? Range check ? */
1718		sc->esc_TDHr = sc->esc_TDH = value;
1719		break;
1720	case E1000_TDT(0):
1721		/* XXX range check ? */
1722		sc->esc_TDT = value;
1723		if (sc->esc_tx_enabled)
1724			e82545_tx_start(sc);
1725		break;
1726	case E1000_TIDV:
1727		sc->esc_TIDV = value & ~0xFFFF0000;
1728		break;
1729	case E1000_TXDCTL(0):
1730		//assert(!sc->esc_tx_enabled);
1731		sc->esc_TXDCTL = value & ~0xC0C0C0;
1732		break;
1733	case E1000_TADV:
1734		sc->esc_TADV = value & ~0xFFFF0000;
1735		break;
1736	case E1000_RAL(0) ... E1000_RAH(15):
1737		/* convert to u32 offset */
1738		ridx = (offset - E1000_RAL(0)) >> 2;
1739		e82545_write_ra(sc, ridx, value);
1740		break;
1741	case E1000_MTA ... (E1000_MTA + (127*4)):
1742		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
1743		break;
1744	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1745		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
1746		break;
1747	case E1000_EECD:
1748	{
1749		//DPRINTF("EECD write 0x%x -> 0x%x", sc->eeprom_control, value);
1750		/* edge triggered low->high */
1751		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
1752			0 : (value & E1000_EECD_SK));
1753		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
1754					E1000_EECD_DI|E1000_EECD_REQ);
1755		sc->eeprom_control &= ~eecd_mask;
1756		sc->eeprom_control |= (value & eecd_mask);
1757		/* grant/revoke immediately */
1758		if (value & E1000_EECD_REQ) {
1759			sc->eeprom_control |= E1000_EECD_GNT;
1760		} else {
			sc->eeprom_control &= ~E1000_EECD_GNT;
1762		}
1763		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
1764			e82545_eecd_strobe(sc);
1765		}
1766		return;
1767	}
1768	case E1000_MDIC:
1769	{
1770		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
1771						E1000_MDIC_REG_SHIFT);
1772		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
1773						E1000_MDIC_PHY_SHIFT);
1774		sc->mdi_control =
1775			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
1776		if ((value & E1000_MDIC_READY) != 0) {
1777			DPRINTF("Incorrect MDIC ready bit: 0x%x", value);
1778			return;
1779		}
1780		switch (value & E82545_MDIC_OP_MASK) {
1781		case E1000_MDIC_OP_READ:
1782			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
1783			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
1784			break;
1785		case E1000_MDIC_OP_WRITE:
1786			e82545_write_mdi(sc, reg_addr, phy_addr,
1787				value & E82545_MDIC_DATA_MASK);
1788			break;
1789		default:
1790			DPRINTF("Unknown MDIC op: 0x%x", value);
1791			return;
1792		}
1793		/* TODO: barrier? */
1794		sc->mdi_control |= E1000_MDIC_READY;
1795		if (value & E82545_MDIC_IE) {
1796			// TODO: generate interrupt
1797		}
1798		return;
1799	}
1800	case E1000_MANC:
1801	case E1000_STATUS:
1802		return;
1803	default:
1804		DPRINTF("Unknown write register: 0x%x value:%x", offset, value);
1805		return;
1806	}
1807}
1808
1809static uint32_t
1810e82545_read_register(struct e82545_softc *sc, uint32_t offset)
1811{
1812	uint32_t retval;
1813	int ridx;
1814
1815	if (offset & 0x3) {
1816		DPRINTF("Unaligned register read offset:0x%x", offset);
1817		return 0;
1818	}
1819
1820	DPRINTF("Register read: 0x%x", offset);
1821
1822	switch (offset) {
1823	case E1000_CTRL:
1824		retval = sc->esc_CTRL;
1825		break;
1826	case E1000_STATUS:
1827		retval = E1000_STATUS_FD | E1000_STATUS_LU |
1828		    E1000_STATUS_SPEED_1000;
1829		break;
1830	case E1000_FCAL:
1831		retval = sc->esc_FCAL;
1832		break;
1833	case E1000_FCAH:
1834		retval = sc->esc_FCAH;
1835		break;
1836	case E1000_FCT:
1837		retval = sc->esc_FCT;
1838		break;
1839	case E1000_VET:
1840		retval = sc->esc_VET;
1841		break;
1842	case E1000_FCTTV:
1843		retval = sc->esc_FCTTV;
1844		break;
1845	case E1000_LEDCTL:
1846		retval = sc->esc_LEDCTL;
1847		break;
1848	case E1000_PBA:
1849		retval = sc->esc_PBA;
1850		break;
1851	case E1000_ICR:
1852	case E1000_ITR:
1853	case E1000_ICS:
1854	case E1000_IMS:
1855	case E1000_IMC:
1856		retval = e82545_intr_read(sc, offset);
1857		break;
1858	case E1000_RCTL:
1859		retval = sc->esc_RCTL;
1860		break;
1861	case E1000_FCRTL:
1862		retval = sc->esc_FCRTL;
1863		break;
1864	case E1000_FCRTH:
1865		retval = sc->esc_FCRTH;
1866		break;
1867	case E1000_RDBAL(0):
1868		retval = sc->esc_RDBAL;
1869		break;
1870	case E1000_RDBAH(0):
1871		retval = sc->esc_RDBAH;
1872		break;
1873	case E1000_RDLEN(0):
1874		retval = sc->esc_RDLEN;
1875		break;
1876	case E1000_RDH(0):
1877		retval = sc->esc_RDH;
1878		break;
1879	case E1000_RDT(0):
1880		retval = sc->esc_RDT;
1881		break;
1882	case E1000_RDTR:
1883		retval = sc->esc_RDTR;
1884		break;
1885	case E1000_RXDCTL(0):
1886		retval = sc->esc_RXDCTL;
1887		break;
1888	case E1000_RADV:
1889		retval = sc->esc_RADV;
1890		break;
1891	case E1000_RSRPD:
1892		retval = sc->esc_RSRPD;
1893		break;
1894	case E1000_RXCSUM:
1895		retval = sc->esc_RXCSUM;
1896		break;
1897	case E1000_TXCW:
1898		retval = sc->esc_TXCW;
1899		break;
1900	case E1000_TCTL:
1901		retval = sc->esc_TCTL;
1902		break;
1903	case E1000_TIPG:
1904		retval = sc->esc_TIPG;
1905		break;
1906	case E1000_AIT:
1907		retval = sc->esc_AIT;
1908		break;
1909	case E1000_TDBAL(0):
1910		retval = sc->esc_TDBAL;
1911		break;
1912	case E1000_TDBAH(0):
1913		retval = sc->esc_TDBAH;
1914		break;
1915	case E1000_TDLEN(0):
1916		retval = sc->esc_TDLEN;
1917		break;
1918	case E1000_TDH(0):
1919		retval = sc->esc_TDH;
1920		break;
1921	case E1000_TDT(0):
1922		retval = sc->esc_TDT;
1923		break;
1924	case E1000_TIDV:
1925		retval = sc->esc_TIDV;
1926		break;
1927	case E1000_TXDCTL(0):
1928		retval = sc->esc_TXDCTL;
1929		break;
1930	case E1000_TADV:
1931		retval = sc->esc_TADV;
1932		break;
1933	case E1000_RAL(0) ... E1000_RAH(15):
1934		/* convert to u32 offset */
1935		ridx = (offset - E1000_RAL(0)) >> 2;
1936		retval = e82545_read_ra(sc, ridx);
1937		break;
1938	case E1000_MTA ... (E1000_MTA + (127*4)):
1939		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
1940		break;
1941	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1942		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
1943		break;
1944	case E1000_EECD:
1945		//DPRINTF("EECD read %x", sc->eeprom_control);
1946		retval = sc->eeprom_control;
1947		break;
1948	case E1000_MDIC:
1949		retval = sc->mdi_control;
1950		break;
1951	case E1000_MANC:
1952		retval = 0;
1953		break;
1954	/* stats that we emulate. */
1955	case E1000_MPC:
1956		retval = sc->missed_pkt_count;
1957		break;
1958	case E1000_PRC64:
1959		retval = sc->pkt_rx_by_size[0];
1960		break;
1961	case E1000_PRC127:
1962		retval = sc->pkt_rx_by_size[1];
1963		break;
1964	case E1000_PRC255:
1965		retval = sc->pkt_rx_by_size[2];
1966		break;
1967	case E1000_PRC511:
1968		retval = sc->pkt_rx_by_size[3];
1969		break;
1970	case E1000_PRC1023:
1971		retval = sc->pkt_rx_by_size[4];
1972		break;
1973	case E1000_PRC1522:
1974		retval = sc->pkt_rx_by_size[5];
1975		break;
1976	case E1000_GPRC:
1977		retval = sc->good_pkt_rx_count;
1978		break;
1979	case E1000_BPRC:
1980		retval = sc->bcast_pkt_rx_count;
1981		break;
1982	case E1000_MPRC:
1983		retval = sc->mcast_pkt_rx_count;
1984		break;
1985	case E1000_GPTC:
1986	case E1000_TPT:
1987		retval = sc->good_pkt_tx_count;
1988		break;
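	/*
	 * The 64-bit octet counters are exposed as 32-bit low/high register
	 * pairs.  Illustrative reassembly on the reader's side (rd32() is a
	 * placeholder register accessor, not part of this emulation):
	 *
	 *	lo = rd32(E1000_GORCL);
	 *	hi = rd32(E1000_GORCH);
	 *	octets = ((uint64_t)hi << 32) | lo;
	 */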
1989	case E1000_GORCL:
1990		retval = (uint32_t)sc->good_octets_rx;
1991		break;
1992	case E1000_GORCH:
1993		retval = (uint32_t)(sc->good_octets_rx >> 32);
1994		break;
1995	case E1000_TOTL:
1996	case E1000_GOTCL:
1997		retval = (uint32_t)sc->good_octets_tx;
1998		break;
1999	case E1000_TOTH:
2000	case E1000_GOTCH:
2001		retval = (uint32_t)(sc->good_octets_tx >> 32);
2002		break;
2003	case E1000_ROC:
2004		retval = sc->oversize_rx_count;
2005		break;
2006	case E1000_TORL:
2007		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
2008		break;
2009	case E1000_TORH:
2010		retval = (uint32_t)((sc->good_octets_rx +
2011		    sc->missed_octets) >> 32);
2012		break;
2013	case E1000_TPR:
2014		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
2015		    sc->oversize_rx_count;
2016		break;
2017	case E1000_PTC64:
2018		retval = sc->pkt_tx_by_size[0];
2019		break;
2020	case E1000_PTC127:
2021		retval = sc->pkt_tx_by_size[1];
2022		break;
2023	case E1000_PTC255:
2024		retval = sc->pkt_tx_by_size[2];
2025		break;
2026	case E1000_PTC511:
2027		retval = sc->pkt_tx_by_size[3];
2028		break;
2029	case E1000_PTC1023:
2030		retval = sc->pkt_tx_by_size[4];
2031		break;
2032	case E1000_PTC1522:
2033		retval = sc->pkt_tx_by_size[5];
2034		break;
2035	case E1000_MPTC:
2036		retval = sc->mcast_pkt_tx_count;
2037		break;
2038	case E1000_BPTC:
2039		retval = sc->bcast_pkt_tx_count;
2040		break;
2041	case E1000_TSCTC:
2042		retval = sc->tso_tx_count;
2043		break;
2044	/* stats that are always 0. */
2045	case E1000_CRCERRS:
2046	case E1000_ALGNERRC:
2047	case E1000_SYMERRS:
2048	case E1000_RXERRC:
2049	case E1000_SCC:
2050	case E1000_ECOL:
2051	case E1000_MCC:
2052	case E1000_LATECOL:
2053	case E1000_COLC:
2054	case E1000_DC:
2055	case E1000_TNCRS:
2056	case E1000_SEC:
2057	case E1000_CEXTERR:
2058	case E1000_RLEC:
2059	case E1000_XONRXC:
2060	case E1000_XONTXC:
2061	case E1000_XOFFRXC:
2062	case E1000_XOFFTXC:
2063	case E1000_FCRUC:
2064	case E1000_RNBC:
2065	case E1000_RUC:
2066	case E1000_RFC:
2067	case E1000_RJC:
2068	case E1000_MGTPRC:
2069	case E1000_MGTPDC:
2070	case E1000_MGTPTC:
2071	case E1000_TSCTFC:
2072		retval = 0;
2073		break;
2074	default:
2075		DPRINTF("Unknown read register: 0x%x", offset);
2076		retval = 0;
2077		break;
2078	}
2079
2080	return (retval);
2081}
2082
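/*
 * pe_barwrite handler.  Writes to the I/O BAR go through the classic
 * IOADDR/IODATA window pair: the guest first latches a register offset in
 * IOADDR and then writes the value through IODATA.  Writes to the
 * memory-mapped register BAR are dispatched directly.  All accesses are
 * serialized by esc_mtx.
 */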
2083static void
2084e82545_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2085	     uint64_t offset, int size, uint64_t value)
2086{
2087	struct e82545_softc *sc;
2088
2089	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d", baridx, offset, value, size);
2090
2091	sc = pi->pi_arg;
2092
2093	pthread_mutex_lock(&sc->esc_mtx);
2094
2095	switch (baridx) {
2096	case E82545_BAR_IO:
2097		switch (offset) {
2098		case E82545_IOADDR:
2099			if (size != 4) {
2100				DPRINTF("Wrong io addr write sz:%d value:0x%lx", size, value);
2101			} else
2102				sc->io_addr = (uint32_t)value;
2103			break;
2104		case E82545_IODATA:
2105			if (size != 4) {
2106				DPRINTF("Wrong io data write size:%d value:0x%lx", size, value);
2107			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2108				DPRINTF("Non-register io write addr:0x%x value:0x%lx", sc->io_addr, value);
2109			} else
2110				e82545_write_register(sc, sc->io_addr,
2111						      (uint32_t)value);
2112			break;
2113		default:
2114			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d", offset, value, size);
2115			break;
2116		}
2117		break;
2118	case E82545_BAR_REGISTER:
2119		if (size != 4) {
2120			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx", size, offset, value);
2121		} else
2122			e82545_write_register(sc, (uint32_t)offset,
2123					      (uint32_t)value);
2124		break;
2125	default:
2126		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d",
2127			baridx, offset, value, size);
2128	}
2129
2130	pthread_mutex_unlock(&sc->esc_mtx);
2131}
2132
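/*
 * pe_barread handler, the mirror image of e82545_write().  Reads from the
 * I/O BAR return either the current IOADDR latch or, through IODATA, the
 * register it selects; reads from the register BAR go straight to
 * e82545_read_register().  Unhandled cases return 0.
 */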
2133static uint64_t
2134e82545_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2135	    uint64_t offset, int size)
2136{
2137	struct e82545_softc *sc;
2138	uint64_t retval;
2139
2140	//DPRINTF("Read  bar:%d offset:0x%lx size:%d", baridx, offset, size);
2141	sc = pi->pi_arg;
2142	retval = 0;
2143
2144	pthread_mutex_lock(&sc->esc_mtx);
2145
2146	switch (baridx) {
2147	case E82545_BAR_IO:
2148		switch (offset) {
2149		case E82545_IOADDR:
2150			if (size != 4) {
2151				DPRINTF("Wrong io addr read sz:%d", size);
2152			} else
2153				retval = sc->io_addr;
2154			break;
2155		case E82545_IODATA:
			if (size != 4) {
				DPRINTF("Wrong io data read sz:%d", size);
			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2160				DPRINTF("Non-register io read addr:0x%x",
2161					sc->io_addr);
2162			} else
2163				retval = e82545_read_register(sc, sc->io_addr);
2164			break;
2165		default:
2166			DPRINTF("Unknown io bar read offset:0x%lx size:%d",
2167				offset, size);
2168			break;
2169		}
2170		break;
2171	case E82545_BAR_REGISTER:
2172		if (size != 4) {
2173			DPRINTF("Wrong register read size:%d offset:0x%lx",
2174				size, offset);
2175		} else
2176			retval = e82545_read_register(sc, (uint32_t)offset);
2177		break;
2178	default:
2179		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d",
2180			baridx, offset, size);
2181		break;
2182	}
2183
2184	pthread_mutex_unlock(&sc->esc_mtx);
2185
2186	return (retval);
2187}
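
/*
 * Illustrative sketch (not part of the emulation): roughly how a guest
 * driver would use the windowed I/O BAR handled above, assuming "iobase"
 * is the port base assigned to E82545_BAR_IO and outl()/inl() style port
 * accessors are available:
 *
 *	outl(iobase + E82545_IOADDR, E1000_STATUS);	// select register
 *	status = inl(iobase + E82545_IODATA);		// read it
 *
 * Only 4-byte accesses are honored; other sizes are logged and ignored.
 */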
2188
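/*
 * Reset device state.  A non-zero 'drvr' indicates a driver/software
 * initiated reset, which preserves "sticky" state such as the flow
 * control registers, descriptor base addresses and the receive address
 * table contents (only their valid bits are cleared).  A zero 'drvr'
 * models a full hardware reset and clears those as well, along with the
 * multicast and VLAN filter tables.
 */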
2189static void
2190e82545_reset(struct e82545_softc *sc, int drvr)
2191{
2192	int i;
2193
2194	e82545_rx_disable(sc);
2195	e82545_tx_disable(sc);
2196
2197	/* clear outstanding interrupts */
2198	if (sc->esc_irq_asserted)
2199		pci_lintr_deassert(sc->esc_pi);
2200
2201	/* misc */
2202	if (!drvr) {
2203		sc->esc_FCAL = 0;
2204		sc->esc_FCAH = 0;
2205		sc->esc_FCT = 0;
2206		sc->esc_VET = 0;
2207		sc->esc_FCTTV = 0;
2208	}
2209	sc->esc_LEDCTL = 0x07061302;
2210	sc->esc_PBA = 0x00100030;
2211
2212	/* start nvm in opcode mode. */
2213	sc->nvm_opaddr = 0;
2214	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
2215	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
2216	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
2217	e82545_init_eeprom(sc);
2218
2219	/* interrupt */
2220	sc->esc_ICR = 0;
2221	sc->esc_ITR = 250;
2222	sc->esc_ICS = 0;
2223	sc->esc_IMS = 0;
2224	sc->esc_IMC = 0;
2225
2226	/* L2 filters */
2227	if (!drvr) {
2228		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
2229		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
2230		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
2231
2232		/* XXX not necessary on 82545 ?? */
2233		sc->esc_uni[0].eu_valid = 1;
2234		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
2235		    ETHER_ADDR_LEN);
2236	} else {
2237		/* Clear RAH valid bits */
2238		for (i = 0; i < 16; i++)
2239			sc->esc_uni[i].eu_valid = 0;
2240	}
2241
2242	/* receive */
2243	if (!drvr) {
2244		sc->esc_RDBAL = 0;
2245		sc->esc_RDBAH = 0;
2246	}
2247	sc->esc_RCTL = 0;
2248	sc->esc_FCRTL = 0;
2249	sc->esc_FCRTH = 0;
2250	sc->esc_RDLEN = 0;
2251	sc->esc_RDH = 0;
2252	sc->esc_RDT = 0;
2253	sc->esc_RDTR = 0;
2254	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
2255	sc->esc_RADV = 0;
2256	sc->esc_RXCSUM = 0;
2257
2258	/* transmit */
2259	if (!drvr) {
2260		sc->esc_TDBAL = 0;
2261		sc->esc_TDBAH = 0;
2262		sc->esc_TIPG = 0;
2263		sc->esc_AIT = 0;
2264		sc->esc_TIDV = 0;
2265		sc->esc_TADV = 0;
2266	}
2267	sc->esc_tdba = 0;
2268	sc->esc_txdesc = NULL;
2269	sc->esc_TXCW = 0;
2270	sc->esc_TCTL = 0;
2271	sc->esc_TDLEN = 0;
2272	sc->esc_TDT = 0;
2273	sc->esc_TDHr = sc->esc_TDH = 0;
2274	sc->esc_TXDCTL = 0;
2275}
2276
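/*
 * Device instance initialization.  'opts' is the bhyve option string for
 * this slot, of the form "<backend>[,key=value[,...]]"; currently only the
 * "mac" key is recognized.  The function sets up PCI config space, the
 * three BARs, the transmit thread and the network backend, and finishes
 * with a hardware-style reset.
 */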
2277static int
2278e82545_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2279{
2280	char nstr[80];
2281	struct e82545_softc *sc;
2282	char *devname;
2283	char *vtopts;
2284	int mac_provided;
2285
2286	DPRINTF("Loading with options: %s", opts);
2287
2288	/* Setup our softc */
	sc = calloc(1, sizeof(*sc));
	if (sc == NULL) {
		warnx("e82545: failed to allocate softc");
		return (-1);
	}
2290
2291	pi->pi_arg = sc;
2292	sc->esc_pi = pi;
2293	sc->esc_ctx = ctx;
2294
2295	pthread_mutex_init(&sc->esc_mtx, NULL);
2296	pthread_cond_init(&sc->esc_rx_cond, NULL);
2297	pthread_cond_init(&sc->esc_tx_cond, NULL);
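	/*
	 * Transmit processing runs in a dedicated thread that is woken
	 * through esc_tx_cond; create and name it now.
	 */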
2298	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
2299	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
2300	    pi->pi_func);
	pthread_set_name_np(sc->esc_tx_tid, nstr);
2302
2303	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
2304	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
2305	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
2306	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
2307	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
2308	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
2309
2310	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
2311	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
2312
	/*
	 * TODO: this card also supports MSI, but the FreeBSD driver for it
	 * does not use it, so it is not implemented here.
	 */
2315	pci_lintr_request(pi);
2316
2317	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
2318		E82545_BAR_REGISTER_LEN);
2319	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
2320		E82545_BAR_FLASH_LEN);
2321	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
2322		E82545_BAR_IO_LEN);
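
	/*
	 * The flash BAR is allocated so the guest sees the expected resource
	 * layout, but accesses to it are not modeled; they fall through to
	 * the "Unknown ... bar" paths in e82545_read()/e82545_write().
	 */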
2323
2324	/*
2325	 * Attempt to open the net backend and read the MAC address
2326	 * if specified.  Copied from virtio-net, slightly modified.
2327	 */
2328	mac_provided = 0;
2329	sc->esc_be = NULL;
2330	if (opts != NULL) {
2331		int err = 0;
2332
2333		devname = vtopts = strdup(opts);
2334		(void) strsep(&vtopts, ",");
2335
2336		/*
2337		 * Parse the list of options in the form
2338		 *     key1=value1,...,keyN=valueN.
2339		 */
2340		while (vtopts != NULL) {
2341			char *value = vtopts;
2342			char *key;
2343
2344			key = strsep(&value, "=");
2345			if (value == NULL)
2346				break;
2347			vtopts = value;
2348			(void) strsep(&vtopts, ",");
2349
2350			if (strcmp(key, "mac") == 0) {
2351				err = net_parsemac(value, sc->esc_mac.octet);
2352				if (err)
2353					break;
2354				mac_provided = 1;
2355			}
2356		}
2357
2358		if (err) {
2359			free(devname);
2360			return (err);
2361		}
2362
2363		err = netbe_init(&sc->esc_be, devname, e82545_rx_callback, sc);
2364		free(devname);
2365		if (err)
2366			return (err);
2367	}
2368
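	/* No MAC address supplied; derive one for this device instance. */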
2369	if (!mac_provided) {
2370		net_genmac(pi, sc->esc_mac.octet);
2371	}
2372
2373	netbe_rx_enable(sc->esc_be);
2374
2375	/* H/w initiated reset */
2376	e82545_reset(sc, 0);
2377
2378	return (0);
2379}
2380
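/*
 * Device emulation registration.  The device is selected on the bhyve
 * command line by the name "e1000"; for example (illustrative only,
 * slot and backend are placeholders):
 *
 *	bhyve ... -s 2:0,e1000,tap0 ... <vmname>
 */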
2381struct pci_devemu pci_de_e82545 = {
	.pe_emu =	"e1000",
2383	.pe_init =	e82545_init,
2384	.pe_barwrite =	e82545_write,
2385	.pe_barread =	e82545_read
2386};
2387PCI_EMUL_SET(pci_de_e82545);