/*-
 * Copyright (c) 1992, 1993, University of Vermont and State
 *  Agricultural College.
 * Copyright (c) 1992, 1993, Garrett A. Wollman.
 *
 * Portions:
 * Copyright (c) 1990, 1991, William F. Jolitz
 * Copyright (c) 1990, The Regents of the University of California
 *
 * 3Com 3C507 support:
 * Copyright (c) 1993, 1994, Charles M. Hannum
 *
 * EtherExpress 16 support:
 * Copyright (c) 1993, 1994, 1995, Rodney W. Grimes
 * Copyright (c) 1997, Aaron C. Smith
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	Vermont and State Agricultural College and Garrett A. Wollman, by
 *	William F. Jolitz, by the University of California, Berkeley,
 *	Lawrence Berkeley Laboratory, and their contributors, by
 *	Charles M. Hannum, by Rodney W. Grimes, and by Aaron C. Smith.
 * 4. Neither the names of the Universities nor the names of the authors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * MAINTAINER: Matthew N. Dodd <winter@jurai.net>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Intel 82586 Ethernet chip
 * Register, bit, and structure definitions.
 *
 * Written by GAW with reference to the Clarkson Packet Driver code for this
 * chip written by Russ Nelson and others.
 *
 * Intel EtherExpress 16 support from if_ix.c, written by Rodney W. Grimes.
 */

/*
 * The i82586 is a very versatile chip, found in many implementations.
 * Programming this chip is mostly the same, but certain details differ
 * from card to card.  This driver is written so that different cards
 * can be automatically detected at run-time.
 */

/*
 * Mode of operation:
 *
 * We run the 82586 in a standard Ethernet mode.  We keep NFRAMES
 * received frame descriptors around for the receiver to use, and
 * NRXBUFS associated receive buffer descriptors, both in a circular
 * list.  Whenever a frame is received, we rotate both lists as
 * necessary.  (The 586 treats both lists as a simple queue.)  We also
 * keep a transmit command around so that packets can be sent off
 * quickly.
 *
 * We configure the adapter in AL-LOC = 1 mode, which means that the
 * Ethernet/802.3 MAC header is placed at the beginning of the receive
 * buffer rather than being split off into various fields in the RFD.
 * This also means that we must include this header in the transmit
 * buffer as well.
 *
 * By convention, all transmit commands, and only transmit commands,
 * shall have the I (IE_CMD_INTR) bit set in the command.  This way,
 * when an interrupt arrives at ieintr(), it is immediately possible
 * to tell what precisely caused it.  ANY OTHER command-sending routine
 * should run with the softc lock (IE_LOCK()) held, and should post an
 * acknowledgement to every interrupt it generates.
 *
 * The 82586 has a 24-bit address space internally, and the adaptor's
 * memory is located at the top of this region.  However, the value
 * we are given in configuration is normally the *bottom* of the adaptor
 * RAM.  So, we must go through a few gyrations to come up with a
 * kernel virtual address which represents the actual beginning of the
 * 586 address space.  First, we autosize the RAM by running through
 * several possible sizes and trying to initialize the adapter under
 * the assumption that the selected size is correct.  Then, knowing
 * the correct RAM size, we set up our pointers in the softc: `iomem'
 * represents the computed base of the 586 address space, `iomembot'
 * represents the actual configured base of adapter RAM, and `iosize'
 * represents the calculated size of 586 RAM.  Then, when laying out
 * commands, we use the interval [iomembot, iomembot + iosize); to make
 * 24-bit pointers, we subtract iomem, and to make 16-bit pointers, we
 * subtract iomem and AND with 0xffff.
 */
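
/*
 * A worked example of the above (the numbers are illustrative only):
 * suppose the card has 32K of RAM mapped at kernel virtual address
 * `iomembot'.  Then `iosize' is 0x8000 and, as computed in
 * check_ie_present(), `iomem' is iomembot + iosize - (1 << 24), i.e.
 * the kernel virtual address corresponding to 586 address zero.  A
 * structure at kernel address p inside board RAM therefore has 24-bit
 * address p - iomem and 16-bit offset (p - iomem) & 0xffff.
 */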

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/syslog.h>

#include <sys/module.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/ic/i82586.h>
#include <dev/ie/if_ievar.h>
#include <dev/ie/if_iereg.h>
#include <dev/ie/if_ie507.h>
#include <dev/ie/if_iee16.h>
#include <i386/isa/elink.h>

#include <net/bpf.h>

#ifdef DEBUG
#define IED_RINT	0x01
#define IED_TINT	0x02
#define IED_RNR		0x04
#define IED_CNA		0x08
#define IED_READFRAME	0x10
static int	ie_debug = IED_RNR;
#endif

#define IE_BUF_LEN	ETHER_MAX_LEN	/* length of transmit buffer */

/* Forward declaration */
struct ie_softc;

static void	ieinit			(void *);
static void	ieinit_locked		(struct ie_softc *);
static void	ie_stop			(struct ie_softc *);
static int	ieioctl			(struct ifnet *, u_long, caddr_t);
static void	iestart			(struct ifnet *);
static void	iestart_locked		(struct ifnet *);

static __inline void
		ee16_interrupt_enable	(struct ie_softc *);
static void	ee16_eeprom_outbits	(struct ie_softc *, int, int);
static void	ee16_eeprom_clock	(struct ie_softc *, int);
static u_short	ee16_read_eeprom	(struct ie_softc *, int);
static int	ee16_eeprom_inbits	(struct ie_softc *);

static __inline void
		ie_ack			(struct ie_softc *, u_int);
static void	iereset			(struct ie_softc *);
static void	ie_readframe		(struct ie_softc *, int);
static void	ie_drop_packet_buffer	(struct ie_softc *);
static void	find_ie_mem_size	(struct ie_softc *);
static int	command_and_wait	(struct ie_softc *,
					 int, void volatile *, int);
static void	run_tdr			(struct ie_softc *,
					 volatile struct ie_tdr_cmd *);
static int	ierint			(struct ie_softc *);
static int	ietint			(struct ie_softc *);
static int	iernr			(struct ie_softc *);
static void	start_receiver		(struct ie_softc *);
static __inline int
		ieget			(struct ie_softc *, struct mbuf **);
static v_caddr_t setup_rfa		(struct ie_softc *, v_caddr_t);
static int	mc_setup		(struct ie_softc *);
static void	ie_mc_reset		(struct ie_softc *);

#ifdef DEBUG
static void	print_rbd		(volatile struct ie_recv_buf_desc * rbd);
static int	in_ierint = 0;
static int	in_ietint = 0;
#endif

static const char *ie_hardware_names[] = {
	"None",
	"StarLAN 10",
	"EN100",
	"StarLAN Fiber",
	"3C507",
	"NI5210",
	"EtherExpress 16",
	"Unknown"
};

/*
 * sizeof(iscp) == 1+1+2+4 == 8
 * sizeof(scb) == 2+2+2+2+2+2+2+2 == 16
 * NFRAMES * sizeof(rfd) == NFRAMES*(2+2+2+2+6+6+2+2) == NFRAMES*24 == 384
 * sizeof(xmit_cmd) == 2+2+2+2+6+2 == 18
 * sizeof(transmit buffer) == 1512
 * sizeof(transmit buffer desc) == 8
 * -----
 * 1946
 *
 * NRXBUFS * sizeof(rbd) == NRXBUFS*(2+2+4+2+2) == NRXBUFS*12
 * NRXBUFS * IE_RBUF_SIZE == NRXBUFS*256
 *
 * NRXBUFS should be (16384 - 1946) / (256 + 12) == 14438 / 268 == 53
 *
 * With NRXBUFS == 48, this leaves us 1574 bytes for another command or
 * more buffers.  Another transmit command would be 18+8+1512 == 1538
 * ---just barely fits!
 *
 * Obviously all these would have to be reduced for smaller memory sizes.
 * With a larger memory, it would be possible to roughly double the number
 * of both transmit and receive buffers.
 */
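
/*
 * Note that the counts defined below are per 8K of board RAM; ie_attach()
 * scales them by `factor', the number of 8K units of board memory present,
 * so the arithmetic above describes one possible (16K) layout rather than
 * the literal values of NFRAMES and NRXBUFS.
 */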

#define	NFRAMES		4	/* number of receive frames */
#define	NRXBUFS		24	/* number of buffers to allocate */
#define	IE_RBUF_SIZE	256	/* size of each buffer, MUST BE POWER OF TWO */
#define	NTXBUFS		1	/* number of transmit commands */
#define	IE_TBUF_SIZE	ETHER_MAX_LEN	/* size of transmit buffer */

#define MK_24(base, ptr) ((caddr_t)((uintptr_t)ptr - (uintptr_t)base))
#define MK_16(base, ptr) ((u_short)(uintptr_t)MK_24(base, ptr))

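/*
 * For instance, MK_16(MEM(sc), sc->rframes[0]) yields the 16-bit SCB
 * offset of the first receive frame descriptor, while MK_24(MEM(sc), ptr)
 * yields the full 24-bit address stored in buffer descriptors; both are
 * used throughout the setup code below.
 */
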
247void
248ee16_shutdown(struct ie_softc *sc)
249{
250
251	ee16_reset_586(sc);
252	outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_ASIC);
253	outb(PORT(sc) + IEE16_ECTRL, 0);
254}
255
256/*
257 * Taken almost exactly from Bill's if_is.c, then modified beyond recognition.
258 */
259int
260ie_attach(device_t dev)
261{
262	struct ie_softc *       sc;
263	struct ifnet *          ifp;
264	size_t                  allocsize;
265	int                     error, factor;
266
267	sc = device_get_softc(dev);
268	ifp = sc->ifp = if_alloc(IFT_ETHER);
269	if (ifp == NULL) {
270		device_printf(sc->dev, "can not if_alloc()\n");
271		return (ENOSPC);
272	}
273
274	sc->dev = dev;
275	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
276	    MTX_DEF);
277
278	/*
279	 * based on the amount of memory we have, allocate our tx and rx
280	 * resources.
281	 */
282	factor = rman_get_size(sc->mem_res) / 8192;
283	sc->nframes = factor * NFRAMES;
284	sc->nrxbufs = factor * NRXBUFS;
285	sc->ntxbufs = factor * NTXBUFS;
286
287	/*
288	 * Since all of these guys are arrays of pointers, allocate as one
289	 * big chunk and dole out accordingly.
290	 */
291	allocsize = sizeof(void *) * (sc->nframes
292				      + (sc->nrxbufs * 2)
293				      + (sc->ntxbufs * 3));
294	sc->rframes = (volatile struct ie_recv_frame_desc **) malloc(allocsize,
295								     M_DEVBUF,
296								   M_NOWAIT);
297	if (sc->rframes == NULL) {
298		mtx_destroy(&sc->lock);
299		return (ENXIO);
300	}
301	sc->rbuffs =
302	    (volatile struct ie_recv_buf_desc **)&sc->rframes[sc->nframes];
303	sc->cbuffs = (volatile u_char **)&sc->rbuffs[sc->nrxbufs];
304	sc->xmit_cmds =
305	    (volatile struct ie_xmit_cmd **)&sc->cbuffs[sc->nrxbufs];
306	sc->xmit_buffs =
307	    (volatile struct ie_xmit_buf **)&sc->xmit_cmds[sc->ntxbufs];
308	sc->xmit_cbuffs = (volatile u_char **)&sc->xmit_buffs[sc->ntxbufs];
309
310	if (bootverbose)
311		device_printf(sc->dev, "hardware type %s, revision %d\n",
312			ie_hardware_names[sc->hard_type], sc->hard_vers + 1);
313
314	ifp->if_softc = sc;
315	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
316	ifp->if_mtu = ETHERMTU;
317	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
318	ifp->if_start = iestart;
319	ifp->if_ioctl = ieioctl;
320	ifp->if_init = ieinit;
321	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
322
323	ether_ifattach(ifp, sc->enaddr);
324
325	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
326				NULL, ie_intr, sc, &sc->irq_ih);
327	if (error) {
328		device_printf(dev, "Unable to register interrupt handler\n");
329		mtx_destroy(&sc->lock);
330		return (error);
331	}
332
333	return (0);
334}
335
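/*
 * Acknowledge the interrupt/status bits selected by MASK by writing them
 * back into the SCB command word and poking channel attention.
 */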
static __inline void
ie_ack(struct ie_softc *sc, u_int mask)
{

	sc->scb->ie_command = sc->scb->ie_status & mask;
	(*sc->ie_chan_attn) (sc);
}
343
344/*
345 * What to do upon receipt of an interrupt.
346 */
347void
348ie_intr(void *xsc)
349{
350	struct ie_softc *sc = (struct ie_softc *)xsc;
351	u_short status;
352
353	IE_LOCK(sc);
354
355	/* Clear the interrupt latch on the 3C507. */
356	if (sc->hard_type == IE_3C507
357	 && (inb(PORT(sc) + IE507_CTRL) & EL_CTRL_INTL))
358		outb(PORT(sc) + IE507_ICTRL, 1);
359
360	/* disable interrupts on the EE16. */
361	if (sc->hard_type == IE_EE16)
362		outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded);
363
364	status = sc->scb->ie_status;
365
366loop:
367
368	/* Don't ack interrupts which we didn't receive */
369	ie_ack(sc, IE_ST_WHENCE & status);
370
371	if (status & (IE_ST_RECV | IE_ST_RNR)) {
372#ifdef DEBUG
373		in_ierint++;
374		if (ie_debug & IED_RINT)
375			if_printf(sc->ifp, "rint\n");
376#endif
377		ierint(sc);
378#ifdef DEBUG
379		in_ierint--;
380#endif
381	}
382	if (status & IE_ST_DONE) {
383#ifdef DEBUG
384		in_ietint++;
385		if (ie_debug & IED_TINT)
386			if_printf(sc->ifp, "tint\n");
387#endif
388		ietint(sc);
389#ifdef DEBUG
390		in_ietint--;
391#endif
392	}
393	if (status & IE_ST_RNR) {
394#ifdef DEBUG
395		if (ie_debug & IED_RNR)
396			if_printf(sc->ifp, "rnr\n");
397#endif
398		iernr(sc);
399	}
400#ifdef DEBUG
401	if ((status & IE_ST_ALLDONE) && (ie_debug & IED_CNA))
402		if_printf(sc->ifp, "cna\n");
403#endif
404
405	if ((status = sc->scb->ie_status) & IE_ST_WHENCE)
406		goto loop;
407
408	/* Clear the interrupt latch on the 3C507. */
409	if (sc->hard_type == IE_3C507)
410		outb(PORT(sc) + IE507_ICTRL, 1);
411
412	/* enable interrupts on the EE16. */
413	if (sc->hard_type == IE_EE16)
414		outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE);
415	IE_UNLOCK(sc);
416}
417
418/*
419 * Process a received-frame interrupt.
420 */
421static int
422ierint(struct ie_softc *sc)
423{
424	int	i, status;
425	static int timesthru = 1024;
426
427	i = sc->rfhead;
428	while (1) {
429		status = sc->rframes[i]->ie_fd_status;
430
431		if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
432			sc->ifp->if_ipackets++;
433			if (!--timesthru) {
434				sc->ifp->if_ierrors +=
435				    sc->scb->ie_err_crc +
436				    sc->scb->ie_err_align +
437				    sc->scb->ie_err_resource +
438				    sc->scb->ie_err_overrun;
439				sc->scb->ie_err_crc = 0;
440				sc->scb->ie_err_align = 0;
441				sc->scb->ie_err_resource = 0;
442				sc->scb->ie_err_overrun = 0;
443				timesthru = 1024;
444			}
445			ie_readframe(sc, i);
446		} else {
447			if (status & IE_FD_RNR) {
448				if (!(sc->scb->ie_status & IE_RU_READY)) {
449					sc->rframes[0]->ie_fd_next =
450					    MK_16(MEM(sc), sc->rbuffs[0]);
451					sc->scb->ie_recv_list =
452					    MK_16(MEM(sc), sc->rframes[0]);
453					command_and_wait(sc, IE_RU_START, 0, 0);
454				}
455			}
456			break;
457		}
458		i = (i + 1) % sc->nframes;
459	}
460	return (0);
461}
462
463/*
464 * Process a command-complete interrupt.  These are only generated by
465 * the transmission of frames.	This routine is deceptively simple, since
466 * most of the real work is done by iestart().
467 */
468static int
469ietint(struct ie_softc *sc)
470{
471	struct ifnet *ifp = sc->ifp;
472	int	status;
473	int	i;
474
475	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
476
477	for (i = 0; i < sc->xmit_count; i++) {
478		status = sc->xmit_cmds[i]->ie_xmit_status;
479
480		if (status & IE_XS_LATECOLL) {
481			if_printf(ifp, "late collision\n");
482			ifp->if_collisions++;
483			ifp->if_oerrors++;
484		} else if (status & IE_XS_NOCARRIER) {
485			if_printf(ifp, "no carrier\n");
486			ifp->if_oerrors++;
487		} else if (status & IE_XS_LOSTCTS) {
488			if_printf(ifp, "lost CTS\n");
489			ifp->if_oerrors++;
490		} else if (status & IE_XS_UNDERRUN) {
491			if_printf(ifp, "DMA underrun\n");
492			ifp->if_oerrors++;
493		} else if (status & IE_XS_EXCMAX) {
494			if_printf(ifp, "too many collisions\n");
495			ifp->if_collisions += 16;
496			ifp->if_oerrors++;
497		} else {
498			ifp->if_opackets++;
499			ifp->if_collisions += status & IE_XS_MAXCOLL;
500		}
501	}
502	sc->xmit_count = 0;
503
504	/*
505	 * If multicast addresses were added or deleted while we were
506	 * transmitting, ie_mc_reset() set the want_mcsetup flag indicating
507	 * that we should do it.
508	 */
509	if (sc->want_mcsetup) {
510		mc_setup(sc);
511		sc->want_mcsetup = 0;
512	}
513	/* Wish I knew why this seems to be necessary... */
514	sc->xmit_cmds[0]->ie_xmit_status |= IE_STAT_COMPL;
515
516	iestart_locked(ifp);
517	return (0);		/* shouldn't be necessary */
518}
519
520/*
521 * Process a receiver-not-ready interrupt.  I believe that we get these
522 * when there aren't enough buffers to go around.  For now (FIXME), we
523 * just restart the receiver, and hope everything's ok.
524 */
525static int
526iernr(struct ie_softc *sc)
527{
528#ifdef doesnt_work
529	setup_rfa(sc, (v_caddr_t) sc->rframes[0]);
530
531	sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
532	command_and_wait(sc, IE_RU_START, 0, 0);
533#else
534	/* This doesn't work either, but it doesn't hang either. */
535	command_and_wait(sc, IE_RU_DISABLE, 0, 0);	/* just in case */
536	setup_rfa(sc, (v_caddr_t) sc->rframes[0]);	/* ignore cast-qual */
537
538	sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
539	command_and_wait(sc, IE_RU_START, 0, 0);	/* was ENABLE */
540
541#endif
542	ie_ack(sc, IE_ST_WHENCE);
543
544	sc->ifp->if_ierrors++;
545	return (0);
546}
547
548/*
549 * Compare two Ether/802 addresses for equality, inlined and
550 * unrolled for speed.	I'd love to have an inline assembler
551 * version of this...
552 */
553static __inline int
554ether_equal(u_char * one, u_char * two)
555{
556	if (one[0] != two[0])
557		return (0);
558	if (one[1] != two[1])
559		return (0);
560	if (one[2] != two[2])
561		return (0);
562	if (one[3] != two[3])
563		return (0);
564	if (one[4] != two[4])
565		return (0);
566	if (one[5] != two[5])
567		return (0);
568	return 1;
569}
570
/*
 * Determine quickly whether we should bother reading in this packet.
 * This depends on whether BPF and/or bridging is enabled, whether we
 * are receiving multicast addresses, and whether promiscuous mode is
 * enabled.  We assume that if IFF_PROMISC is set, then *somebody* wants
 * to see all incoming packets.
 */
578static __inline int
579check_eh(struct ie_softc *sc, struct ether_header *eh)
580{
581	/* Optimize the common case: normal operation. We've received
582	   either a unicast with our dest or a multicast packet. */
583	if (sc->promisc == 0) {
584		int i;
585
586		/* If not multicast, it's definitely for us */
587		if ((eh->ether_dhost[0] & 1) == 0)
588			return (1);
589
590		/* Accept broadcasts (loose but fast check) */
591		if (eh->ether_dhost[0] == 0xff)
592			return (1);
593
594		/* Compare against our multicast addresses */
595		for (i = 0; i < sc->mcast_count; i++) {
596			if (ether_equal(eh->ether_dhost,
597			    (u_char *)&sc->mcast_addrs[i]))
598				return (1);
599		}
600		return (0);
601	}
602
603	/* Always accept packets when in promiscuous mode */
604	if ((sc->promisc & IFF_PROMISC) != 0)
605		return (1);
606
607	/* Always accept packets directed at us */
608	if (ether_equal(eh->ether_dhost, IF_LLADDR(sc->ifp)))
609		return (1);
610
611	/* Must have IFF_ALLMULTI but not IFF_PROMISC set. The chip is
612	   actually in promiscuous mode, so discard unicast packets. */
613	return((eh->ether_dhost[0] & 1) != 0);
614}
615
/*
 * We want to isolate the bits that have meaning...  This assumes that
 * IE_RBUF_SIZE is an even power of two.  If somehow the act_len exceeds
 * the size of the buffer, then we are screwed anyway.
 */
static __inline int
ie_buflen(struct ie_softc *sc, int head)
{
	return (sc->rbuffs[head]->ie_rbd_actual
		& (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
}
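
/*
 * For example, with IE_RBUF_SIZE == 256 the mask above is 0x1ff, so
 * ie_buflen() keeps only the low-order byte-count bits of ie_rbd_actual
 * and strips the IE_RBD_USED/IE_RBD_LAST status bits kept in the upper
 * part of the word.
 */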
627
628static __inline int
629ie_packet_len(struct ie_softc *sc)
630{
631	int	i;
632	int	head = sc->rbhead;
633	int	acc = 0;
634
635	do {
636		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
637#ifdef DEBUG
638			print_rbd(sc->rbuffs[sc->rbhead]);
639#endif
640			log(LOG_ERR,
641			    "%s: receive descriptors out of sync at %d\n",
642			    sc->ifp->if_xname, sc->rbhead);
643			iereset(sc);
644			return (-1);
645		}
646		i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;
647
648		acc += ie_buflen(sc, head);
649		head = (head + 1) % sc->nrxbufs;
650	} while (!i);
651
652	return (acc);
653}
654
655/*
656 * Read data off the interface, and turn it into an mbuf chain.
657 *
658 * This code is DRAMATICALLY different from the previous version; this
659 * version tries to allocate the entire mbuf chain up front, given the
660 * length of the data available.  This enables us to allocate mbuf
661 * clusters in many situations where before we would have had a long
662 * chain of partially-full mbufs.  This should help to speed up the
663 * operation considerably.  (Provided that it works, of course.)
664 */
665static __inline int
666ieget(struct ie_softc *sc, struct mbuf **mp)
667{
668	struct	ether_header eh;
669	struct	mbuf *m, *top, **mymp;
670	int	offset;
671	int	totlen, resid;
672	int	thismboff;
673	int	head;
674
675	totlen = ie_packet_len(sc);
676	if (totlen <= 0)
677		return (-1);
678
679	/*
680	 * Snarf the Ethernet header.
681	 */
682	bcopy(sc->cbuffs[sc->rbhead], &eh, sizeof(struct ether_header));
683	/* ignore cast-qual warning here */
684
685	/*
686	 * As quickly as possible, check if this packet is for us. If not,
687	 * don't waste a single cycle copying the rest of the packet in.
688	 * This is only a consideration when FILTER is defined; i.e., when
689	 * we are either running BPF or doing multicasting.
690	 */
691	if (!check_eh(sc, &eh)) {
692		ie_drop_packet_buffer(sc);
693		sc->ifp->if_ierrors--;	/* just this case, it's not an
694						 * error
695						 */
696		return (-1);
697	}
698
699	MGETHDR(m, M_NOWAIT, MT_DATA);
700	if (!m) {
701		ie_drop_packet_buffer(sc);
702		/* XXXX if_ierrors++; */
703		return (-1);
704	}
705
706	*mp = m;
707	m->m_pkthdr.rcvif = sc->ifp;
708	m->m_len = MHLEN;
709	resid = m->m_pkthdr.len = totlen;
710	top = 0;
711
712	mymp = &top;
713
714	/*
715	 * This loop goes through and allocates mbufs for all the data we
716	 * will be copying in.	It does not actually do the copying yet.
717	 */
718	do {			/* while(resid > 0) */
719		/*
720		 * Try to allocate an mbuf to hold the data that we have.
721		 * If we already allocated one, just get another one and
722		 * stick it on the end (eventually).  If we don't already
723		 * have one, try to allocate an mbuf cluster big enough to
724		 * hold the whole packet, if we think it's reasonable, or a
725		 * single mbuf which may or may not be big enough. Got that?
726		 */
727		if (top) {
728			MGET(m, M_NOWAIT, MT_DATA);
729			if (!m) {
730				m_freem(top);
731				ie_drop_packet_buffer(sc);
732				return (-1);
733			}
734			m->m_len = MLEN;
735		}
736		if (resid >= MINCLSIZE) {
737			MCLGET(m, M_NOWAIT);
738			if (m->m_flags & M_EXT)
739				m->m_len = min(resid, MCLBYTES);
740		} else {
741			if (resid < m->m_len) {
742				if (!top && resid + max_linkhdr <= m->m_len)
743					m->m_data += max_linkhdr;
744				m->m_len = resid;
745			}
746		}
747		resid -= m->m_len;
748		*mymp = m;
749		mymp = &m->m_next;
750	} while (resid > 0);
751
752	resid = totlen;					/* remaining data */
753	offset = 0;					/* packet offset */
754	thismboff = 0;					/* offset in m */
755
756	m = top;					/* current mbuf */
757	head = sc->rbhead;				/* current rx buffer */
758
759	/*
760	 * Now we take the mbuf chain (hopefully only one mbuf most of the
761	 * time) and stuff the data into it.  There are no possible failures
762	 * at or after this point.
763	 */
764	while (resid > 0) {	/* while there's stuff left */
765		int	thislen = ie_buflen(sc, head) - offset;
766
767		/*
768		 * If too much data for the current mbuf, then fill the
769		 * current one up, go to the next one, and try again.
770		 */
771		if (thislen > m->m_len - thismboff) {
772			int	newlen = m->m_len - thismboff;
773
774			bcopy((v_caddr_t) (sc->cbuffs[head] + offset),
775			      mtod(m, caddr_t) +thismboff, (unsigned) newlen);
776			/* ignore cast-qual warning */
777			m = m->m_next;
778			thismboff = 0;		/* new mbuf, so no offset */
779			offset += newlen;	/* we are now this far into
780						 * the packet */
781			resid -= newlen;	/* so there is this much left
782						 * to get */
783			continue;
784		}
785		/*
786		 * If there is more than enough space in the mbuf to hold
787		 * the contents of this buffer, copy everything in, advance
788		 * pointers, and so on.
789		 */
790		if (thislen < m->m_len - thismboff) {
791			bcopy((v_caddr_t) (sc->cbuffs[head] + offset),
792			    mtod(m, caddr_t) +thismboff, (unsigned) thislen);
793			thismboff += thislen;	/* we are this far into the
794						 * mbuf */
795			resid -= thislen;	/* and this much is left */
796			goto nextbuf;
797		}
798		/*
799		 * Otherwise, there is exactly enough space to put this
800		 * buffer's contents into the current mbuf.  Do the
801		 * combination of the above actions.
802		 */
803		bcopy((v_caddr_t) (sc->cbuffs[head] + offset),
804		      mtod(m, caddr_t) + thismboff, (unsigned) thislen);
805		m = m->m_next;
806		thismboff = 0;		/* new mbuf, start at the beginning */
807		resid -= thislen;	/* and we are this far through */
808
809		/*
810		 * Advance all the pointers.  We can get here from either of
811		 * the last two cases, but never the first.
812		 */
813nextbuf:
814		offset = 0;
815		sc->rbuffs[head]->ie_rbd_actual = 0;
816		sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
817		sc->rbhead = head = (head + 1) % sc->nrxbufs;
818		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
819		sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs;
820	}
821
822	/*
823	 * Unless something changed strangely while we were doing the copy,
824	 * we have now copied everything in from the shared memory. This
825	 * means that we are done.
826	 */
827	return (0);
828}
829
830/*
831 * Read frame NUM from unit UNIT (pre-cached as IE).
832 *
833 * This routine reads the RFD at NUM, and copies in the buffers from
834 * the list of RBD, then rotates the RBD and RFD lists so that the receiver
835 * doesn't start complaining.  Trailers are DROPPED---there's no point
836 * in wasting time on confusing code to deal with them.	 Hopefully,
837 * this machine will never ARP for trailers anyway.
838 */
839static void
840ie_readframe(struct ie_softc *sc, int	num/* frame number to read */)
841{
842	struct ifnet *ifp = sc->ifp;
843	struct ie_recv_frame_desc rfd;
844	struct mbuf *m = 0;
845#ifdef DEBUG
846	struct ether_header *eh;
847#endif
848
849	bcopy((v_caddr_t) (sc->rframes[num]), &rfd,
850	      sizeof(struct ie_recv_frame_desc));
851
	/*
	 * Immediately advance the RFD list, since we have copied ours now.
	 */
856	sc->rframes[num]->ie_fd_status = 0;
857	sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
858	sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
859	sc->rftail = (sc->rftail + 1) % sc->nframes;
860	sc->rfhead = (sc->rfhead + 1) % sc->nframes;
861
862	if (rfd.ie_fd_status & IE_FD_OK) {
863		if (ieget(sc, &m)) {
864			sc->ifp->if_ierrors++;	/* this counts as an
865							 * error */
866			return;
867		}
	}

	/* If the frame was bad or we could not copy it, there is nothing
	 * to hand up. */
	if (!m)
		return;

#ifdef DEBUG
	eh = mtod(m, struct ether_header *);
	if (ie_debug & IED_READFRAME) {
		if_printf(ifp, "frame from ether %6D type %x\n",
		       eh->ether_shost, ":", (unsigned) eh->ether_type);
	}
	if (ntohs(eh->ether_type) > ETHERTYPE_TRAIL
	    && ntohs(eh->ether_type) < (ETHERTYPE_TRAIL + ETHERTYPE_NTRAILER))
		printf("received trailer!\n");
#endif
882
883	/*
884	 * Finally pass this packet up to higher layers.
885	 */
886	IE_UNLOCK(sc);
887	(*ifp->if_input)(ifp, m);
888	IE_LOCK(sc);
889}
890
891static void
892ie_drop_packet_buffer(struct ie_softc *sc)
893{
894	int	i;
895
896	do {
897		/*
898		 * This means we are somehow out of sync.  So, we reset the
899		 * adapter.
900		 */
901		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
902#ifdef DEBUG
903			print_rbd(sc->rbuffs[sc->rbhead]);
904#endif
905			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
906			    sc->ifp->if_xname, sc->rbhead);
907			iereset(sc);
908			return;
909		}
910		i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;
911
912		sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
913		sc->rbuffs[sc->rbhead]->ie_rbd_actual = 0;
914		sc->rbhead = (sc->rbhead + 1) % sc->nrxbufs;
915		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
916		sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs;
917	} while (!i);
918}
919
920
921/*
922 * Start transmission on an interface.
923 */
924static void
925iestart(struct ifnet *ifp)
926{
927	struct	 ie_softc *sc = ifp->if_softc;
928
929	IE_LOCK(sc);
930	iestart_locked(ifp);
931	IE_UNLOCK(sc);
932}
933
934static void
935iestart_locked(struct ifnet *ifp)
936{
937	struct	 ie_softc *sc = ifp->if_softc;
938	struct	 mbuf *m0, *m;
939	volatile unsigned char *buffer;
940	u_short	 len;
941
942	/*
943	 * This is not really volatile, in this routine, but it makes gcc
944	 * happy.
945	 */
946	volatile u_short *bptr = &sc->scb->ie_command_list;
947
948	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
949		return;
950	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
951		return;
952
953	do {
954		IF_DEQUEUE(&sc->ifp->if_snd, m);
955		if (!m)
956			break;
957
958		buffer = sc->xmit_cbuffs[sc->xmit_count];
959		len = 0;
960
961		for (m0 = m; m && len < IE_BUF_LEN; m = m->m_next) {
962			bcopy(mtod(m, caddr_t), buffer, m->m_len);
963			buffer += m->m_len;
964			len += m->m_len;
965		}
966
967		m_freem(m0);
968		len = max(len, ETHER_MIN_LEN);
969
970		/*
971		 * See if bpf is listening on this interface, let it see the
972		 * packet before we commit it to the wire.
973		 */
974		BPF_TAP(sc->ifp,
975			(void *)sc->xmit_cbuffs[sc->xmit_count], len);
976
977		sc->xmit_buffs[sc->xmit_count]->ie_xmit_flags =
978		    IE_XMIT_LAST|len;
979		sc->xmit_buffs[sc->xmit_count]->ie_xmit_next = 0xffff;
980		sc->xmit_buffs[sc->xmit_count]->ie_xmit_buf =
981		    MK_24(sc->iomem, sc->xmit_cbuffs[sc->xmit_count]);
982
983		sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_cmd = IE_CMD_XMIT;
984		sc->xmit_cmds[sc->xmit_count]->ie_xmit_status = 0;
985		sc->xmit_cmds[sc->xmit_count]->ie_xmit_desc =
986		    MK_16(sc->iomem, sc->xmit_buffs[sc->xmit_count]);
987
988		*bptr = MK_16(sc->iomem, sc->xmit_cmds[sc->xmit_count]);
989		bptr = &sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_link;
990		sc->xmit_count++;
991	} while (sc->xmit_count < sc->ntxbufs);
992
993	/*
994	 * If we queued up anything for transmission, send it.
995	 */
996	if (sc->xmit_count) {
997		sc->xmit_cmds[sc->xmit_count - 1]->com.ie_cmd_cmd |=
998		    IE_CMD_LAST | IE_CMD_INTR;
999
1000		/*
1001		 * By passing the command pointer as a null, we tell
1002		 * command_and_wait() to pretend that this isn't an action
1003		 * command.  I wish I understood what was happening here.
1004		 */
1005		command_and_wait(sc, IE_CU_START, 0, 0);
1006		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1007	}
1008	return;
1009}
1010
1011/*
1012 * Check to see if there's an 82586 out there.
1013 */
1014int
1015check_ie_present(struct ie_softc *sc)
1016{
1017	volatile struct ie_sys_conf_ptr *scp;
1018	volatile struct ie_int_sys_conf_ptr *iscp;
1019	volatile struct ie_sys_ctl_block *scb;
1020	u_long	realbase;
1021
	realbase = (uintptr_t) sc->iomembot + sc->iosize - (1 << 24);
1023
1024	scp = (volatile struct ie_sys_conf_ptr *) (uintptr_t)
1025	      (realbase + IE_SCP_ADDR);
1026	bzero((volatile char *) scp, sizeof *scp);
1027
1028	/*
1029	 * First we put the ISCP at the bottom of memory; this tests to make
1030	 * sure that our idea of the size of memory is the same as the
1031	 * controller's. This is NOT where the ISCP will be in normal
1032	 * operation.
1033	 */
1034	iscp = (volatile struct ie_int_sys_conf_ptr *) sc->iomembot;
1035	bzero((volatile char *)iscp, sizeof *iscp);
1036
1037	scb = (volatile struct ie_sys_ctl_block *) sc->iomembot;
1038	bzero((volatile char *)scb, sizeof *scb);
1039
1040	scp->ie_bus_use = sc->bus_use;	/* 8-bit or 16-bit */
1041	scp->ie_iscp_ptr = (caddr_t) (uintptr_t)
1042	    ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase);
1043
1044	iscp->ie_busy = 1;
1045	iscp->ie_scb_offset = MK_16(realbase, scb) + 256;
1046
1047	(*sc->ie_reset_586) (sc);
1048	(*sc->ie_chan_attn) (sc);
1049
1050	DELAY(100);		/* wait a while... */
1051
1052	if (iscp->ie_busy) {
1053		return (0);
1054	}
1055	/*
1056	 * Now relocate the ISCP to its real home, and reset the controller
1057	 * again.
1058	 */
1059	iscp = (void *) Align((caddr_t) (uintptr_t)
1060			      (realbase + IE_SCP_ADDR -
1061			       sizeof(struct ie_int_sys_conf_ptr)));
1062	bzero((volatile char *) iscp, sizeof *iscp);	/* ignore cast-qual */
1063
1064	scp->ie_iscp_ptr = (caddr_t) (uintptr_t)
1065	    ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase);
1066
1067	iscp->ie_busy = 1;
1068	iscp->ie_scb_offset = MK_16(realbase, scb);
1069
1070	(*sc->ie_reset_586) (sc);
1071	(*sc->ie_chan_attn) (sc);
1072
1073	DELAY(100);
1074
1075	if (iscp->ie_busy) {
1076		return (0);
1077	}
1078	sc->iomem = (caddr_t) (uintptr_t) realbase;
1079
1080	sc->iscp = iscp;
1081	sc->scb = scb;
1082
1083	/*
1084	 * Acknowledge any interrupts we may have caused...
1085	 */
1086	ie_ack(sc, IE_ST_WHENCE);
1087
1088	return (1);
1089}
1090
1091/*
1092 * Divine the memory size of ie board UNIT.
1093 * Better hope there's nothing important hiding just below the ie card...
1094 */
1095static void
1096find_ie_mem_size(struct ie_softc *sc)
1097{
1098	unsigned size;
1099
1100	sc->iosize = 0;
1101
1102	for (size = 65536; size >= 8192; size -= 8192) {
1103		if (check_ie_present(sc)) {
1104			return;
1105		}
1106	}
1107
1108	return;
1109}
1110
1111void
1112el_reset_586(struct ie_softc *sc)
1113{
1114	outb(PORT(sc) + IE507_CTRL, EL_CTRL_RESET);
1115	DELAY(100);
1116	outb(PORT(sc) + IE507_CTRL, EL_CTRL_NORMAL);
1117	DELAY(100);
1118}
1119
1120void
1121sl_reset_586(struct ie_softc *sc)
1122{
1123	outb(PORT(sc) + IEATT_RESET, 0);
1124}
1125
1126void
1127ee16_reset_586(struct ie_softc *sc)
1128{
1129	outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_586);
1130	DELAY(100);
1131	outb(PORT(sc) + IEE16_ECTRL, 0);
1132	DELAY(100);
1133}
1134
1135void
1136el_chan_attn(struct ie_softc *sc)
1137{
1138	outb(PORT(sc) + IE507_ATTN, 1);
1139}
1140
1141void
1142sl_chan_attn(struct ie_softc *sc)
1143{
1144	outb(PORT(sc) + IEATT_ATTN, 0);
1145}
1146
1147void
1148ee16_chan_attn(struct ie_softc *sc)
1149{
1150	outb(PORT(sc) + IEE16_ATTN, 0);
1151}
1152
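/*
 * Read one 16-bit word from the EE16's serial EEPROM by bit-banging the
 * control register: select the EEPROM, clock out the READ opcode and the
 * word address, then clock the data bits back in.
 */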
1153u_short
1154ee16_read_eeprom(struct ie_softc *sc, int location)
1155{
1156	int	ectrl, edata;
1157
1158	ectrl = inb(sc->port + IEE16_ECTRL);
1159	ectrl &= IEE16_ECTRL_MASK;
1160	ectrl |= IEE16_ECTRL_EECS;
1161	outb(sc->port + IEE16_ECTRL, ectrl);
1162
1163	ee16_eeprom_outbits(sc, IEE16_EEPROM_READ, IEE16_EEPROM_OPSIZE1);
1164	ee16_eeprom_outbits(sc, location, IEE16_EEPROM_ADDR_SIZE);
1165	edata = ee16_eeprom_inbits(sc);
1166	ectrl = inb(sc->port + IEE16_ECTRL);
1167	ectrl &= ~(IEE16_RESET_ASIC | IEE16_ECTRL_EEDI | IEE16_ECTRL_EECS);
1168	outb(sc->port + IEE16_ECTRL, ectrl);
1169	ee16_eeprom_clock(sc, 1);
1170	ee16_eeprom_clock(sc, 0);
1171	return edata;
1172}
1173
1174static void
1175ee16_eeprom_outbits(struct ie_softc *sc, int edata, int count)
1176{
1177	int	ectrl, i;
1178
1179	ectrl = inb(sc->port + IEE16_ECTRL);
1180	ectrl &= ~IEE16_RESET_ASIC;
1181	for (i = count - 1; i >= 0; i--) {
1182		ectrl &= ~IEE16_ECTRL_EEDI;
1183		if (edata & (1 << i)) {
1184			ectrl |= IEE16_ECTRL_EEDI;
1185		}
1186		outb(sc->port + IEE16_ECTRL, ectrl);
1187		DELAY(1);	/* eeprom data must be setup for 0.4 uSec */
1188		ee16_eeprom_clock(sc, 1);
1189		ee16_eeprom_clock(sc, 0);
1190	}
1191	ectrl &= ~IEE16_ECTRL_EEDI;
1192	outb(sc->port + IEE16_ECTRL, ectrl);
1193	DELAY(1);		/* eeprom data must be held for 0.4 uSec */
1194}
1195
1196static int
1197ee16_eeprom_inbits(struct ie_softc *sc)
1198{
1199	int	ectrl, edata, i;
1200
1201	ectrl = inb(sc->port + IEE16_ECTRL);
1202	ectrl &= ~IEE16_RESET_ASIC;
1203	for (edata = 0, i = 0; i < 16; i++) {
1204		edata = edata << 1;
1205		ee16_eeprom_clock(sc, 1);
1206		ectrl = inb(sc->port + IEE16_ECTRL);
1207		if (ectrl & IEE16_ECTRL_EEDO) {
1208			edata |= 1;
1209		}
1210		ee16_eeprom_clock(sc, 0);
1211	}
1212	return (edata);
1213}
1214
1215static void
1216ee16_eeprom_clock(struct ie_softc *sc, int state)
1217{
1218	int	ectrl;
1219
1220	ectrl = inb(sc->port + IEE16_ECTRL);
1221	ectrl &= ~(IEE16_RESET_ASIC | IEE16_ECTRL_EESK);
1222	if (state) {
1223		ectrl |= IEE16_ECTRL_EESK;
1224	}
1225	outb(sc->port + IEE16_ECTRL, ectrl);
1226	DELAY(9);		/* EESK must be stable for 8.38 uSec */
1227}
1228
1229static __inline void
1230ee16_interrupt_enable(struct ie_softc *sc)
1231{
1232	DELAY(100);
1233	outb(sc->port + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE);
1234	DELAY(100);
1235}
1236
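/*
 * Read the station address from the card; the boards using the sl_*
 * routines expose it one byte per I/O port starting at the base port.
 */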
void
sl_read_ether(struct ie_softc *sc, unsigned char *addr)
{
	int	i;

	for (i = 0; i < 6; i++)
		addr[i] = inb(PORT(sc) + i);
}
1245
1246static void
1247iereset(struct ie_softc *sc)
1248{
1249	struct ifnet *ifp = sc->ifp;
1250
1251	if_printf(ifp, "reset\n");
1252	ie_stop(sc);
1253
1254	/*
1255	 * Stop i82586 dead in its tracks.
1256	 */
1257	if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
1258		if_printf(ifp, "abort commands timed out\n");
1259
1260	if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
1261		if_printf(ifp, "disable commands timed out\n");
1262
1263#ifdef notdef
1264	if (!check_ie_present(sc))
1265		panic("ie disappeared!");
1266#endif
1267
1268	if (ifp->if_flags & IFF_UP)
1269		ieinit_locked(sc);
1270
1271	return;
1272}
1273
1274/*
1275 * Send a command to the controller and wait for it to either
1276 * complete or be accepted, depending on the command.  If the
1277 * command pointer is null, then pretend that the command is
1278 * not an action command.  If the command pointer is not null,
1279 * and the command is an action command, wait for
1280 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
1281 * to become true.
1282 */
1283static int
1284command_and_wait(struct ie_softc *sc, int cmd, volatile void *pcmd, int mask)
1285{
1286	volatile struct ie_cmd_common *cc = pcmd;
1287	int i;
1288
1289	sc->scb->ie_command = (u_short) cmd;
1290
1291	if (IE_ACTION_COMMAND(cmd) && pcmd) {
1292		(*sc->ie_chan_attn) (sc);
1293
1294		/*
1295		 * Now spin-lock waiting for status.  This is not a very
1296		 * nice thing to do, but I haven't figured out how, or
1297		 * indeed if, we can put the process waiting for action to
1298		 * sleep.  (We may be getting called through some other
1299		 * timeout running in the kernel.)
1300		 *
1301		 * According to the packet driver, the minimum timeout
1302		 * should be .369 seconds, which we round up to .37.
1303		 */
1304		for (i = 0; i < 370; i++) {
1305			if (cc->ie_cmd_status & mask)
1306				return (0);
1307			DELAY(1000);
1308		}
1309
1310		return (1);
1311	} else {
1312
1313		/*
1314		 * Otherwise, just wait for the command to be accepted.
1315		 */
1316		(*sc->ie_chan_attn) (sc);
1317
1318		while (sc->scb->ie_command);	/* spin lock */
1319
1320		return (0);
1321	}
1322}
1323
1324/*
1325 * Run the time-domain reflectometer...
1326 */
1327static void
1328run_tdr(struct ie_softc *sc, volatile struct ie_tdr_cmd *cmd)
1329{
1330	int	result;
1331
1332	cmd->com.ie_cmd_status = 0;
1333	cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
1334	cmd->com.ie_cmd_link = 0xffff;
1335	cmd->ie_tdr_time = 0;
1336
1337	sc->scb->ie_command_list = MK_16(MEM(sc), cmd);
1338	cmd->ie_tdr_time = 0;
1339
1340	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL))
1341		result = 0x2000;
1342	else
1343		result = cmd->ie_tdr_time;
1344
1345	ie_ack(sc, IE_ST_WHENCE);
1346
1347	if (result & IE_TDR_SUCCESS)
1348		return;
1349
1350	if (result & IE_TDR_XCVR) {
1351		if_printf(sc->ifp, "transceiver problem\n");
1352	} else if (result & IE_TDR_OPEN) {
1353		if_printf(sc->ifp, "TDR detected an open %d clocks away\n",
1354		       result & IE_TDR_TIME);
1355	} else if (result & IE_TDR_SHORT) {
1356		if_printf(sc->ifp, "TDR detected a short %d clocks away\n",
1357		       result & IE_TDR_TIME);
1358	} else {
1359		if_printf(sc->ifp, "TDR returned unknown status %x\n", result);
1360	}
1361}
1362
1363static void
1364start_receiver(struct ie_softc *sc)
1365{
1366
1367	sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
1368	command_and_wait(sc, IE_RU_START, 0, 0);
1369
1370	ie_ack(sc, IE_ST_WHENCE);
1371}
1372
1373/*
1374 * Here is a helper routine for iernr() and ieinit().  This sets up
1375 * the RFA.
1376 */
1377static v_caddr_t
1378setup_rfa(struct ie_softc *sc, v_caddr_t ptr)
1379{
1380	volatile struct ie_recv_frame_desc *rfd = (volatile void *)ptr;
1381	volatile struct ie_recv_buf_desc *rbd;
1382	int	i;
1383
1384	/* First lay them out */
1385	for (i = 0; i < sc->nframes; i++) {
1386		sc->rframes[i] = rfd;
1387		bzero((volatile char *) rfd, sizeof *rfd);	/* ignore cast-qual */
1388		rfd++;
1389	}
1390
1391	ptr = Alignvol(rfd);		/* ignore cast-qual */
1392
1393	/* Now link them together */
1394	for (i = 0; i < sc->nframes; i++) {
1395		sc->rframes[i]->ie_fd_next =
1396		    MK_16(MEM(sc), sc->rframes[(i + 1) % sc->nframes]);
1397	}
1398
1399	/* Finally, set the EOL bit on the last one. */
1400	sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST;
1401
1402	/*
1403	 * Now lay out some buffers for the incoming frames.  Note that we
1404	 * set aside a bit of slop in each buffer, to make sure that we have
1405	 * enough space to hold a single frame in every buffer.
1406	 */
1407	rbd = (volatile void *) ptr;
1408
1409	for (i = 0; i < sc->nrxbufs; i++) {
1410		sc->rbuffs[i] = rbd;
1411		bzero((volatile char *)rbd, sizeof *rbd);
1412		ptr = Alignvol(ptr + sizeof *rbd);
1413		rbd->ie_rbd_length = IE_RBUF_SIZE;
1414		rbd->ie_rbd_buffer = MK_24(MEM(sc), ptr);
1415		sc->cbuffs[i] = (volatile void *) ptr;
1416		ptr += IE_RBUF_SIZE;
1417		rbd = (volatile void *) ptr;
1418	}
1419
1420	/* Now link them together */
1421	for (i = 0; i < sc->nrxbufs; i++) {
1422		sc->rbuffs[i]->ie_rbd_next =
1423		    MK_16(MEM(sc), sc->rbuffs[(i + 1) % sc->nrxbufs]);
1424	}
1425
1426	/* Tag EOF on the last one */
1427	sc->rbuffs[sc->nrxbufs - 1]->ie_rbd_length |= IE_RBD_LAST;
1428
1429	/*
1430	 * We use the head and tail pointers on receive to keep track of the
1431	 * order in which RFDs and RBDs are used.
1432	 */
1433	sc->rfhead = 0;
1434	sc->rftail = sc->nframes - 1;
1435	sc->rbhead = 0;
1436	sc->rbtail = sc->nrxbufs - 1;
1437
1438	sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
1439	sc->rframes[0]->ie_fd_buf_desc = MK_16(MEM(sc), sc->rbuffs[0]);
1440
1441	ptr = Alignvol(ptr);
1442	return (ptr);
1443}
1444
1445/*
1446 * Run the multicast setup command.
1447 */
1448static int
1449mc_setup(struct ie_softc *sc)
1450{
1451	volatile struct ie_mcast_cmd *cmd = (volatile void *)sc->xmit_cbuffs[0];
1452
1453	cmd->com.ie_cmd_status = 0;
1454	cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
1455	cmd->com.ie_cmd_link = 0xffff;
1456
1457	/* ignore cast-qual */
1458	bcopy((v_caddr_t) sc->mcast_addrs, (v_caddr_t) cmd->ie_mcast_addrs,
1459	      sc->mcast_count * sizeof *sc->mcast_addrs);
1460
1461	cmd->ie_mcast_bytes = sc->mcast_count * 6;	/* grrr... */
1462
1463	sc->scb->ie_command_list = MK_16(MEM(sc), cmd);
1464	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)
1465	    || !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1466		if_printf(sc->ifp, "multicast address setup command failed\n");
1467		return (0);
1468	}
1469	return (1);
1470}
1471
1472/*
1473 * This routine takes the environment generated by check_ie_present()
1474 * and adds to it all the other structures we need to operate the adapter.
1475 * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands,
1476 * starting the receiver unit, and clearing interrupts.
1477 */
static void
ieinit(void *xsc)
{
1482	struct ie_softc *sc = xsc;
1483
1484	IE_LOCK(sc);
1485	ieinit_locked(sc);
1486	IE_UNLOCK(sc);
1487}
1488
1489static void
1490ieinit_locked(struct ie_softc *sc)
1491{
1492	struct ifnet *ifp = sc->ifp;
1493	volatile struct ie_sys_ctl_block *scb = sc->scb;
1494	caddr_t ptr;
1495	int	i;
1496
1497	ptr = Alignvol((volatile char *) scb + sizeof *scb);
1498
1499	/*
1500	 * Send the configure command first.
1501	 */
1502	{
1503		volatile struct ie_config_cmd *cmd = (volatile void *) ptr;
1504
1505		ie_setup_config(cmd, sc->promisc,
1506				sc->hard_type == IE_STARLAN10);
1507		cmd->com.ie_cmd_status = 0;
1508		cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
1509		cmd->com.ie_cmd_link = 0xffff;
1510
1511		scb->ie_command_list = MK_16(MEM(sc), cmd);
1512
1513		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)
1514		 || !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1515			if_printf(ifp, "configure command failed\n");
1516			return;
1517		}
1518	}
1519	/*
1520	 * Now send the Individual Address Setup command.
1521	 */
1522	{
1523		volatile struct ie_iasetup_cmd *cmd = (volatile void *) ptr;
1524
1525		cmd->com.ie_cmd_status = 0;
1526		cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
1527		cmd->com.ie_cmd_link = 0xffff;
1528
1529		bcopy((volatile char *)IF_LLADDR(ifp),
1530		      (volatile char *)&cmd->ie_address, sizeof cmd->ie_address);
1531		scb->ie_command_list = MK_16(MEM(sc), cmd);
1532		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)
1533		    || !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1534			if_printf(ifp, "individual address "
1535			       "setup command failed\n");
1536			return;
1537		}
1538	}
1539
1540	/*
1541	 * Now run the time-domain reflectometer.
1542	 */
1543	run_tdr(sc, (volatile void *) ptr);
1544
1545	/*
1546	 * Acknowledge any interrupts we have generated thus far.
1547	 */
1548	ie_ack(sc, IE_ST_WHENCE);
1549
1550	/*
1551	 * Set up the RFA.
1552	 */
1553	ptr = setup_rfa(sc, ptr);
1554
1555	/*
1556	 * Finally, the transmit command and buffer are the last little bit
1557	 * of work.
1558	 */
1559
1560	/* transmit command buffers */
1561	for (i = 0; i < sc->ntxbufs; i++) {
1562		sc->xmit_cmds[i] = (volatile void *) ptr;
1563		ptr += sizeof *sc->xmit_cmds[i];
1564		ptr = Alignvol(ptr);
1565		sc->xmit_buffs[i] = (volatile void *)ptr;
1566		ptr += sizeof *sc->xmit_buffs[i];
1567		ptr = Alignvol(ptr);
1568	}
1569
1570	/* transmit buffers */
1571	for (i = 0; i < sc->ntxbufs - 1; i++) {
1572		sc->xmit_cbuffs[i] = (volatile void *)ptr;
1573		ptr += IE_BUF_LEN;
1574		ptr = Alignvol(ptr);
1575	}
1576	sc->xmit_cbuffs[sc->ntxbufs - 1] = (volatile void *) ptr;
1577
1578	for (i = 1; i < sc->ntxbufs; i++) {
1579		bzero((v_caddr_t) sc->xmit_cmds[i], sizeof *sc->xmit_cmds[i]);
1580		bzero((v_caddr_t) sc->xmit_buffs[i], sizeof *sc->xmit_buffs[i]);
1581	}
1582
1583	/*
1584	 * This must be coordinated with iestart() and ietint().
1585	 */
1586	sc->xmit_cmds[0]->ie_xmit_status = IE_STAT_COMPL;
1587
1588	/* take the ee16 out of loopback */
1589	if (sc->hard_type == IE_EE16) {
1590		u_int8_t bart_config;
1591
1592		bart_config = inb(PORT(sc) + IEE16_CONFIG);
1593		bart_config &= ~IEE16_BART_LOOPBACK;
1594		/* inb doesn't get bit! */
1595		bart_config |= IEE16_BART_MCS16_TEST;
1596		outb(PORT(sc) + IEE16_CONFIG, bart_config);
1597		ee16_interrupt_enable(sc);
1598		ee16_chan_attn(sc);
1599	}
1600	ifp->if_drv_flags |= IFF_DRV_RUNNING;	/* tell higher levels
1601							 * we're here */
1602	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1603
1604	start_receiver(sc);
1605
1606	return;
1607}
1608
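/*
 * Stop the interface: mark it down for the stack and tell the chip's
 * receive unit to shut off.
 */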
static void
ie_stop(struct ie_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	command_and_wait(sc, IE_RU_DISABLE, 0, 0);
}
1617
1618static int
1619ieioctl(struct ifnet *ifp, u_long command, caddr_t data)
1620{
1621	int	error = 0;
1622	struct	 ie_softc *sc = ifp->if_softc;
1623
1624	switch (command) {
1625	case SIOCSIFFLAGS:
1626		/*
1627		 * Note that this device doesn't have an "all multicast"
1628		 * mode, so we must turn on promiscuous mode and do the
1629		 * filtering manually.
1630		 */
1631		IE_LOCK(sc);
1632		if ((ifp->if_flags & IFF_UP) == 0 &&
1633		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1634			ie_stop(sc);
1635		} else if ((ifp->if_flags & IFF_UP) &&
1636			   (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1637			sc->promisc =
1638			    ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
1639			ieinit_locked(sc);
1640		} else if (sc->promisc ^
1641			   (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1642			sc->promisc =
1643			    ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
1644			ieinit_locked(sc);
1645		}
1646		IE_UNLOCK(sc);
1647		break;
1648
1649	case SIOCADDMULTI:
1650	case SIOCDELMULTI:
1651		/*
1652		 * Update multicast listeners
1653		 */
1654		/* reset multicast filtering */
1655		IE_LOCK(sc);
1656		ie_mc_reset(sc);
1657		IE_UNLOCK(sc);
1658		error = 0;
1659		break;
1660
1661	default:
1662		error = ether_ioctl(ifp, command, data);
1663		break;
1664	}
1665
1666	return (error);
1667}
1668
1669static void
1670ie_mc_reset(struct ie_softc *sc)
1671{
1672	struct ifmultiaddr *ifma;
1673
1674	/*
1675	 * Step through the list of addresses.
1676	 */
1677	sc->mcast_count = 0;
1678	if_maddr_rlock(sc->ifp);
1679	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
1680		if (ifma->ifma_addr->sa_family != AF_LINK)
1681			continue;
1682
		/* XXX - this is broken... */
		if (sc->mcast_count >= MAXMCAST) {
			sc->ifp->if_flags |= IFF_ALLMULTI;
			/* Don't leak the address-list lock on early exit. */
			if_maddr_runlock(sc->ifp);
			if (sc->ifp->if_flags & IFF_UP)
				ieinit_locked(sc);
			goto setflag;
		}
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		      &(sc->mcast_addrs[sc->mcast_count]), 6);
		sc->mcast_count++;
	}
	if_maddr_runlock(sc->ifp);
1695
1696setflag:
1697	sc->want_mcsetup = 1;
1698}
1699
1700
1701#ifdef DEBUG
1702static void
1703print_rbd(volatile struct ie_recv_buf_desc * rbd)
1704{
1705	printf("RBD at %p:\n"
1706	       "actual %04x, next %04x, buffer %p\n"
1707	       "length %04x, mbz %04x\n",
1708	       (volatile void *) rbd,
1709	       rbd->ie_rbd_actual, rbd->ie_rbd_next,
1710	       (void *) rbd->ie_rbd_buffer,
1711	       rbd->ie_rbd_length, rbd->mbz);
1712}
1713
1714#endif				/* DEBUG */
1715
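/*
 * Allocate the I/O port, shared-memory and interrupt resources for the
 * device and record the bus tags/handles, the I/O base, and the memory
 * window address and size in the softc.
 */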
1716int
1717ie_alloc_resources (device_t dev)
1718{
1719	struct ie_softc *       sc;
1720	int                     error;
1721
1722	error = 0;
1723	sc = device_get_softc(dev);
1724
1725	sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid,
1726					    RF_ACTIVE);
1727	if (!sc->io_res) {
1728		device_printf(dev, "No I/O space?!\n");
1729		error = ENOMEM;
1730		goto bad;
1731	}
1732	sc->io_bt = rman_get_bustag(sc->io_res);
1733	sc->io_bh = rman_get_bushandle(sc->io_res);
1734
1735	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
1736					     RF_ACTIVE);
1737	if (!sc->mem_res) {
		device_printf(dev, "No Memory!\n");
1739		error = ENOMEM;
1740		goto bad;
1741	}
1742	sc->mem_bt = rman_get_bustag(sc->mem_res);
1743	sc->mem_bh = rman_get_bushandle(sc->mem_res);
1744
1745	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
1746					     RF_ACTIVE);
1747	if (!sc->irq_res) {
1748		device_printf(dev, "No IRQ!\n");
1749		error = ENOMEM;
1750		goto bad;
1751	}
1752
1753	sc->port = rman_get_start(sc->io_res);  /* XXX hack */
1754	sc->iomembot = rman_get_virtual(sc->mem_res);
1755	sc->iosize = rman_get_size(sc->mem_res);
1756
1757	return (0);
1758bad:
1759	return (error);
1760}
1761
1762void
1763ie_release_resources (device_t dev)
1764{
1765	struct ie_softc *       sc;
1766
1767	sc = device_get_softc(dev);
1768
1769	if (sc->irq_ih)
1770		bus_teardown_intr(dev, sc->irq_res, sc->irq_ih);
1771	if (sc->rframes)
1772		free(sc->rframes, M_DEVBUF);
1773	if (sc->io_res)
1774		bus_release_resource(dev, SYS_RES_IOPORT,
1775				     sc->io_rid, sc->io_res);
1776	if (sc->irq_res)
1777		bus_release_resource(dev, SYS_RES_IRQ,
1778				     sc->irq_rid, sc->irq_res);
1779	if (sc->mem_res)
1780		bus_release_resource(dev, SYS_RES_MEMORY,
1781				     sc->mem_rid, sc->mem_res);
1782	if (sc->ifp)
1783		if_free(sc->ifp);
1784
1785	return;
1786}
1787
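/*
 * Detach the interface: quiesce the hardware, unhook it from the network
 * stack, and release all bus resources.
 */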
1788int
1789ie_detach (device_t dev)
1790{
1791	struct ie_softc *	sc;
1792	struct ifnet *		ifp;
1793
1794	sc = device_get_softc(dev);
1795	ifp = sc->ifp;
1796
1797	IE_LOCK(sc);
1798	if (sc->hard_type == IE_EE16)
1799		ee16_shutdown(sc);
1800
1801	ie_stop(sc);
1802	IE_UNLOCK(sc);
1803	ether_ifdetach(ifp);
1804	ie_release_resources(dev);
1805	mtx_destroy(&sc->lock);
1806
1807	return (0);
1808}
1809