/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
--- 19 unchanged lines hidden ---
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
 *
 * $FreeBSD: head/sys/dev/hme/if_hme.c 108834 2003-01-06 22:12:57Z tmm $
 */

/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
--- 52 unchanged lines hidden ---
    u_int32_t, u_int32_t);
static void hme_mifinit(struct hme_softc *);
static void hme_reset(struct hme_softc *);
static void hme_setladrf(struct hme_softc *, int);

static int hme_mediachange(struct ifnet *);
static void hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int hme_load_txmbuf(struct hme_softc *, struct mbuf *);
static void hme_read(struct hme_softc *, int, int);
static void hme_eint(struct hme_softc *, u_int);
static void hme_rint(struct hme_softc *);
static void hme_tint(struct hme_softc *);

static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void hme_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void hme_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);

devclass_t hme_devclass;

static int hme_nerr;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

--- 122 unchanged lines hidden ---
		if (error != 0)
			goto fail_rxdesc;
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
--- 6 unchanged lines hidden ---
	ifp->if_name = "hme";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_output = ether_output;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_snd.ifq_maxlen = HME_NTXQ;

	hme_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxdesc;
	}
--- 107 unchanged lines hidden ---
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

static void
hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsize, int error)
{
	bus_addr_t *a = xsc;

	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
	if (error != 0)
		return;
	*a = segs[0].ds_addr;
}

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static void
hme_discard_rxbuf(struct hme_softc *sc, int ix, int sync)
{

	/*
	 * We dropped a packet; reinitialize the descriptor and hand
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
	if (sync) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_addr_t ba;
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri, 0);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = max(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned. The 2 byte offset is
	 * removed when the mbuf is handed up. XXX: this ensures at least
	 * 16 byte alignment of the header adjacent to the ethernet header,
	 * which should be sufficient in all cases. Nevertheless, this
	 * second-guesses ALIGN().
	 */
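	/*
	 * Illustrative example (hypothetical values, not from real
	 * hardware): with sc_burst = 64 and a buffer starting at
	 * b = 0x12345679, a = max(16, 64) = 64 and
	 * roundup2(b, a) - b = 0x12345680 - 0x12345679 = 7, so the
	 * m_adj() below trims 7 bytes and the mapped buffer begins on
	 * a 64-byte burst boundary.
	 */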
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, hme_rxdma_callback, &ba, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}

static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
--- 25 unchanged lines hidden ---
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			m_freem(td->htx_m);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
--- 62 unchanged lines hidden ---
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Set up data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
--- 85 unchanged lines hidden ---
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA and set the RX first byte offset (FBO) to HME_RXOFFS. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
--- 11 unchanged lines hidden ---
	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

#ifdef HMEDEBUG
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
--- 10 unchanged lines hidden ---

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	hme_start(ifp);
}

struct hme_txdma_arg {
	struct hme_softc *hta_sc;
	struct hme_txdesc *hta_htx;
	int hta_ndescs;
};

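/*
 * hme_txdma_callback() reports back through hta_ndescs: it is set to the
 * number of segments consumed on success, or to -1 when there is not
 * enough room left in the TX ring, in which case hme_load_txmbuf() fails
 * the load without touching the ring.
 */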
/*
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf() are readable from the nearest burst boundary at or
 * before ds_addr up to the first boundary beyond the end of the segment.
 * This is usually a safe assumption to make, but it is not documented.
 */
static void
hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsz, int error)
{
	struct hme_txdma_arg *ta = xsc;
	struct hme_txdesc *htx;
	bus_size_t len = 0;
	caddr_t txd;
	u_int32_t flags = 0;
	int i, tdhead, pci;

	if (error != 0)
		return;

	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
	pci = ta->hta_sc->sc_pci;
	txd = ta->hta_sc->sc_rb.rb_txd;
	htx = ta->hta_htx;

	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
		ta->hta_ndescs = -1;
		return;
	}
	ta->hta_ndescs = nsegs;

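	/*
	 * Walk the segment list. The first descriptor of the packet gets
	 * HME_XD_SOP and the one covering the final bytes gets HME_XD_EOP.
	 * The OWN bits are deliberately not set here; hme_load_txmbuf()
	 * grants ownership to the chip back to front once the whole chain
	 * has been mapped.
	 */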
	for (i = 0; i < nsegs; i++) {
		if (segs[i].ds_len == 0)
			continue;

		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
		if (len == 0)
			flags |= HME_XD_SOP;
		if (len + segs[i].ds_len == totsz)
			flags |= HME_XD_EOP;
		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
		    (u_int)segs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);

		ta->hta_sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = tdhead;
		tdhead = (tdhead + 1) % HME_NTXDESC;
		len += segs[i].ds_len;
	}
	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
	KASSERT((flags & HME_XD_EOP) != 0,
	    ("hme_txdma_callback: missed end of packet!"));
}

/*
 * DMA-map an mbuf chain, set up the descriptor ring accordingly and start
 * the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors to map
 * the packet, or an errno otherwise.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
{
	struct hme_txdma_arg cba;
	struct hme_txdesc *td;
	int error, si, ri;
	u_int32_t flags;

	si = sc->sc_rb.rb_tdhead;
	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
		return (-1);
	td->htx_m = m0;
	cba.hta_sc = sc;
	cba.hta_htx = td;
	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
	    m0, hme_txdma_callback, &cba, 0)) != 0)
		goto fail;
	if (cba.hta_ndescs == -1) {
		error = -1;
		goto fail;
	}
	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);

	/*
	 * Turn descriptor ownership over to the hme, back to front, so that
	 * the chip, which starts fetching as soon as it sees an owned SOP
	 * descriptor, never encounters a partially set-up chain.
	 */
	ri = sc->sc_rb.rb_tdhead;
	CTR2(KTR_HME, "hme_load_txmbuf: next desc is %d (%#x)",
	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
	do {
		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
		    HME_XD_OWN;
--- 4 unchanged lines hidden ---

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
	return (0);
fail:
	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
	/* Clear the stale mbuf pointer; td stays on the free queue. */
	td->htx_m = NULL;
	return (error);
}

/*
 * Pass a packet to the higher levels.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {
#ifdef HMEDEBUG
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		    len);
#endif
		ifp->if_ierrors++;
		hme_discard_rxbuf(sc, ix, 1);
		return;
	}

	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	CTR1(KTR_HME, "hme_read: len %d", len);

	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		/*
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped. If it can not,
		 * drop the packet, but leave the interface up.
		 */
		ifp->if_iqdrops++;
		hme_discard_rxbuf(sc, ix, 1);
		return;
	}

	ifp->if_ipackets++;

	/* Changed the rings; sync. */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	m->m_pkthdr.rcvif = ifp;
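	/*
	 * The chip stored the frame HME_RXOFFS bytes into the buffer (the
	 * FBO field programmed in hme_init()); account for that padding in
	 * the length and then trim it off so the mbuf starts at the
	 * Ethernet header.
	 */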
	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
	m_adj(m, HME_RXOFFS);
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

static void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
--- 4 unchanged lines hidden ---
		return;

	error = 0;
	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		error = hme_load_txmbuf(sc, m);
		if (error == -1) {
			ifp->if_flags |= IFF_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m);
			break;
		} else if (error > 0) {
			printf("hme_start: error %d while loading mbuf\n",
			    error);
		} else {
			enq = 1;
			BPF_MTAP(ifp, m);
		}
	}

	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
		ifp->if_flags |= IFF_OACTIVE;
--- 4 unchanged lines hidden ---

/*
 * Transmit interrupt.
 */
static void
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct hme_txdesc *htx;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
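	/*
	 * htx tracks the head of the busy queue, i.e. the oldest packet
	 * still in flight; its htx_lastdesc names the descriptor carrying
	 * HME_XD_EOP for that packet, which the KASSERT in the loop below
	 * cross-checks against the ring position.
	 */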
	/* Fetch current position in the transmit ring */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
			break;
		}

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

		if ((txflags & HME_XD_OWN) != 0)
			break;

		CTR0(KTR_HME, "hme_tint: not owned");
		--sc->sc_rb.rb_td_nbusy;
		ifp->if_flags &= ~IFF_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
			continue;

		KASSERT(htx->htx_lastdesc == ri,
		    ("hme_tint: ring indices skewed: %d != %d!",
		    htx->htx_lastdesc, ri));
		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);

		ifp->if_opackets++;
		m_freem(htx->htx_m);
		htx->htx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	}
	/* Turn off watchdog */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

--- 256 unchanged lines hidden ---
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			hme_init(sc);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
--- 124 unchanged lines hidden ---