if_bge.c (117659, deleted lines) vs. if_bge.c (118026, added lines)
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 50 unchanged lines hidden ---

59 * Without external SSRAM, you can only have at most 4 TX rings,
60 * and the use of the mini RX ring is disabled. This seems to imply
61 * that these features are simply not available on the BCM5701. As a
62 * result, this driver does not implement any support for the mini RX
63 * ring.
64 */
65
66#include <sys/cdefs.h>
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 50 unchanged lines hidden ---

59 * Without external SSRAM, you can only have at most 4 TX rings,
60 * and the use of the mini RX ring is disabled. This seems to imply
61 * that these features are simply not available on the BCM5701. As a
62 * result, this driver does not implement any support for the mini RX
63 * ring.
64 */
65
66#include <sys/cdefs.h>
67__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 117659 2003-07-16 00:09:56Z wpaul $");
67__FBSDID("$FreeBSD: head/sys/dev/bge/if_bge.c 118026 2003-07-25 19:42:44Z wpaul $");
68
69#include <sys/param.h>
68
69#include <sys/param.h>
70#include <sys/endian.h>
70#include <sys/systm.h>
71#include <sys/sockio.h>
72#include <sys/mbuf.h>
73#include <sys/malloc.h>
74#include <sys/kernel.h>
75#include <sys/socket.h>
76#include <sys/queue.h>
77

--- 86 unchanged lines hidden ---

164 { 0, 0, NULL }
165};
166
167static int bge_probe (device_t);
168static int bge_attach (device_t);
169static int bge_detach (device_t);
170static void bge_release_resources
171 (struct bge_softc *);
71#include <sys/systm.h>
72#include <sys/sockio.h>
73#include <sys/mbuf.h>
74#include <sys/malloc.h>
75#include <sys/kernel.h>
76#include <sys/socket.h>
77#include <sys/queue.h>
78

--- 86 unchanged lines hidden ---

165 { 0, 0, NULL }
166};
167
168static int bge_probe (device_t);
169static int bge_attach (device_t);
170static int bge_detach (device_t);
171static void bge_release_resources
172 (struct bge_softc *);
173static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
174static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
175 bus_size_t, int);
176static int bge_dma_alloc (device_t);
177static void bge_dma_free (struct bge_softc *);
178
172static void bge_txeof (struct bge_softc *);
173static void bge_rxeof (struct bge_softc *);
174
175static void bge_tick (void *);
176static void bge_stats_update (struct bge_softc *);
177static void bge_stats_update_regs
178 (struct bge_softc *);
179static int bge_encap (struct bge_softc *, struct mbuf *,

--- 136 unchanged lines hidden ---

316 dev = sc->bge_dev;
317
318 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
319 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
320
321 return;
322}
323
179static void bge_txeof (struct bge_softc *);
180static void bge_rxeof (struct bge_softc *);
181
182static void bge_tick (void *);
183static void bge_stats_update (struct bge_softc *);
184static void bge_stats_update_regs
185 (struct bge_softc *);
186static int bge_encap (struct bge_softc *, struct mbuf *,

--- 136 unchanged lines hidden ---

323 dev = sc->bge_dev;
324
325 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
326 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
327
328 return;
329}
330
331/*
332 * Map a single buffer address.
333 */
334
335static void
336bge_dma_map_addr(arg, segs, nseg, error)
337 void *arg;
338 bus_dma_segment_t *segs;
339 int nseg;
340 int error;
341{
342 struct bge_dmamap_arg *ctx;
343
344 if (error)
345 return;
346
347 ctx = arg;
348
349 if (nseg > ctx->bge_maxsegs) {
350 ctx->bge_maxsegs = 0;
351 return;
352 }
353
354 ctx->bge_busaddr = segs->ds_addr;
355
356 return;
357}
358
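The callback above is the standard busdma pattern: bus_dmamap_load() resolves the mapping and hands the resulting segment list to bge_dma_map_addr(), which stashes the lone segment's bus address in the caller's struct bge_dmamap_arg. A minimal caller sketch, assuming only the bge_maxsegs and bge_busaddr fields used above (tag, map, vaddr and len are placeholders):

	struct bge_dmamap_arg ctx;
	int error;

	ctx.bge_maxsegs = 1;		/* refuse multi-segment mappings */
	error = bus_dmamap_load(tag, map, vaddr, len,
	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0)
		return (ENOMEM);	/* load failed or too many segments */
	/* ctx.bge_busaddr now holds the bus address of vaddr */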
359/*
360 * Map an mbuf chain into a TX ring.
361 */
362
363static void
364bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
365 void *arg;
366 bus_dma_segment_t *segs;
367 int nseg;
368 bus_size_t mapsize;
369 int error;
370{
371 struct bge_dmamap_arg *ctx;
372 struct bge_tx_bd *d = NULL;
373 int i = 0, idx;
374
375 if (error)
376 return;
377
378 ctx = arg;
379
380 	/* Signal error to caller if there are too many segments */
381 if (nseg > ctx->bge_maxsegs) {
382 ctx->bge_maxsegs = 0;
383 return;
384 }
385
386 idx = ctx->bge_idx;
387 while(1) {
388 d = &ctx->bge_ring[idx];
389 d->bge_addr.bge_addr_lo =
390 htole32(BGE_ADDR_LO(segs[i].ds_addr));
391 d->bge_addr.bge_addr_hi =
392 htole32(BGE_ADDR_HI(segs[i].ds_addr));
393 d->bge_len = htole16(segs[i].ds_len);
394 d->bge_flags = htole16(ctx->bge_flags);
395 i++;
396 if (i == nseg)
397 break;
398 BGE_INC(idx, BGE_TX_RING_CNT);
399 }
400
401 d->bge_flags |= htole16(BGE_TXBDFLAG_END);
402 ctx->bge_maxsegs = nseg;
403 ctx->bge_idx = idx;
404
405 return;
406}
407
408
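bge_dma_map_tx_desc() is the multi-segment counterpart: the caller points the context at the TX ring, seeds the starting index and the per-frame flags, and the callback writes one bge_tx_bd per DMA segment, tagging the last one with BGE_TXBDFLAG_END. Its extra mapsize argument matches the bus_dmamap_callback2_t shape, so it is presumably driven from bge_encap() via bus_dmamap_load_mbuf(); a hedged sketch (m_head, idx, avail and csum_flags are placeholders):

	struct bge_dmamap_arg ctx;
	int error;

	ctx.sc = sc;
	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;	/* descriptors to fill */
	ctx.bge_idx = idx;				/* first free TX slot */
	ctx.bge_flags = csum_flags;			/* per-frame BD flags */
	ctx.bge_maxsegs = avail;			/* free-descriptor budget */
	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_tx_dmamap[idx], m_head,
	    bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0)
		return (ENOBUFS);			/* out of descriptors */
	idx = ctx.bge_idx;				/* last descriptor written */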
324#ifdef notdef
325static u_int8_t
326bge_vpd_readbyte(sc, addr)
327 struct bge_softc *sc;
328 int addr;
329{
330 int i;
331 device_t dev;

--- 290 unchanged lines hidden ---

622 * Memory management for jumbo frames.
623 */
624
625static int
626bge_alloc_jumbo_mem(sc)
627 struct bge_softc *sc;
628{
629 caddr_t ptr;
409#ifdef notdef
410static u_int8_t
411bge_vpd_readbyte(sc, addr)
412 struct bge_softc *sc;
413 int addr;
414{
415 int i;
416 device_t dev;

--- 290 unchanged lines hidden ---

707 * Memory management for jumbo frames.
708 */
709
710static int
711bge_alloc_jumbo_mem(sc)
712 struct bge_softc *sc;
713{
714 caddr_t ptr;
630 register int i;
715 register int i, error;
631 struct bge_jpool_entry *entry;
632
716 struct bge_jpool_entry *entry;
717
633 /* Grab a big chunk o' storage. */
634 sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
635 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
718 /* Create tag for jumbo buffer block */
636
719
637 if (sc->bge_cdata.bge_jumbo_buf == NULL) {
638 printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit);
639 return(ENOBUFS);
720 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
721 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
722 NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
723 &sc->bge_cdata.bge_jumbo_tag);
724
725 if (error) {
726 printf("bge%d: could not allocate jumbo dma tag\n",
727 sc->bge_unit);
728 return (ENOMEM);
640 }
641
729 }
730
731 /* Allocate DMA'able memory for jumbo buffer block */
732
733 error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
734 (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
735 &sc->bge_cdata.bge_jumbo_map);
736
737 if (error)
738 return (ENOMEM);
739
642 SLIST_INIT(&sc->bge_jfree_listhead);
643 SLIST_INIT(&sc->bge_jinuse_listhead);
644
645 /*
646 * Now divide it up into 9K pieces and save the addresses
647 * in an array.
648 */
740 SLIST_INIT(&sc->bge_jfree_listhead);
741 SLIST_INIT(&sc->bge_jinuse_listhead);
742
743 /*
744 * Now divide it up into 9K pieces and save the addresses
745 * in an array.
746 */
649 ptr = sc->bge_cdata.bge_jumbo_buf;
747 ptr = sc->bge_ldata.bge_jumbo_buf;
650 for (i = 0; i < BGE_JSLOTS; i++) {
651 sc->bge_cdata.bge_jslots[i] = ptr;
652 ptr += BGE_JLEN;
653 entry = malloc(sizeof(struct bge_jpool_entry),
654 M_DEVBUF, M_NOWAIT);
655 if (entry == NULL) {
748 for (i = 0; i < BGE_JSLOTS; i++) {
749 sc->bge_cdata.bge_jslots[i] = ptr;
750 ptr += BGE_JLEN;
751 entry = malloc(sizeof(struct bge_jpool_entry),
752 M_DEVBUF, M_NOWAIT);
753 if (entry == NULL) {
656 contigfree(sc->bge_cdata.bge_jumbo_buf,
657 BGE_JMEM, M_DEVBUF);
658 sc->bge_cdata.bge_jumbo_buf = NULL;
754 bge_free_jumbo_mem(sc);
755 sc->bge_ldata.bge_jumbo_buf = NULL;
659 printf("bge%d: no memory for jumbo "
660 "buffer queue!\n", sc->bge_unit);
661 return(ENOBUFS);
662 }
663 entry->slot = i;
664 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
665 entry, jpool_entries);
666 }

--- 9 unchanged lines hidden ---

676 struct bge_jpool_entry *entry;
677
678 for (i = 0; i < BGE_JSLOTS; i++) {
679 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
680 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
681 free(entry, M_DEVBUF);
682 }
683
756 printf("bge%d: no memory for jumbo "
757 "buffer queue!\n", sc->bge_unit);
758 return(ENOBUFS);
759 }
760 entry->slot = i;
761 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
762 entry, jpool_entries);
763 }

--- 9 unchanged lines hidden ---

773 struct bge_jpool_entry *entry;
774
775 for (i = 0; i < BGE_JSLOTS; i++) {
776 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
777 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
778 free(entry, M_DEVBUF);
779 }
780
684 contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
781 /* Destroy jumbo buffer block */
685
782
783 	if (sc->bge_ldata.bge_jumbo_buf)
784 bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
785 sc->bge_ldata.bge_jumbo_buf,
786 sc->bge_cdata.bge_jumbo_map);
787
788 	if (sc->bge_cdata.bge_jumbo_map)
789 bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
790 sc->bge_cdata.bge_jumbo_map);
791
792 if (sc->bge_cdata.bge_jumbo_tag)
793 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
794
686 return;
687}
688
689/*
690 * Allocate a jumbo buffer.
691 */
692static void *
693bge_jalloc(sc)

--- 29 unchanged lines hidden ---

723 sc = (struct bge_softc *)args;
724
725 if (sc == NULL)
726 panic("bge_jfree: can't find softc pointer!");
727
728 /* calculate the slot this buffer belongs to */
729
730 i = ((vm_offset_t)buf
795 return;
796}
797
798/*
799 * Allocate a jumbo buffer.
800 */
801static void *
802bge_jalloc(sc)

--- 29 unchanged lines hidden ---

832 sc = (struct bge_softc *)args;
833
834 if (sc == NULL)
835 panic("bge_jfree: can't find softc pointer!");
836
837 /* calculate the slot this buffer belongs to */
838
839 i = ((vm_offset_t)buf
731 - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
840 - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
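	/* i.e. (buf - pool base) / BGE_JLEN: a buffer starting
	   2 * BGE_JLEN bytes into the pool lands in slot 2 */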
732
733 if ((i < 0) || (i >= BGE_JSLOTS))
734 panic("bge_jfree: asked to free buffer that we don't manage!");
735
736 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
737 if (entry == NULL)
738 panic("bge_jfree: buffer not in use!");
739 entry->slot = i;

--- 10 unchanged lines hidden ---

750static int
751bge_newbuf_std(sc, i, m)
752 struct bge_softc *sc;
753 int i;
754 struct mbuf *m;
755{
756 struct mbuf *m_new = NULL;
757 struct bge_rx_bd *r;
841
842 if ((i < 0) || (i >= BGE_JSLOTS))
843 panic("bge_jfree: asked to free buffer that we don't manage!");
844
845 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
846 if (entry == NULL)
847 panic("bge_jfree: buffer not in use!");
848 entry->slot = i;

--- 10 unchanged lines hidden ---

859static int
860bge_newbuf_std(sc, i, m)
861 struct bge_softc *sc;
862 int i;
863 struct mbuf *m;
864{
865 struct mbuf *m_new = NULL;
866 struct bge_rx_bd *r;
867 struct bge_dmamap_arg ctx;
868 int error;
758
759 if (m == NULL) {
760 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
761 if (m_new == NULL) {
762 return(ENOBUFS);
763 }
764
765 MCLGET(m_new, M_DONTWAIT);

--- 6 unchanged lines hidden ---

772 m_new = m;
773 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
774 m_new->m_data = m_new->m_ext.ext_buf;
775 }
776
777 if (!sc->bge_rx_alignment_bug)
778 m_adj(m_new, ETHER_ALIGN);
779 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
869
870 if (m == NULL) {
871 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
872 if (m_new == NULL) {
873 return(ENOBUFS);
874 }
875
876 MCLGET(m_new, M_DONTWAIT);

--- 6 unchanged lines hidden ---

883 m_new = m;
884 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
885 m_new->m_data = m_new->m_ext.ext_buf;
886 }
887
888 if (!sc->bge_rx_alignment_bug)
889 m_adj(m_new, ETHER_ALIGN);
890 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
780 r = &sc->bge_rdata->bge_rx_std_ring[i];
781 BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
782 r->bge_flags = BGE_RXBDFLAG_END;
783 r->bge_len = m_new->m_len;
784 r->bge_idx = i;
891 r = &sc->bge_ldata.bge_rx_std_ring[i];
892 ctx.bge_maxsegs = 1;
893 ctx.sc = sc;
894 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
895 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
896 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
897 if (error || ctx.bge_maxsegs == 0) {
898 if (m == NULL)
899 m_freem(m_new);
900 return(ENOMEM);
901 }
902 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
903 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
904 r->bge_flags = htole16(BGE_RXBDFLAG_END);
905 r->bge_len = htole16(m_new->m_len);
906 r->bge_idx = htole16(i);
785
907
908 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
909 sc->bge_cdata.bge_rx_std_dmamap[i],
910 BUS_DMASYNC_PREREAD);
911
786 return(0);
787}
788
789/*
790 * Initialize a jumbo receive ring descriptor. This allocates
791 * a jumbo buffer from the pool managed internally by the driver.
792 */
793static int
794bge_newbuf_jumbo(sc, i, m)
795 struct bge_softc *sc;
796 int i;
797 struct mbuf *m;
798{
799 struct mbuf *m_new = NULL;
800 struct bge_rx_bd *r;
912 return(0);
913}
914
915/*
916 * Initialize a jumbo receive ring descriptor. This allocates
917 * a jumbo buffer from the pool managed internally by the driver.
918 */
919static int
920bge_newbuf_jumbo(sc, i, m)
921 struct bge_softc *sc;
922 int i;
923 struct mbuf *m;
924{
925 struct mbuf *m_new = NULL;
926 struct bge_rx_bd *r;
927 struct bge_dmamap_arg ctx;
928 int error;
801
802 if (m == NULL) {
803 caddr_t *buf = NULL;
804
805 /* Allocate the mbuf. */
806 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
807 if (m_new == NULL) {
808 return(ENOBUFS);

--- 17 unchanged lines hidden ---

826 m_new = m;
827 m_new->m_data = m_new->m_ext.ext_buf;
828 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
829 }
830
831 if (!sc->bge_rx_alignment_bug)
832 m_adj(m_new, ETHER_ALIGN);
833 /* Set up the descriptor. */
929
930 if (m == NULL) {
931 caddr_t *buf = NULL;
932
933 /* Allocate the mbuf. */
934 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
935 if (m_new == NULL) {
936 return(ENOBUFS);

--- 17 unchanged lines hidden ---

954 m_new = m;
955 m_new->m_data = m_new->m_ext.ext_buf;
956 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
957 }
958
959 if (!sc->bge_rx_alignment_bug)
960 m_adj(m_new, ETHER_ALIGN);
961 /* Set up the descriptor. */
834 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
835 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
962 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
836 BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
837 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
838 r->bge_len = m_new->m_len;
839 r->bge_idx = i;
963 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
964 ctx.bge_maxsegs = 1;
965 ctx.sc = sc;
966 error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
967 sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
968 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
969 if (error || ctx.bge_maxsegs == 0) {
970 if (m == NULL)
971 m_freem(m_new);
972 return(ENOMEM);
973 }
974 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
975 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
976 r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
977 r->bge_len = htole16(m_new->m_len);
978 r->bge_idx = htole16(i);
840
979
980 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
981 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
982 BUS_DMASYNC_PREREAD);
983
841 return(0);
842}
843
844/*
845 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
846 * that's 1MB of memory, which is a lot. For now, we fill only the first
847 * 256 ring entries and hope that our CPU is fast enough to keep up with
848 * the NIC.

--- 4 unchanged lines hidden ---

853{
854 int i;
855
856 for (i = 0; i < BGE_SSLOTS; i++) {
857 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
858 return(ENOBUFS);
859 };
860
984 return(0);
985}
986
987/*
988 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
989 * that's 1MB of memory, which is a lot. For now, we fill only the first
990 * 256 ring entries and hope that our CPU is fast enough to keep up with
991 * the NIC.

--- 4 unchanged lines hidden ---

996{
997 int i;
998
999 for (i = 0; i < BGE_SSLOTS; i++) {
1000 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1001 return(ENOBUFS);
1002 };
1003
1004 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1005 sc->bge_cdata.bge_rx_std_ring_map,
1006 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1007
861 sc->bge_std = i - 1;
862 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
863
864 return(0);
865}
866
867static void
868bge_free_rx_ring_std(sc)
869 struct bge_softc *sc;
870{
871 int i;
872
873 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
874 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
875 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
876 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1008 sc->bge_std = i - 1;
1009 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1010
1011 return(0);
1012}
1013
1014static void
1015bge_free_rx_ring_std(sc)
1016 struct bge_softc *sc;
1017{
1018 int i;
1019
1020 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1021 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1022 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1023 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1024 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1025 sc->bge_cdata.bge_rx_std_dmamap[i]);
877 }
1026 }
878 bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
1027 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
879 sizeof(struct bge_rx_bd));
880 }
881
882 return;
883}
884
885static int
886bge_init_rx_ring_jumbo(sc)
887 struct bge_softc *sc;
888{
889 int i;
890 struct bge_rcb *rcb;
891
892 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
893 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
894 return(ENOBUFS);
895 };
896
1028 sizeof(struct bge_rx_bd));
1029 }
1030
1031 return;
1032}
1033
1034static int
1035bge_init_rx_ring_jumbo(sc)
1036 struct bge_softc *sc;
1037{
1038 int i;
1039 struct bge_rcb *rcb;
1040
1041 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1042 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1043 return(ENOBUFS);
1044 };
1045
1046 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1047 sc->bge_cdata.bge_rx_jumbo_ring_map,
1048 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1049
897 sc->bge_jumbo = i - 1;
898
1050 sc->bge_jumbo = i - 1;
1051
899 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1052 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
900 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
901 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
902
903 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
904
905 return(0);
906}
907
908static void
909bge_free_rx_ring_jumbo(sc)
910 struct bge_softc *sc;
911{
912 int i;
913
914 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
915 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
916 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
917 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1053 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1054 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1055
1056 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1057
1058 return(0);
1059}
1060
1061static void
1062bge_free_rx_ring_jumbo(sc)
1063 struct bge_softc *sc;
1064{
1065 int i;
1066
1067 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1068 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1069 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1070 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1071 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1072 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
918 }
1073 }
919 bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
1074 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
920 sizeof(struct bge_rx_bd));
921 }
922
923 return;
924}
925
926static void
927bge_free_tx_ring(sc)
928 struct bge_softc *sc;
929{
930 int i;
931
1075 sizeof(struct bge_rx_bd));
1076 }
1077
1078 return;
1079}
1080
1081static void
1082bge_free_tx_ring(sc)
1083 struct bge_softc *sc;
1084{
1085 int i;
1086
932 if (sc->bge_rdata->bge_tx_ring == NULL)
1087 if (sc->bge_ldata.bge_tx_ring == NULL)
933 return;
934
935 for (i = 0; i < BGE_TX_RING_CNT; i++) {
936 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
937 m_freem(sc->bge_cdata.bge_tx_chain[i]);
938 sc->bge_cdata.bge_tx_chain[i] = NULL;
1088 return;
1089
1090 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1091 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1092 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1093 sc->bge_cdata.bge_tx_chain[i] = NULL;
1094 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1095 sc->bge_cdata.bge_tx_dmamap[i]);
939 }
1096 }
940 bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
1097 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
941 sizeof(struct bge_tx_bd));
942 }
943
944 return;
945}
946
947static int
948bge_init_tx_ring(sc)

--- 270 unchanged lines hidden ---

1219
1220 if (i == BGE_TIMEOUT) {
1221 printf("bge%d: flow-through queue init failed\n",
1222 sc->bge_unit);
1223 return(ENXIO);
1224 }
1225
1226 /* Initialize the standard RX ring control block */
1098 sizeof(struct bge_tx_bd));
1099 }
1100
1101 return;
1102}
1103
1104static int
1105bge_init_tx_ring(sc)

--- 270 unchanged lines hidden ---

1376
1377 if (i == BGE_TIMEOUT) {
1378 printf("bge%d: flow-through queue init failed\n",
1379 sc->bge_unit);
1380 return(ENXIO);
1381 }
1382
1383 /* Initialize the standard RX ring control block */
1227 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1228 BGE_HOSTADDR(rcb->bge_hostaddr,
1229 vtophys(&sc->bge_rdata->bge_rx_std_ring));
1384 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1385 rcb->bge_hostaddr.bge_addr_lo =
1386 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1387 rcb->bge_hostaddr.bge_addr_hi =
1388 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1389 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1390 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1230 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1231 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1232 else
1233 rcb->bge_maxlen_flags =
1234 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1235 if (sc->bge_extram)
1236 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1237 else
1238 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1239 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1240 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1391 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1392 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1393 else
1394 rcb->bge_maxlen_flags =
1395 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1396 if (sc->bge_extram)
1397 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1398 else
1399 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1400 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1401 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1402
1241 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1242 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1243
1244 /*
1245 * Initialize the jumbo RX ring control block
1246 * We set the 'ring disabled' bit in the flags
1247 * field until we're actually ready to start
1248 * using this ring (i.e. once we set the MTU
1249 * high enough to require it).
1250 */
1251 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1403 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1404 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1405
1406 /*
1407 * Initialize the jumbo RX ring control block
1408 * We set the 'ring disabled' bit in the flags
1409 * field until we're actually ready to start
1410 * using this ring (i.e. once we set the MTU
1411 * high enough to require it).
1412 */
1413 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1252 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1253 BGE_HOSTADDR(rcb->bge_hostaddr,
1254 vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
1414 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1415
1416 rcb->bge_hostaddr.bge_addr_lo =
1417 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1418 rcb->bge_hostaddr.bge_addr_hi =
1419 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1420 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1421 sc->bge_cdata.bge_rx_jumbo_ring_map,
1422 BUS_DMASYNC_PREREAD);
1255 rcb->bge_maxlen_flags =
1256 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1257 BGE_RCB_FLAG_RING_DISABLED);
1258 if (sc->bge_extram)
1259 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1260 else
1261 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1262 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1263 rcb->bge_hostaddr.bge_addr_hi);
1264 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1265 rcb->bge_hostaddr.bge_addr_lo);
1423 rcb->bge_maxlen_flags =
1424 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1425 BGE_RCB_FLAG_RING_DISABLED);
1426 if (sc->bge_extram)
1427 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1428 else
1429 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1430 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1431 rcb->bge_hostaddr.bge_addr_hi);
1432 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1433 rcb->bge_hostaddr.bge_addr_lo);
1434
1266 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1267 rcb->bge_maxlen_flags);
1268 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1269
1270 /* Set up dummy disabled mini ring RCB */
1435 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1436 rcb->bge_maxlen_flags);
1437 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1438
1439 /* Set up dummy disabled mini ring RCB */
1271 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1440 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1272 rcb->bge_maxlen_flags =
1273 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1274 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1275 rcb->bge_maxlen_flags);
1276 }
1277
1278 /*
1279 * Set the BD ring replenish thresholds. The recommended

--- 15 unchanged lines hidden ---

1295 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1296 vrcb->bge_nicaddr = 0;
1297 vrcb++;
1298 }
1299
1300 /* Configure TX RCB 0 (we use only the first ring) */
1301 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1302 BGE_SEND_RING_RCB);
1441 rcb->bge_maxlen_flags =
1442 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1443 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1444 rcb->bge_maxlen_flags);
1445 }
1446
1447 /*
1448 * Set the BD ring replenish thresholds. The recommended

--- 15 unchanged lines hidden ---

1464 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1465 vrcb->bge_nicaddr = 0;
1466 vrcb++;
1467 }
1468
1469 /* Configure TX RCB 0 (we use only the first ring) */
1470 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1471 BGE_SEND_RING_RCB);
1303 vrcb->bge_hostaddr.bge_addr_hi = 0;
1304 BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
1472 vrcb->bge_hostaddr.bge_addr_lo =
1473 htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
1474 vrcb->bge_hostaddr.bge_addr_hi =
1475 htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
1305 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1306 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1307 vrcb->bge_maxlen_flags =
1308 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1309
1310 /* Disable all unused RX return rings */
1311 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1312 BGE_RX_RETURN_RING_RCB);

--- 17 unchanged lines hidden ---

1330 /*
1331 * Set up RX return ring 0
1332 * Note that the NIC address for RX return rings is 0x00000000.
1333 * The return rings live entirely within the host, so the
1334 * nicaddr field in the RCB isn't used.
1335 */
1336 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1337 BGE_RX_RETURN_RING_RCB);
1476 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1477 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1478 vrcb->bge_maxlen_flags =
1479 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1480
1481 /* Disable all unused RX return rings */
1482 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1483 BGE_RX_RETURN_RING_RCB);

--- 17 unchanged lines hidden ---

1501 /*
1502 * Set up RX return ring 0
1503 * Note that the NIC address for RX return rings is 0x00000000.
1504 * The return rings live entirely within the host, so the
1505 * nicaddr field in the RCB isn't used.
1506 */
1507 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1508 BGE_RX_RETURN_RING_RCB);
1338 vrcb->bge_hostaddr.bge_addr_hi = 0;
1339 BGE_HOSTADDR(vrcb->bge_hostaddr,
1340 vtophys(&sc->bge_rdata->bge_rx_return_ring));
1509 vrcb->bge_hostaddr.bge_addr_lo =
1510 BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
1511 vrcb->bge_hostaddr.bge_addr_hi =
1512 BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
1513 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1514 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
1341 vrcb->bge_nicaddr = 0x00000000;
1342 vrcb->bge_maxlen_flags =
1343 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1344
1345 /* Set random backoff seed for TX */
1346 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1347 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1348 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +

--- 44 unchanged lines hidden (view full) ---

1393 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1394 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1395 }
1396 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1397 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1398
1399 /* Set up address of statistics block */
1400 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1515 vrcb->bge_nicaddr = 0x00000000;
1516 vrcb->bge_maxlen_flags =
1517 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1518
1519 /* Set random backoff seed for TX */
1520 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1521 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1522 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +

--- 44 unchanged lines hidden ---

1567 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1568 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1569 }
1570 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1571 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1572
1573 /* Set up address of statistics block */
1574 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1401 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1575 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1576 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1402 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1577 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1403 vtophys(&sc->bge_rdata->bge_info.bge_stats));
1404
1578 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1405 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1406 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1407 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1408 }
1409
1410 /* Set up address of status block */
1579 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1580 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1581 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1582 }
1583
1584 /* Set up address of status block */
1411 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
1585 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1586 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1412 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1587 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1413 vtophys(&sc->bge_rdata->bge_status_block));
1588 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1589 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1590 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
1591 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1592 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1414
1593
1415 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1416 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1417
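Every host address handed to the chip is programmed as two 32-bit halves. The BGE_ADDR_HI()/BGE_ADDR_LO() helpers used throughout presumably reduce to the obvious shift and mask, consistent with their use here (a sketch; see if_bgereg.h for the real definitions):

	#define BGE_ADDR_HI(y)	((u_int64_t)(y) >> 32)		/* upper 32 bits */
	#define BGE_ADDR_LO(y)	((u_int64_t)(y) & 0xffffffff)	/* lower 32 bits */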
1418 /* Turn on host coalescing state machine */
1419 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1420
1421 /* Turn on RX BD completion state machine and enable attentions */
1422 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1423 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1424
1425 /* Turn on RX list placement state machine */

--- 64 unchanged lines hidden ---

1490 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1491 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1492 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1493
1494 /* ack/clear link change events */
1495 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1496 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1497 BGE_MACSTAT_LINK_CHANGED);
1594 /* Turn on host coalescing state machine */
1595 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1596
1597 /* Turn on RX BD completion state machine and enable attentions */
1598 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1599 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1600
1601 /* Turn on RX list placement state machine */

--- 64 unchanged lines hidden ---

1666 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1667 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1668 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1669
1670 /* ack/clear link change events */
1671 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1672 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1673 BGE_MACSTAT_LINK_CHANGED);
1674 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1498
1499 /* Enable PHY auto polling (for MII/GMII only) */
1500 if (sc->bge_tbi) {
1501 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1502 } else {
1503 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1504 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1505 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,

--- 47 unchanged lines hidden ---

1553 return(0);
1554 }
1555 t++;
1556 }
1557
1558 return(ENXIO);
1559}
1560
1675
1676 /* Enable PHY auto polling (for MII/GMII only) */
1677 if (sc->bge_tbi) {
1678 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1679 } else {
1680 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1681 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1682 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,

--- 47 unchanged lines hidden ---

1730 return(0);
1731 }
1732 t++;
1733 }
1734
1735 return(ENXIO);
1736}
1737
1738static void
1739bge_dma_free(sc)
1740 struct bge_softc *sc;
1741{
1742 int i;
1743
1744
1745 /* Destroy DMA maps for RX buffers */
1746
1747 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1748 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1749 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1750 sc->bge_cdata.bge_rx_std_dmamap[i]);
1751 }
1752
1753 /* Destroy DMA maps for jumbo RX buffers */
1754
1755 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1756 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1757 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1758 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1759 }
1760
1761 /* Destroy DMA maps for TX buffers */
1762
1763 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1764 if (sc->bge_cdata.bge_tx_dmamap[i])
1765 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1766 sc->bge_cdata.bge_tx_dmamap[i]);
1767 }
1768
1769 if (sc->bge_cdata.bge_mtag)
1770 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1771
1772
1773 /* Destroy standard RX ring */
1774
1775 if (sc->bge_ldata.bge_rx_std_ring)
1776 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1777 sc->bge_ldata.bge_rx_std_ring,
1778 sc->bge_cdata.bge_rx_std_ring_map);
1779
1780 if (sc->bge_cdata.bge_rx_std_ring_map) {
1781 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1782 sc->bge_cdata.bge_rx_std_ring_map);
1783 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
1784 sc->bge_cdata.bge_rx_std_ring_map);
1785 }
1786
1787 if (sc->bge_cdata.bge_rx_std_ring_tag)
1788 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1789
1790 /* Destroy jumbo RX ring */
1791
1792 if (sc->bge_ldata.bge_rx_jumbo_ring)
1793 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1794 sc->bge_ldata.bge_rx_jumbo_ring,
1795 sc->bge_cdata.bge_rx_jumbo_ring_map);
1796
1797 if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
1798 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1799 sc->bge_cdata.bge_rx_jumbo_ring_map);
1800 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1801 sc->bge_cdata.bge_rx_jumbo_ring_map);
1802 }
1803
1804 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1805 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1806
1807 /* Destroy RX return ring */
1808
1809 if (sc->bge_ldata.bge_rx_return_ring)
1810 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1811 sc->bge_ldata.bge_rx_return_ring,
1812 sc->bge_cdata.bge_rx_return_ring_map);
1813
1814 if (sc->bge_cdata.bge_rx_return_ring_map) {
1815 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1816 sc->bge_cdata.bge_rx_return_ring_map);
1817 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
1818 sc->bge_cdata.bge_rx_return_ring_map);
1819 }
1820
1821 if (sc->bge_cdata.bge_rx_return_ring_tag)
1822 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1823
1824 /* Destroy TX ring */
1825
1826 if (sc->bge_ldata.bge_tx_ring)
1827 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1828 sc->bge_ldata.bge_tx_ring,
1829 sc->bge_cdata.bge_tx_ring_map);
1830
1831 if (sc->bge_cdata.bge_tx_ring_map) {
1832 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1833 sc->bge_cdata.bge_tx_ring_map);
1834 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
1835 sc->bge_cdata.bge_tx_ring_map);
1836 }
1837
1838 if (sc->bge_cdata.bge_tx_ring_tag)
1839 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1840
1841 /* Destroy status block */
1842
1843 if (sc->bge_ldata.bge_status_block)
1844 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1845 sc->bge_ldata.bge_status_block,
1846 sc->bge_cdata.bge_status_map);
1847
1848 if (sc->bge_cdata.bge_status_map) {
1849 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1850 sc->bge_cdata.bge_status_map);
1851 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
1852 sc->bge_cdata.bge_status_map);
1853 }
1854
1855 if (sc->bge_cdata.bge_status_tag)
1856 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1857
1858 /* Destroy statistics block */
1859
1860 if (sc->bge_ldata.bge_stats)
1861 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1862 sc->bge_ldata.bge_stats,
1863 sc->bge_cdata.bge_stats_map);
1864
1865 if (sc->bge_cdata.bge_stats_map) {
1866 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1867 sc->bge_cdata.bge_stats_map);
1868 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
1869 sc->bge_cdata.bge_stats_map);
1870 }
1871
1872 if (sc->bge_cdata.bge_stats_tag)
1873 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1874
1875 /* Destroy the parent tag */
1876
1877 if (sc->bge_cdata.bge_parent_tag)
1878 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1879
1880 return;
1881}
1882
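bge_dma_free() above simply walks back through everything bge_dma_alloc() below sets up; each ring follows the same busdma lifecycle. A condensed sketch of that lifecycle for a single ring, with error handling elided (size, tag, map, vaddr and ctx are placeholders):

	/* create tag -> allocate memory and map -> load to learn bus address */
	bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, &tag);
	bus_dmamem_alloc(tag, (void **)&vaddr, BUS_DMA_NOWAIT, &map);
	bus_dmamap_load(tag, map, vaddr, size, bge_dma_map_addr, &ctx,
	    BUS_DMA_NOWAIT);		/* ctx.bge_busaddr = ring bus address */
	/* ... ring is live ... */
	bus_dmamap_unload(tag, map);	/* teardown runs in reverse order */
	bus_dmamem_free(tag, vaddr, map);
	bus_dma_tag_destroy(tag);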
1561static int
1883static int
1884bge_dma_alloc(dev)
1885 device_t dev;
1886{
1887 struct bge_softc *sc;
1888 int nseg, i, error;
1889 struct bge_dmamap_arg ctx;
1890
1891 sc = device_get_softc(dev);
1892
1893 /*
1894 * Allocate the parent bus DMA tag appropriate for PCI.
1895 */
1896#define BGE_NSEG_NEW 32
1897 error = bus_dma_tag_create(NULL, /* parent */
1898 PAGE_SIZE, 0, /* alignment, boundary */
1899 BUS_SPACE_MAXADDR, /* lowaddr */
1900 BUS_SPACE_MAXADDR_32BIT,/* highaddr */
1901 NULL, NULL, /* filter, filterarg */
1902 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1903 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1904 BUS_DMA_ALLOCNOW, /* flags */
1905 NULL, NULL, /* lockfunc, lockarg */
1906 &sc->bge_cdata.bge_parent_tag);
1907
1908 /*
1909 * Create tag for RX mbufs.
1910 */
1911 nseg = 32;
1912 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, ETHER_ALIGN,
1913 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1914 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
1915 &sc->bge_cdata.bge_mtag);
1916
1917 if (error) {
1918 device_printf(dev, "could not allocate dma tag\n");
1919 return (ENOMEM);
1920 }
1921
1922 /* Create DMA maps for RX buffers */
1923
1924 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1925 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1926 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1927 if (error) {
1928 device_printf(dev, "can't create DMA map for RX\n");
1929 return(ENOMEM);
1930 }
1931 }
1932
1933 /* Create DMA maps for TX buffers */
1934
1935 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1936 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1937 &sc->bge_cdata.bge_tx_dmamap[i]);
1938 if (error) {
1939 			device_printf(dev, "can't create DMA map for TX\n");
1940 return(ENOMEM);
1941 }
1942 }
1943
1944 /* Create tag for standard RX ring */
1945
1946 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1947 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1948 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1949 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1950
1951 if (error) {
1952 device_printf(dev, "could not allocate dma tag\n");
1953 return (ENOMEM);
1954 }
1955
1956 /* Allocate DMA'able memory for standard RX ring */
1957
1958 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1959 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1960 &sc->bge_cdata.bge_rx_std_ring_map);
1961 if (error)
1962 return (ENOMEM);
1963
1964 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1965
1966 /* Load the address of the standard RX ring */
1967
1968 ctx.bge_maxsegs = 1;
1969 ctx.sc = sc;
1970
1971 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1972 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1973 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1974
1975 if (error)
1976 return (ENOMEM);
1977
1978 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1979
1980 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1981
1982 /*
1983 * Create tag for jumbo mbufs.
1984 * This is really a bit of a kludge. We allocate a special
1985 * jumbo buffer pool which (thanks to the way our DMA
1986 * memory allocation works) will consist of contiguous
1987 * pages. This means that even though a jumbo buffer might
1988 * be larger than a page size, we don't really need to
1989 * map it into more than one DMA segment. However, the
1990 * default mbuf tag will result in multi-segment mappings,
1991 * so we have to create a special jumbo mbuf tag that
1992 * lets us get away with mapping the jumbo buffers as
1993 * a single segment. I think eventually the driver should
1994 * be changed so that it uses ordinary mbufs and cluster
1995 * buffers, i.e. jumbo frames can span multiple DMA
1996 * descriptors. But that's a project for another day.
1997 */
1998
1999 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2000 ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2001 NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
2002 &sc->bge_cdata.bge_mtag_jumbo);
2003
2004 if (error) {
2005 device_printf(dev, "could not allocate dma tag\n");
2006 return (ENOMEM);
2007 }
2008
2009 /* Create tag for jumbo RX ring */
2010
2011 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2012 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2013 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2014 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2015
2016 if (error) {
2017 device_printf(dev, "could not allocate dma tag\n");
2018 return (ENOMEM);
2019 }
2020
2021 /* Allocate DMA'able memory for jumbo RX ring */
2022
2023 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2024 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
2025 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2026 if (error)
2027 return (ENOMEM);
2028
2029 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
2030 BGE_JUMBO_RX_RING_SZ);
2031
2032 /* Load the address of the jumbo RX ring */
2033
2034 ctx.bge_maxsegs = 1;
2035 ctx.sc = sc;
2036
2037 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2038 sc->bge_cdata.bge_rx_jumbo_ring_map,
2039 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2040 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2041
2042 if (error)
2043 return (ENOMEM);
2044
2045 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2046
2047 /* Create DMA maps for jumbo RX buffers */
2048
2049 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2050 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2051 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2052 if (error) {
2053 device_printf(dev,
2054 "can't create DMA map for RX\n");
2055 return(ENOMEM);
2056 }
2057 }
2058
2059 }
2060
2061 /* Create tag for RX return ring */
2062
2063 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2064 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2065 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2066 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2067
2068 if (error) {
2069 device_printf(dev, "could not allocate dma tag\n");
2070 return (ENOMEM);
2071 }
2072
2073 /* Allocate DMA'able memory for RX return ring */
2074
2075 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2076 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2077 &sc->bge_cdata.bge_rx_return_ring_map);
2078 if (error)
2079 return (ENOMEM);
2080
2081 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2082 BGE_RX_RTN_RING_SZ(sc));
2083
2084 /* Load the address of the RX return ring */
2085
2086 ctx.bge_maxsegs = 1;
2087 ctx.sc = sc;
2088
2089 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2090 sc->bge_cdata.bge_rx_return_ring_map,
2091 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2092 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2093
2094 if (error)
2095 return (ENOMEM);
2096
2097 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2098
2099 /* Create tag for TX ring */
2100
2101 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2102 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2103 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2104 &sc->bge_cdata.bge_tx_ring_tag);
2105
2106 if (error) {
2107 device_printf(dev, "could not allocate dma tag\n");
2108 return (ENOMEM);
2109 }
2110
2111 /* Allocate DMA'able memory for TX ring */
2112
2113 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2114 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2115 &sc->bge_cdata.bge_tx_ring_map);
2116 if (error)
2117 return (ENOMEM);
2118
2119 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2120
2121 /* Load the address of the TX ring */
2122
2123 ctx.bge_maxsegs = 1;
2124 ctx.sc = sc;
2125
2126 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2127 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2128 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2129
2130 if (error)
2131 return (ENOMEM);
2132
2133 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2134
2135 /* Create tag for status block */
2136
2137 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2138 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2139 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2140 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2141
2142 if (error) {
2143 device_printf(dev, "could not allocate dma tag\n");
2144 return (ENOMEM);
2145 }
2146
2147 /* Allocate DMA'able memory for status block */
2148
2149 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2150 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2151 &sc->bge_cdata.bge_status_map);
2152 if (error)
2153 return (ENOMEM);
2154
2155 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2156
2157 /* Load the address of the status block */
2158
2159 ctx.sc = sc;
2160 ctx.bge_maxsegs = 1;
2161
2162 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2163 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2164 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2165
2166 if (error)
2167 return (ENOMEM);
2168
2169 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2170
2171 /* Create tag for statistics block */
2172
2173 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2174 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2175 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2176 &sc->bge_cdata.bge_stats_tag);
2177
2178 if (error) {
2179 device_printf(dev, "could not allocate dma tag\n");
2180 return (ENOMEM);
2181 }
2182
2183 /* Allocate DMA'able memory for statistics block */
2184
2185 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2186 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2187 &sc->bge_cdata.bge_stats_map);
2188 if (error)
2189 return (ENOMEM);
2190
2191 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2192
2193 	/* Load the address of the statistics block */
2194
2195 ctx.sc = sc;
2196 ctx.bge_maxsegs = 1;
2197
2198 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2199 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2200 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2201
2202 if (error)
2203 return (ENOMEM);
2204
2205 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2206
2207 return(0);
2208}
2209
2210static int
1562bge_attach(dev)
1563 device_t dev;
1564{
1565 int s;
1566 struct ifnet *ifp;
1567 struct bge_softc *sc;
1568 u_int32_t hwcfg = 0;
1569 u_int32_t mac_addr = 0;

--- 79 unchanged lines hidden ---

1649 }
1650
1651 /*
1652 * A Broadcom chip was detected. Inform the world.
1653 */
1654 printf("bge%d: Ethernet address: %6D\n", unit,
1655 sc->arpcom.ac_enaddr, ":");
1656
2211bge_attach(dev)
2212 device_t dev;
2213{
2214 int s;
2215 struct ifnet *ifp;
2216 struct bge_softc *sc;
2217 u_int32_t hwcfg = 0;
2218 u_int32_t mac_addr = 0;

--- 79 unchanged lines hidden (view full) ---

2298 }
2299
2300 /*
2301 * A Broadcom chip was detected. Inform the world.
2302 */
2303 printf("bge%d: Ethernet address: %6D\n", unit,
2304 sc->arpcom.ac_enaddr, ":");
2305
1657 /* Allocate the general information block and ring buffers. */
1658 sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1659 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1660
1661 if (sc->bge_rdata == NULL) {
1662 bge_release_resources(sc);
1663 error = ENXIO;
1664 printf("bge%d: no memory for list buffers!\n", sc->bge_unit);
1665 goto fail;
1666 }
1667
1668 bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1669
1670 /* Save ASIC rev. */
1671
1672 sc->bge_chipid =
1673 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1674 BGE_PCIMISCCTL_ASICREV;
1675 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1676 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1677
2306 /* Save ASIC rev. */
2307
2308 sc->bge_chipid =
2309 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2310 BGE_PCIMISCCTL_ASICREV;
2311 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2312 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2313
2314 /* 5705 limits RX return ring to 512 entries. */
2315 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2316 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2317 else
2318 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2319
2320 if (bge_dma_alloc(dev)) {
2321 printf ("bge%d: failed to allocate DMA resources\n",
2322 sc->bge_unit);
2323 bge_release_resources(sc);
2324 error = ENXIO;
2325 goto fail;
2326 }
2327
1678 /*
1679 * Try to allocate memory for jumbo buffers.
1680 * The 5705 does not appear to support jumbo frames.
1681 */
1682 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1683 if (bge_alloc_jumbo_mem(sc)) {
1684 printf("bge%d: jumbo buffer allocation "
1685 "failed\n", sc->bge_unit);

--- 5 unchanged lines hidden ---

1691
1692 /* Set default tuneable values. */
1693 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1694 sc->bge_rx_coal_ticks = 150;
1695 sc->bge_tx_coal_ticks = 150;
1696 sc->bge_rx_max_coal_bds = 64;
1697 sc->bge_tx_max_coal_bds = 128;
1698
2328 /*
2329 * Try to allocate memory for jumbo buffers.
2330 * The 5705 does not appear to support jumbo frames.
2331 */
2332 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2333 if (bge_alloc_jumbo_mem(sc)) {
2334 printf("bge%d: jumbo buffer allocation "
2335 "failed\n", sc->bge_unit);

--- 5 unchanged lines hidden (view full) ---

2341
2342 /* Set default tuneable values. */
2343 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2344 sc->bge_rx_coal_ticks = 150;
2345 sc->bge_tx_coal_ticks = 150;
2346 sc->bge_rx_max_coal_bds = 64;
2347 sc->bge_tx_max_coal_bds = 128;
2348
1699 /* 5705 limits RX return ring to 512 entries. */
1700 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1701 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1702 else
1703 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1704
1705 /* Set up ifnet structure */
1706 ifp = &sc->arpcom.ac_if;
1707 ifp->if_softc = sc;
1708 ifp->if_unit = sc->bge_unit;
1709 ifp->if_name = "bge";
1710 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1711 ifp->if_ioctl = bge_ioctl;
1712 ifp->if_output = ether_output;

--- 138 unchanged lines hidden ---

1851
1852 if (sc->bge_irq != NULL)
1853 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1854
1855 if (sc->bge_res != NULL)
1856 bus_release_resource(dev, SYS_RES_MEMORY,
1857 BGE_PCI_BAR0, sc->bge_res);
1858
2349 /* Set up ifnet structure */
2350 ifp = &sc->arpcom.ac_if;
2351 ifp->if_softc = sc;
2352 ifp->if_unit = sc->bge_unit;
2353 ifp->if_name = "bge";
2354 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2355 ifp->if_ioctl = bge_ioctl;
2356 ifp->if_output = ether_output;

--- 138 unchanged lines hidden ---

2495
2496 if (sc->bge_irq != NULL)
2497 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2498
2499 if (sc->bge_res != NULL)
2500 bus_release_resource(dev, SYS_RES_MEMORY,
2501 BGE_PCI_BAR0, sc->bge_res);
2502
1859 if (sc->bge_rdata != NULL)
1860 contigfree(sc->bge_rdata,
1861 sizeof(struct bge_ring_data), M_DEVBUF);
2503 bge_dma_free(sc);
1862
1863 return;
1864}
1865
1866static void
1867bge_reset(sc)
1868 struct bge_softc *sc;
1869{

--- 91 unchanged lines hidden ---

1961bge_rxeof(sc)
1962 struct bge_softc *sc;
1963{
1964 struct ifnet *ifp;
1965 int stdcnt = 0, jumbocnt = 0;
1966
1967 ifp = &sc->arpcom.ac_if;
1968
2504
2505 return;
2506}
2507
2508static void
2509bge_reset(sc)
2510 struct bge_softc *sc;
2511{

--- 91 unchanged lines hidden ---

2603bge_rxeof(sc)
2604 struct bge_softc *sc;
2605{
2606 struct ifnet *ifp;
2607 int stdcnt = 0, jumbocnt = 0;
2608
2609 ifp = &sc->arpcom.ac_if;
2610
2611 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2612 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2613 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2614 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2615 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2616 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2617 sc->bge_cdata.bge_rx_jumbo_ring_map,
2618 BUS_DMASYNC_POSTREAD);
2619 }
2620
1969 while(sc->bge_rx_saved_considx !=
2621 while(sc->bge_rx_saved_considx !=
1970 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2622 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
1971 struct bge_rx_bd *cur_rx;
1972 u_int32_t rxidx;
1973 struct ether_header *eh;
1974 struct mbuf *m = NULL;
1975 u_int16_t vlan_tag = 0;
1976 int have_tag = 0;
1977
1978 cur_rx =
2623 struct bge_rx_bd *cur_rx;
2624 u_int32_t rxidx;
2625 struct ether_header *eh;
2626 struct mbuf *m = NULL;
2627 u_int16_t vlan_tag = 0;
2628 int have_tag = 0;
2629
2630 cur_rx =
1979 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
2631 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
1980
1981 rxidx = cur_rx->bge_idx;
1982 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
1983
1984 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
1985 have_tag = 1;
1986 vlan_tag = cur_rx->bge_vlan_tag;
1987 }
1988
1989 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
1990 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2632
2633 rxidx = cur_rx->bge_idx;
2634 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2635
2636 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2637 have_tag = 1;
2638 vlan_tag = cur_rx->bge_vlan_tag;
2639 }
2640
2641 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2642 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2643 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2644 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2645 BUS_DMASYNC_POSTREAD);
2646 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2647 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
1991 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
1992 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
1993 jumbocnt++;
1994 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1995 ifp->if_ierrors++;
1996 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1997 continue;
1998 }
1999 if (bge_newbuf_jumbo(sc,
2000 sc->bge_jumbo, NULL) == ENOBUFS) {
2001 ifp->if_ierrors++;
2002 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2003 continue;
2004 }
2005 } else {
2006 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2648 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2649 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2650 jumbocnt++;
2651 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2652 ifp->if_ierrors++;
2653 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2654 continue;
2655 }
2656 if (bge_newbuf_jumbo(sc,
2657 sc->bge_jumbo, NULL) == ENOBUFS) {
2658 ifp->if_ierrors++;
2659 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2660 continue;
2661 }
2662 } else {
2663 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2664 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2665 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2666 BUS_DMASYNC_POSTREAD);
2667 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2668 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2007 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2008 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2009 stdcnt++;
2010 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2011 ifp->if_ierrors++;
2012 bge_newbuf_std(sc, sc->bge_std, m);
2013 continue;
2014 }

--- 39 unchanged lines hidden ---

2054 * attach that information to the packet.
2055 */
2056 if (have_tag)
2057 VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2058
2059 (*ifp->if_input)(ifp, m);
2060 }
2061
2669 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2670 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2671 stdcnt++;
2672 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2673 ifp->if_ierrors++;
2674 bge_newbuf_std(sc, sc->bge_std, m);
2675 continue;
2676 }

--- 39 unchanged lines hidden (view full) ---

2716 * attach that information to the packet.
2717 */
2718 if (have_tag)
2719 VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2720
2721 (*ifp->if_input)(ifp, m);
2722 }
2723
2724 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2725 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2726 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2727 sc->bge_cdata.bge_rx_std_ring_map,
2728 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2729 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2730 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2731 sc->bge_cdata.bge_rx_jumbo_ring_map,
2732 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2733 }
2734
2062 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2063 if (stdcnt)
2064 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2065 if (jumbocnt)
2066 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2067
2068 return;
2069}

--- 7 unchanged lines hidden (view full) ---

2077
2078 ifp = &sc->arpcom.ac_if;
2079
2080 /*
2081 * Go through our tx ring and free mbufs for those
2082 * frames that have been sent.
2083 */
2084 while (sc->bge_tx_saved_considx !=
2735 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2736 if (stdcnt)
2737 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2738 if (jumbocnt)
2739 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2740
2741 return;
2742}

--- 7 unchanged lines hidden ---
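
The new bge_rxeof() brackets its ring accesses with bus_dmamap_sync():
POSTREAD/POSTWRITE before the CPU examines descriptors the chip has
produced, then PREREAD/PREWRITE once the CPU has finished recycling and
refilling them. A minimal sketch of that bracket, assuming a ring tag
and map created elsewhere with bus_dma_tag_create()/bus_dmamap_create();
the helper name is illustrative, not the driver's:

	#include <sys/param.h>
	#include <machine/bus.h>

	static void
	rx_ring_sync_bracket(bus_dma_tag_t tag, bus_dmamap_t map)
	{
		/* Make the chip's descriptor writes visible to the CPU. */
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);

		/* ... walk the return ring, recycle and refill buffers ... */

		/* Push CPU updates back before the chip runs again. */
		bus_dmamap_sync(tag, map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
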

2750
2751 ifp = &sc->arpcom.ac_if;
2752
2753 /*
2754 * Go through our tx ring and free mbufs for those
2755 * frames that have been sent.
2756 */
2757 while (sc->bge_tx_saved_considx !=
2085 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2758 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2086 u_int32_t idx = 0;
2087
2088 idx = sc->bge_tx_saved_considx;
2759 u_int32_t idx = 0;
2760
2761 idx = sc->bge_tx_saved_considx;
2089 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2762 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2090 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2091 ifp->if_opackets++;
2092 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2093 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2094 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2763 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2764 ifp->if_opackets++;
2765 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2766 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2767 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2768 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2769 sc->bge_cdata.bge_tx_dmamap[idx]);
2095 }
2096 sc->bge_txcnt--;
2097 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2098 ifp->if_timer = 0;
2099 }
2100
2101 if (cur_tx != NULL)
2102 ifp->if_flags &= ~IFF_OACTIVE;

--- 7 unchanged lines hidden ---

2110{
2111 struct bge_softc *sc;
2112 struct ifnet *ifp;
2113 u_int32_t statusword;
2114 u_int32_t status;
2115
2116 sc = xsc;
2117 ifp = &sc->arpcom.ac_if;
2770 }
2771 sc->bge_txcnt--;
2772 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2773 ifp->if_timer = 0;
2774 }
2775
2776 if (cur_tx != NULL)
2777 ifp->if_flags &= ~IFF_OACTIVE;

--- 7 unchanged lines hidden ---
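
bge_txeof() now pairs each completed transmission with a
bus_dmamap_unload() before the mbuf chain is freed, so no DMA mapping
outlives the buffer it described. A minimal sketch of that per-slot
cleanup, with hypothetical tag/map/chain names:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/mbuf.h>
	#include <machine/bus.h>

	static void
	tx_slot_complete(bus_dma_tag_t mtag, bus_dmamap_t map,
	    struct mbuf **chain)
	{
		if (*chain == NULL)
			return;
		bus_dmamap_unload(mtag, map);	/* tear down the mapping */
		m_freem(*chain);		/* then release the chain */
		*chain = NULL;
	}
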

2785{
2786 struct bge_softc *sc;
2787 struct ifnet *ifp;
2788 u_int32_t statusword;
2789 u_int32_t status;
2790
2791 sc = xsc;
2792 ifp = &sc->arpcom.ac_if;
2793
2794 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2795 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2796
2118 statusword =
2119 atomic_readandclear_32(&sc->bge_rdata->bge_status_block.bge_status);
2797 statusword =
2798 atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2120
2121#ifdef notdef
2122 /* Avoid this for now -- checking this register is expensive. */
2123 /* Make sure this is really our interrupt. */
2124 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2125 return;
2126#endif
2127 /* Ack interrupt and stop others from occurring. */

--- 58 unchanged lines hidden ---

2186 if (ifp->if_flags & IFF_RUNNING) {
2187 /* Check RX return ring producer/consumer */
2188 bge_rxeof(sc);
2189
2190 /* Check TX ring producer/consumer */
2191 bge_txeof(sc);
2192 }
2193
2799
2800#ifdef notdef
2801 /* Avoid this for now -- checking this register is expensive. */
2802 /* Make sure this is really our interrupt. */
2803 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2804 return;
2805#endif
2806 /* Ack interrupt and stop others from occurring. */

--- 58 unchanged lines hidden (view full) ---

2865 if (ifp->if_flags & IFF_RUNNING) {
2866 /* Check RX return ring producer/consumer */
2867 bge_rxeof(sc);
2868
2869 /* Check TX ring producer/consumer */
2870 bge_txeof(sc);
2871 }
2872
2873 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2874 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2875
2194 bge_handle_events(sc);
2195
2196 /* Re-enable interrupts. */
2197 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2198
2199 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
2200 bge_start(ifp);
2201

--- 122 unchanged lines hidden ---

2324 */
2325static int
2326bge_encap(sc, m_head, txidx)
2327 struct bge_softc *sc;
2328 struct mbuf *m_head;
2329 u_int32_t *txidx;
2330{
2331 struct bge_tx_bd *f = NULL;
2876 bge_handle_events(sc);
2877
2878 /* Re-enable interrupts. */
2879 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2880
2881 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
2882 bge_start(ifp);
2883

--- 122 unchanged lines hidden ---
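
The interrupt handler applies the same discipline to the status block:
sync before snapshotting it with atomic_readandclear_32(), sync again
after servicing, using exactly the POSTWRITE/PREWRITE ops shown in the
diff. A sketch of that bracket, with illustrative names:

	#include <sys/param.h>
	#include <machine/atomic.h>
	#include <machine/bus.h>

	static u_int32_t
	status_block_snapshot(bus_dma_tag_t tag, bus_dmamap_t map,
	    volatile u_int32_t *status)
	{
		u_int32_t word;

		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
		word = atomic_readandclear_32(status);
		/* ... dispatch RX/TX processing based on "word" ... */
		bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
		return (word);
	}
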

3006 */
3007static int
3008bge_encap(sc, m_head, txidx)
3009 struct bge_softc *sc;
3010 struct mbuf *m_head;
3011 u_int32_t *txidx;
3012{
3013 struct bge_tx_bd *f = NULL;
2332 struct mbuf *m;
2333 u_int32_t frag, cur, cnt = 0;
2334 u_int16_t csum_flags = 0;
2335 struct m_tag *mtag;
3014 u_int16_t csum_flags = 0;
3015 struct m_tag *mtag;
3016 struct bge_dmamap_arg ctx;
3017 bus_dmamap_t map;
3018 int error;
2336
3019
2337 m = m_head;
2338 cur = frag = *txidx;
2339
2340 if (m_head->m_pkthdr.csum_flags) {
2341 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2342 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2343 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2344 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2345 if (m_head->m_flags & M_LASTFRAG)
2346 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2347 else if (m_head->m_flags & M_FRAG)
2348 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2349 }
2350
3020
3021 if (m_head->m_pkthdr.csum_flags) {
3022 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3023 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3024 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3025 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3026 if (m_head->m_flags & M_LASTFRAG)
3027 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3028 else if (m_head->m_flags & M_FRAG)
3029 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3030 }
3031
2351 mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m);
3032 mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
2352
3033
3034 ctx.sc = sc;
3035 ctx.bge_idx = *txidx;
3036 ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3037 ctx.bge_flags = csum_flags;
2353 /*
2354 * Start packing the mbufs in this chain into
2355 * the fragment pointers. Stop when we run out
2356 * of fragments or hit the end of the mbuf chain.
2357 */
3038 /*
3039 * Sanity check: avoid coming within 16 descriptors
3040 * of the end of the ring.
3041 */
2358 for (m = m_head; m != NULL; m = m->m_next) {
2359 if (m->m_len != 0) {
2360 f = &sc->bge_rdata->bge_tx_ring[frag];
2361 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2362 break;
2363 BGE_HOSTADDR(f->bge_addr,
2364 vtophys(mtod(m, vm_offset_t)));
2365 f->bge_len = m->m_len;
2366 f->bge_flags = csum_flags;
2367 if (mtag != NULL) {
2368 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2369 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
2370 } else {
2371 f->bge_vlan_tag = 0;
2372 }
2373 /*
2374 * Sanity check: avoid coming within 16 descriptors
2375 * of the end of the ring.
2376 */
2377 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2378 return(ENOBUFS);
2379 cur = frag;
2380 BGE_INC(frag, BGE_TX_RING_CNT);
2381 cnt++;
2382 }
2383 }
3042 ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
2384
3043
2385 if (m != NULL)
2386 return(ENOBUFS);
3044 map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3045 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3046 m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
2387
3047
2388 if (frag == sc->bge_tx_saved_considx)
2389 return(ENOBUFS);
3048 if (error || ctx.bge_maxsegs == 0 /*||
3049 ctx.bge_idx == sc->bge_tx_saved_considx*/)
3050 return (ENOBUFS);
2390
3051
2391 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2392 sc->bge_cdata.bge_tx_chain[cur] = m_head;
2393 sc->bge_txcnt += cnt;
3052 /*
3053 * Ensure that the map for this transmission
3054 * is placed at the array index of the last descriptor
3055 * in this chain.
3056 */
3057 sc->bge_cdata.bge_tx_dmamap[*txidx] =
3058 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3059 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3060 sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3061 sc->bge_txcnt += ctx.bge_maxsegs;
3062 f = &sc->bge_ldata.bge_tx_ring[*txidx];
3063 if (mtag != NULL) {
3064 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3065 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3066 } else {
3067 f->bge_vlan_tag = 0;
3068 }
2394
3069
2395 *txidx = frag;
3070 BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3071 *txidx = ctx.bge_idx;
2396
2397 return(0);
2398}
2399
2400/*
2401 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2402 * to the mbuf data regions directly in the transmit descriptors.
2403 */

--- 496 unchanged lines hidden ---
3072
3073 return(0);
3074}
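
bge_encap() no longer walks the chain with vtophys(); it hands the
whole mbuf to bus_dmamap_load_mbuf(), whose callback fills the
descriptors and reports back through the bge_dmamap_arg block. A sketch
of a callback with the bus_dmamap_callback2_t shape that interface
requires; the struct and names below are illustrative stand-ins, not
the driver's definitions:

	#include <sys/param.h>
	#include <machine/bus.h>

	struct encap_arg {		/* stand-in for bge_dmamap_arg */
		u_int32_t idx;		/* in: first slot; out: last used */
		int	maxsegs;	/* in: room left; out: 0 = overflow */
	};

	static void
	encap_load_cb(void *xarg, bus_dma_segment_t *segs, int nseg,
	    bus_size_t mapsize, int error)
	{
		struct encap_arg *arg = xarg;

		if (error != 0 || nseg > arg->maxsegs) {
			arg->maxsegs = 0;  /* caller returns ENOBUFS */
			return;
		}
		/* Real callback: one bge_tx_bd per segs[i], advancing idx. */
		arg->maxsegs = nseg;
	}

The map swap after the load keeps each dmamap stored at the array index
of its chain's last descriptor, which is exactly the slot bge_txeof()
will bus_dmamap_unload() when that descriptor is reported complete.
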
3075
3076/*
3077 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3078 * to the mbuf data regions directly in the transmit descriptors.
3079 */

--- 496 unchanged lines hidden ---
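
Since the zero-copy design means bge_start() only moves mbuf pointers,
its drain loop (hidden above) reduces to dequeue-and-encap: a packet
that cannot be encapsulated is put back on the send queue and
IFF_OACTIVE is set until bge_txeof() clears it. A simplified sketch,
with the producer-index handling reduced to a caller-supplied value
(the driver obtains the real index elsewhere):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/mbuf.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <net/if_var.h>

	struct bge_softc;		/* opaque here */
	extern int bge_encap(struct bge_softc *, struct mbuf *,
	    u_int32_t *);

	static void
	start_sketch(struct ifnet *ifp, struct bge_softc *sc,
	    u_int32_t *prodidx)
	{
		struct mbuf *m_head;

		for (;;) {
			IF_DEQUEUE(&ifp->if_snd, m_head);
			if (m_head == NULL)
				break;
			if (bge_encap(sc, m_head, prodidx)) {
				/* No room: requeue and stall the queue. */
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
	}
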