384 sc->sc_led_blink = 1; 385 sc->sc_txpwr_calib = 1; 386#ifdef BWI_DEBUG 387 sc->sc_debug = bwi_debug; 388#endif 389 bwi_power_on(sc, 1); 390 391 error = bwi_bbp_attach(sc); 392 if (error) 393 goto fail; 394 395 error = bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST); 396 if (error) 397 goto fail; 398 399 if (BWI_REGWIN_EXIST(&sc->sc_com_regwin)) { 400 error = bwi_set_clock_delay(sc); 401 if (error) 402 goto fail; 403 404 error = bwi_set_clock_mode(sc, BWI_CLOCK_MODE_FAST); 405 if (error) 406 goto fail; 407 408 error = bwi_get_pwron_delay(sc); 409 if (error) 410 goto fail; 411 } 412 413 error = bwi_bus_attach(sc); 414 if (error) 415 goto fail; 416 417 bwi_get_card_flags(sc); 418 419 bwi_led_attach(sc); 420 421 for (i = 0; i < sc->sc_nmac; ++i) { 422 struct bwi_regwin *old; 423 424 mac = &sc->sc_mac[i]; 425 error = bwi_regwin_switch(sc, &mac->mac_regwin, &old); 426 if (error) 427 goto fail; 428 429 error = bwi_mac_lateattach(mac); 430 if (error) 431 goto fail; 432 433 error = bwi_regwin_switch(sc, old, NULL); 434 if (error) 435 goto fail; 436 } 437 438 /* 439 * XXX First MAC is known to exist 440 * TODO2 441 */ 442 mac = &sc->sc_mac[0]; 443 phy = &mac->mac_phy; 444 445 bwi_bbp_power_off(sc); 446 447 error = bwi_dma_alloc(sc); 448 if (error) 449 goto fail; 450 451 error = bwi_mac_fw_alloc(mac); 452 if (error) 453 goto fail; 454 455 callout_init_mtx(&sc->sc_watchdog_timer, &sc->sc_mtx, 0); 456 457 /* 458 * Setup ratesets, phytype, channels and get MAC address 459 */ 460 if (phy->phy_mode == IEEE80211_MODE_11B || 461 phy->phy_mode == IEEE80211_MODE_11G) { 462 if (phy->phy_mode == IEEE80211_MODE_11B) { 463 ic->ic_phytype = IEEE80211_T_DS; 464 } else { 465 ic->ic_phytype = IEEE80211_T_OFDM; 466 } 467 468 bwi_get_eaddr(sc, BWI_SPROM_11BG_EADDR, ic->ic_macaddr); 469 if (IEEE80211_IS_MULTICAST(ic->ic_macaddr)) { 470 bwi_get_eaddr(sc, BWI_SPROM_11A_EADDR, ic->ic_macaddr); 471 if (IEEE80211_IS_MULTICAST(ic->ic_macaddr)) { 472 device_printf(dev, 473 "invalid MAC address: %6D\n", 
474 ic->ic_macaddr, ":"); 475 } 476 } 477 } else if (phy->phy_mode == IEEE80211_MODE_11A) { 478 /* TODO:11A */ 479 error = ENXIO; 480 goto fail; 481 } else { 482 panic("unknown phymode %d\n", phy->phy_mode); 483 } 484 485 /* Get locale */ 486 sc->sc_locale = __SHIFTOUT(bwi_read_sprom(sc, BWI_SPROM_CARD_INFO), 487 BWI_SPROM_CARD_INFO_LOCALE); 488 DPRINTF(sc, BWI_DBG_ATTACH, "locale: %d\n", sc->sc_locale); 489 /* XXX use locale */ 490 bwi_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, 491 ic->ic_channels); 492 493 ic->ic_softc = sc; 494 ic->ic_name = device_get_nameunit(dev); 495 ic->ic_caps = IEEE80211_C_STA | 496 IEEE80211_C_SHSLOT | 497 IEEE80211_C_SHPREAMBLE | 498 IEEE80211_C_WPA | 499 IEEE80211_C_BGSCAN | 500 IEEE80211_C_MONITOR; 501 ic->ic_opmode = IEEE80211_M_STA; 502 ieee80211_ifattach(ic); 503 504 ic->ic_headroom = sizeof(struct bwi_txbuf_hdr); 505 506 /* override default methods */ 507 ic->ic_vap_create = bwi_vap_create; 508 ic->ic_vap_delete = bwi_vap_delete; 509 ic->ic_raw_xmit = bwi_raw_xmit; 510 ic->ic_updateslot = bwi_updateslot; 511 ic->ic_scan_start = bwi_scan_start; 512 ic->ic_scan_end = bwi_scan_end; 513 ic->ic_getradiocaps = bwi_getradiocaps; 514 ic->ic_set_channel = bwi_set_channel; 515 ic->ic_transmit = bwi_transmit; 516 ic->ic_parent = bwi_parent; 517 518 sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); 519 520 ieee80211_radiotap_attach(ic, 521 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 522 BWI_TX_RADIOTAP_PRESENT, 523 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 524 BWI_RX_RADIOTAP_PRESENT); 525 526 /* 527 * Add sysctl nodes 528 */ 529 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 530 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 531 "fw_version", CTLFLAG_RD, &sc->sc_fw_version, 0, 532 "Firmware version"); 533 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 534 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 535 "led_idle", CTLFLAG_RW, &sc->sc_led_idle, 0, 536 "# ticks before LED enters idle state"); 537 
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 538 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 539 "led_blink", CTLFLAG_RW, &sc->sc_led_blink, 0, 540 "Allow LED to blink"); 541 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 542 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 543 "txpwr_calib", CTLFLAG_RW, &sc->sc_txpwr_calib, 0, 544 "Enable software TX power calibration"); 545#ifdef BWI_DEBUG 546 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 547 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 548 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); 549#endif 550 if (bootverbose) 551 ieee80211_announce(ic); 552 553 return (0); 554fail: 555 BWI_LOCK_DESTROY(sc); 556 return (error); 557} 558 559int 560bwi_detach(struct bwi_softc *sc) 561{ 562 struct ieee80211com *ic = &sc->sc_ic; 563 int i; 564 565 bwi_stop(sc, 1); 566 callout_drain(&sc->sc_led_blink_ch); 567 callout_drain(&sc->sc_calib_ch); 568 callout_drain(&sc->sc_watchdog_timer); 569 ieee80211_ifdetach(ic); 570 571 for (i = 0; i < sc->sc_nmac; ++i) 572 bwi_mac_detach(&sc->sc_mac[i]); 573 bwi_dma_free(sc); 574 taskqueue_free(sc->sc_tq); 575 mbufq_drain(&sc->sc_snd); 576 577 BWI_LOCK_DESTROY(sc); 578 579 return (0); 580} 581 582static struct ieee80211vap * 583bwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 584 enum ieee80211_opmode opmode, int flags, 585 const uint8_t bssid[IEEE80211_ADDR_LEN], 586 const uint8_t mac[IEEE80211_ADDR_LEN]) 587{ 588 struct bwi_vap *bvp; 589 struct ieee80211vap *vap; 590 591 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 592 return NULL; 593 bvp = malloc(sizeof(struct bwi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 594 vap = &bvp->bv_vap; 595 /* enable s/w bmiss handling for sta mode */ 596 ieee80211_vap_setup(ic, vap, name, unit, opmode, 597 flags | IEEE80211_CLONE_NOBEACONS, bssid); 598 599 /* override default methods */ 600 bvp->bv_newstate = vap->iv_newstate; 601 vap->iv_newstate = bwi_newstate; 602#if 0 603 vap->iv_update_beacon 
= bwi_beacon_update; 604#endif 605 ieee80211_ratectl_init(vap); 606 607 /* complete setup */ 608 ieee80211_vap_attach(vap, bwi_media_change, ieee80211_media_status, 609 mac); 610 ic->ic_opmode = opmode; 611 return vap; 612} 613 614static void 615bwi_vap_delete(struct ieee80211vap *vap) 616{ 617 struct bwi_vap *bvp = BWI_VAP(vap); 618 619 ieee80211_ratectl_deinit(vap); 620 ieee80211_vap_detach(vap); 621 free(bvp, M_80211_VAP); 622} 623 624void 625bwi_suspend(struct bwi_softc *sc) 626{ 627 bwi_stop(sc, 1); 628} 629 630void 631bwi_resume(struct bwi_softc *sc) 632{ 633 634 if (sc->sc_ic.ic_nrunning > 0) 635 bwi_init(sc); 636} 637 638int 639bwi_shutdown(struct bwi_softc *sc) 640{ 641 bwi_stop(sc, 1); 642 return 0; 643} 644 645static void 646bwi_power_on(struct bwi_softc *sc, int with_pll) 647{ 648 uint32_t gpio_in, gpio_out, gpio_en; 649 uint16_t status; 650 651 gpio_in = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); 652 if (gpio_in & BWI_PCIM_GPIO_PWR_ON) 653 goto back; 654 655 gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); 656 gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4); 657 658 gpio_out |= BWI_PCIM_GPIO_PWR_ON; 659 gpio_en |= BWI_PCIM_GPIO_PWR_ON; 660 if (with_pll) { 661 /* Turn off PLL first */ 662 gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF; 663 gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF; 664 } 665 666 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); 667 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4); 668 DELAY(1000); 669 670 if (with_pll) { 671 /* Turn on PLL */ 672 gpio_out &= ~BWI_PCIM_GPIO_PLL_PWR_OFF; 673 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); 674 DELAY(5000); 675 } 676 677back: 678 /* Clear "Signaled Target Abort" */ 679 status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2); 680 status &= ~PCIM_STATUS_STABORT; 681 pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2); 682} 683 684static int 685bwi_power_off(struct bwi_softc *sc, int with_pll) 686{ 687 uint32_t gpio_out, gpio_en; 688 
689 pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); /* dummy read */ 690 gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); 691 gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4); 692 693 gpio_out &= ~BWI_PCIM_GPIO_PWR_ON; 694 gpio_en |= BWI_PCIM_GPIO_PWR_ON; 695 if (with_pll) { 696 gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF; 697 gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF; 698 } 699 700 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); 701 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4); 702 return 0; 703} 704 705int 706bwi_regwin_switch(struct bwi_softc *sc, struct bwi_regwin *rw, 707 struct bwi_regwin **old_rw) 708{ 709 int error; 710 711 if (old_rw != NULL) 712 *old_rw = NULL; 713 714 if (!BWI_REGWIN_EXIST(rw)) 715 return EINVAL; 716 717 if (sc->sc_cur_regwin != rw) { 718 error = bwi_regwin_select(sc, rw->rw_id); 719 if (error) { 720 device_printf(sc->sc_dev, "can't select regwin %d\n", 721 rw->rw_id); 722 return error; 723 } 724 } 725 726 if (old_rw != NULL) 727 *old_rw = sc->sc_cur_regwin; 728 sc->sc_cur_regwin = rw; 729 return 0; 730} 731 732static int 733bwi_regwin_select(struct bwi_softc *sc, int id) 734{ 735 uint32_t win = BWI_PCIM_REGWIN(id); 736 int i; 737 738#define RETRY_MAX 50 739 for (i = 0; i < RETRY_MAX; ++i) { 740 pci_write_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, win, 4); 741 if (pci_read_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, 4) == win) 742 return 0; 743 DELAY(10); 744 } 745#undef RETRY_MAX 746 747 return ENXIO; 748} 749 750static void 751bwi_regwin_info(struct bwi_softc *sc, uint16_t *type, uint8_t *rev) 752{ 753 uint32_t val; 754 755 val = CSR_READ_4(sc, BWI_ID_HI); 756 *type = BWI_ID_HI_REGWIN_TYPE(val); 757 *rev = BWI_ID_HI_REGWIN_REV(val); 758 759 DPRINTF(sc, BWI_DBG_ATTACH, "regwin: type 0x%03x, rev %d, " 760 "vendor 0x%04x\n", *type, *rev, 761 __SHIFTOUT(val, BWI_ID_HI_REGWIN_VENDOR_MASK)); 762} 763 764static int 765bwi_bbp_attach(struct bwi_softc *sc) 766{ 767 uint16_t bbp_id, rw_type; 768 uint8_t 
rw_rev; 769 uint32_t info; 770 int error, nregwin, i; 771 772 /* 773 * Get 0th regwin information 774 * NOTE: 0th regwin should exist 775 */ 776 error = bwi_regwin_select(sc, 0); 777 if (error) { 778 device_printf(sc->sc_dev, "can't select regwin 0\n"); 779 return error; 780 } 781 bwi_regwin_info(sc, &rw_type, &rw_rev); 782 783 /* 784 * Find out BBP id 785 */ 786 bbp_id = 0; 787 info = 0; 788 if (rw_type == BWI_REGWIN_T_COM) { 789 info = CSR_READ_4(sc, BWI_INFO); 790 bbp_id = __SHIFTOUT(info, BWI_INFO_BBPID_MASK); 791 792 BWI_CREATE_REGWIN(&sc->sc_com_regwin, 0, rw_type, rw_rev); 793 794 sc->sc_cap = CSR_READ_4(sc, BWI_CAPABILITY); 795 } else { 796 for (i = 0; i < nitems(bwi_bbpid_map); ++i) { 797 if (sc->sc_pci_did >= bwi_bbpid_map[i].did_min && 798 sc->sc_pci_did <= bwi_bbpid_map[i].did_max) { 799 bbp_id = bwi_bbpid_map[i].bbp_id; 800 break; 801 } 802 } 803 if (bbp_id == 0) { 804 device_printf(sc->sc_dev, "no BBP id for device id " 805 "0x%04x\n", sc->sc_pci_did); 806 return ENXIO; 807 } 808 809 info = __SHIFTIN(sc->sc_pci_revid, BWI_INFO_BBPREV_MASK) | 810 __SHIFTIN(0, BWI_INFO_BBPPKG_MASK); 811 } 812 813 /* 814 * Find out number of regwins 815 */ 816 nregwin = 0; 817 if (rw_type == BWI_REGWIN_T_COM && rw_rev >= 4) { 818 nregwin = __SHIFTOUT(info, BWI_INFO_NREGWIN_MASK); 819 } else { 820 for (i = 0; i < nitems(bwi_regwin_count); ++i) { 821 if (bwi_regwin_count[i].bbp_id == bbp_id) { 822 nregwin = bwi_regwin_count[i].nregwin; 823 break; 824 } 825 } 826 if (nregwin == 0) { 827 device_printf(sc->sc_dev, "no number of win for " 828 "BBP id 0x%04x\n", bbp_id); 829 return ENXIO; 830 } 831 } 832 833 /* Record BBP id/rev for later using */ 834 sc->sc_bbp_id = bbp_id; 835 sc->sc_bbp_rev = __SHIFTOUT(info, BWI_INFO_BBPREV_MASK); 836 sc->sc_bbp_pkg = __SHIFTOUT(info, BWI_INFO_BBPPKG_MASK); 837 device_printf(sc->sc_dev, "BBP: id 0x%04x, rev 0x%x, pkg %d\n", 838 sc->sc_bbp_id, sc->sc_bbp_rev, sc->sc_bbp_pkg); 839 840 DPRINTF(sc, BWI_DBG_ATTACH, "nregwin %d, cap 0x%08x\n", 
841 nregwin, sc->sc_cap); 842 843 /* 844 * Create rest of the regwins 845 */ 846 847 /* Don't re-create common regwin, if it is already created */ 848 i = BWI_REGWIN_EXIST(&sc->sc_com_regwin) ? 1 : 0; 849 850 for (; i < nregwin; ++i) { 851 /* 852 * Get regwin information 853 */ 854 error = bwi_regwin_select(sc, i); 855 if (error) { 856 device_printf(sc->sc_dev, 857 "can't select regwin %d\n", i); 858 return error; 859 } 860 bwi_regwin_info(sc, &rw_type, &rw_rev); 861 862 /* 863 * Try attach: 864 * 1) Bus (PCI/PCIE) regwin 865 * 2) MAC regwin 866 * Ignore rest types of regwin 867 */ 868 if (rw_type == BWI_REGWIN_T_BUSPCI || 869 rw_type == BWI_REGWIN_T_BUSPCIE) { 870 if (BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) { 871 device_printf(sc->sc_dev, 872 "bus regwin already exists\n"); 873 } else { 874 BWI_CREATE_REGWIN(&sc->sc_bus_regwin, i, 875 rw_type, rw_rev); 876 } 877 } else if (rw_type == BWI_REGWIN_T_MAC) { 878 /* XXX ignore return value */ 879 bwi_mac_attach(sc, i, rw_rev); 880 } 881 } 882 883 /* At least one MAC shold exist */ 884 if (!BWI_REGWIN_EXIST(&sc->sc_mac[0].mac_regwin)) { 885 device_printf(sc->sc_dev, "no MAC was found\n"); 886 return ENXIO; 887 } 888 KASSERT(sc->sc_nmac > 0, ("no mac's")); 889 890 /* Bus regwin must exist */ 891 if (!BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) { 892 device_printf(sc->sc_dev, "no bus regwin was found\n"); 893 return ENXIO; 894 } 895 896 /* Start with first MAC */ 897 error = bwi_regwin_switch(sc, &sc->sc_mac[0].mac_regwin, NULL); 898 if (error) 899 return error; 900 901 return 0; 902} 903 904int 905bwi_bus_init(struct bwi_softc *sc, struct bwi_mac *mac) 906{ 907 struct bwi_regwin *old, *bus; 908 uint32_t val; 909 int error; 910 911 bus = &sc->sc_bus_regwin; 912 KASSERT(sc->sc_cur_regwin == &mac->mac_regwin, ("not cur regwin")); 913 914 /* 915 * Tell bus to generate requested interrupts 916 */ 917 if (bus->rw_rev < 6 && bus->rw_type == BWI_REGWIN_T_BUSPCI) { 918 /* 919 * NOTE: Read BWI_FLAGS from MAC regwin 920 */ 921 val = 
CSR_READ_4(sc, BWI_FLAGS); 922 923 error = bwi_regwin_switch(sc, bus, &old); 924 if (error) 925 return error; 926 927 CSR_SETBITS_4(sc, BWI_INTRVEC, (val & BWI_FLAGS_INTR_MASK)); 928 } else { 929 uint32_t mac_mask; 930 931 mac_mask = 1 << mac->mac_id; 932 933 error = bwi_regwin_switch(sc, bus, &old); 934 if (error) 935 return error; 936 937 val = pci_read_config(sc->sc_dev, BWI_PCIR_INTCTL, 4); 938 val |= mac_mask << 8; 939 pci_write_config(sc->sc_dev, BWI_PCIR_INTCTL, val, 4); 940 } 941 942 if (sc->sc_flags & BWI_F_BUS_INITED) 943 goto back; 944 945 if (bus->rw_type == BWI_REGWIN_T_BUSPCI) { 946 /* 947 * Enable prefetch and burst 948 */ 949 CSR_SETBITS_4(sc, BWI_BUS_CONFIG, 950 BWI_BUS_CONFIG_PREFETCH | BWI_BUS_CONFIG_BURST); 951 952 if (bus->rw_rev < 5) { 953 struct bwi_regwin *com = &sc->sc_com_regwin; 954 955 /* 956 * Configure timeouts for bus operation 957 */ 958 959 /* 960 * Set service timeout and request timeout 961 */ 962 CSR_SETBITS_4(sc, BWI_CONF_LO, 963 __SHIFTIN(BWI_CONF_LO_SERVTO, BWI_CONF_LO_SERVTO_MASK) | 964 __SHIFTIN(BWI_CONF_LO_REQTO, BWI_CONF_LO_REQTO_MASK)); 965 966 /* 967 * If there is common regwin, we switch to that regwin 968 * and switch back to bus regwin once we have done. 
969 */ 970 if (BWI_REGWIN_EXIST(com)) { 971 error = bwi_regwin_switch(sc, com, NULL); 972 if (error) 973 return error; 974 } 975 976 /* Let bus know what we have changed */ 977 CSR_WRITE_4(sc, BWI_BUS_ADDR, BWI_BUS_ADDR_MAGIC); 978 CSR_READ_4(sc, BWI_BUS_ADDR); /* Flush */ 979 CSR_WRITE_4(sc, BWI_BUS_DATA, 0); 980 CSR_READ_4(sc, BWI_BUS_DATA); /* Flush */ 981 982 if (BWI_REGWIN_EXIST(com)) { 983 error = bwi_regwin_switch(sc, bus, NULL); 984 if (error) 985 return error; 986 } 987 } else if (bus->rw_rev >= 11) { 988 /* 989 * Enable memory read multiple 990 */ 991 CSR_SETBITS_4(sc, BWI_BUS_CONFIG, BWI_BUS_CONFIG_MRM); 992 } 993 } else { 994 /* TODO:PCIE */ 995 } 996 997 sc->sc_flags |= BWI_F_BUS_INITED; 998back: 999 return bwi_regwin_switch(sc, old, NULL); 1000} 1001 1002static void 1003bwi_get_card_flags(struct bwi_softc *sc) 1004{ 1005#define PCI_VENDOR_APPLE 0x106b 1006#define PCI_VENDOR_DELL 0x1028 1007 sc->sc_card_flags = bwi_read_sprom(sc, BWI_SPROM_CARD_FLAGS); 1008 if (sc->sc_card_flags == 0xffff) 1009 sc->sc_card_flags = 0; 1010 1011 if (sc->sc_pci_subvid == PCI_VENDOR_DELL && 1012 sc->sc_bbp_id == BWI_BBPID_BCM4301 && 1013 sc->sc_pci_revid == 0x74) 1014 sc->sc_card_flags |= BWI_CARD_F_BT_COEXIST; 1015 1016 if (sc->sc_pci_subvid == PCI_VENDOR_APPLE && 1017 sc->sc_pci_subdid == 0x4e && /* XXX */ 1018 sc->sc_pci_revid > 0x40) 1019 sc->sc_card_flags |= BWI_CARD_F_PA_GPIO9; 1020 1021 DPRINTF(sc, BWI_DBG_ATTACH, "card flags 0x%04x\n", sc->sc_card_flags); 1022#undef PCI_VENDOR_DELL 1023#undef PCI_VENDOR_APPLE 1024} 1025 1026static void 1027bwi_get_eaddr(struct bwi_softc *sc, uint16_t eaddr_ofs, uint8_t *eaddr) 1028{ 1029 int i; 1030 1031 for (i = 0; i < 3; ++i) { 1032 *((uint16_t *)eaddr + i) = 1033 htobe16(bwi_read_sprom(sc, eaddr_ofs + 2 * i)); 1034 } 1035} 1036 1037static void 1038bwi_get_clock_freq(struct bwi_softc *sc, struct bwi_clock_freq *freq) 1039{ 1040 struct bwi_regwin *com; 1041 uint32_t val; 1042 u_int div; 1043 int src; 1044 1045 bzero(freq, 
sizeof(*freq)); 1046 com = &sc->sc_com_regwin; 1047 1048 KASSERT(BWI_REGWIN_EXIST(com), ("regwin does not exist")); 1049 KASSERT(sc->sc_cur_regwin == com, ("wrong regwin")); 1050 KASSERT(sc->sc_cap & BWI_CAP_CLKMODE, ("wrong clock mode")); 1051 1052 /* 1053 * Calculate clock frequency 1054 */ 1055 src = -1; 1056 div = 0; 1057 if (com->rw_rev < 6) { 1058 val = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); 1059 if (val & BWI_PCIM_GPIO_OUT_CLKSRC) { 1060 src = BWI_CLKSRC_PCI; 1061 div = 64; 1062 } else { 1063 src = BWI_CLKSRC_CS_OSC; 1064 div = 32; 1065 } 1066 } else if (com->rw_rev < 10) { 1067 val = CSR_READ_4(sc, BWI_CLOCK_CTRL); 1068 1069 src = __SHIFTOUT(val, BWI_CLOCK_CTRL_CLKSRC); 1070 if (src == BWI_CLKSRC_LP_OSC) { 1071 div = 1; 1072 } else { 1073 div = (__SHIFTOUT(val, BWI_CLOCK_CTRL_FDIV) + 1) << 2; 1074 1075 /* Unknown source */ 1076 if (src >= BWI_CLKSRC_MAX) 1077 src = BWI_CLKSRC_CS_OSC; 1078 } 1079 } else { 1080 val = CSR_READ_4(sc, BWI_CLOCK_INFO); 1081 1082 src = BWI_CLKSRC_CS_OSC; 1083 div = (__SHIFTOUT(val, BWI_CLOCK_INFO_FDIV) + 1) << 2; 1084 } 1085 1086 KASSERT(src >= 0 && src < BWI_CLKSRC_MAX, ("bad src %d", src)); 1087 KASSERT(div != 0, ("div zero")); 1088 1089 DPRINTF(sc, BWI_DBG_ATTACH, "clksrc %s\n", 1090 src == BWI_CLKSRC_PCI ? "PCI" : 1091 (src == BWI_CLKSRC_LP_OSC ? 
"LP_OSC" : "CS_OSC")); 1092 1093 freq->clkfreq_min = bwi_clkfreq[src].freq_min / div; 1094 freq->clkfreq_max = bwi_clkfreq[src].freq_max / div; 1095 1096 DPRINTF(sc, BWI_DBG_ATTACH, "clkfreq min %u, max %u\n", 1097 freq->clkfreq_min, freq->clkfreq_max); 1098} 1099 1100static int 1101bwi_set_clock_mode(struct bwi_softc *sc, enum bwi_clock_mode clk_mode) 1102{ 1103 struct bwi_regwin *old, *com; 1104 uint32_t clk_ctrl, clk_src; 1105 int error, pwr_off = 0; 1106 1107 com = &sc->sc_com_regwin; 1108 if (!BWI_REGWIN_EXIST(com)) 1109 return 0; 1110 1111 if (com->rw_rev >= 10 || com->rw_rev < 6) 1112 return 0; 1113 1114 /* 1115 * For common regwin whose rev is [6, 10), the chip 1116 * must be capable to change clock mode. 1117 */ 1118 if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0) 1119 return 0; 1120 1121 error = bwi_regwin_switch(sc, com, &old); 1122 if (error) 1123 return error; 1124 1125 if (clk_mode == BWI_CLOCK_MODE_FAST) 1126 bwi_power_on(sc, 0); /* Don't turn on PLL */ 1127 1128 clk_ctrl = CSR_READ_4(sc, BWI_CLOCK_CTRL); 1129 clk_src = __SHIFTOUT(clk_ctrl, BWI_CLOCK_CTRL_CLKSRC); 1130 1131 switch (clk_mode) { 1132 case BWI_CLOCK_MODE_FAST: 1133 clk_ctrl &= ~BWI_CLOCK_CTRL_SLOW; 1134 clk_ctrl |= BWI_CLOCK_CTRL_IGNPLL; 1135 break; 1136 case BWI_CLOCK_MODE_SLOW: 1137 clk_ctrl |= BWI_CLOCK_CTRL_SLOW; 1138 break; 1139 case BWI_CLOCK_MODE_DYN: 1140 clk_ctrl &= ~(BWI_CLOCK_CTRL_SLOW | 1141 BWI_CLOCK_CTRL_IGNPLL | 1142 BWI_CLOCK_CTRL_NODYN); 1143 if (clk_src != BWI_CLKSRC_CS_OSC) { 1144 clk_ctrl |= BWI_CLOCK_CTRL_NODYN; 1145 pwr_off = 1; 1146 } 1147 break; 1148 } 1149 CSR_WRITE_4(sc, BWI_CLOCK_CTRL, clk_ctrl); 1150 1151 if (pwr_off) 1152 bwi_power_off(sc, 0); /* Leave PLL as it is */ 1153 1154 return bwi_regwin_switch(sc, old, NULL); 1155} 1156 1157static int 1158bwi_set_clock_delay(struct bwi_softc *sc) 1159{ 1160 struct bwi_regwin *old, *com; 1161 int error; 1162 1163 com = &sc->sc_com_regwin; 1164 if (!BWI_REGWIN_EXIST(com)) 1165 return 0; 1166 1167 error = 
bwi_regwin_switch(sc, com, &old); 1168 if (error) 1169 return error; 1170 1171 if (sc->sc_bbp_id == BWI_BBPID_BCM4321) { 1172 if (sc->sc_bbp_rev == 0) 1173 CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC0); 1174 else if (sc->sc_bbp_rev == 1) 1175 CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC1); 1176 } 1177 1178 if (sc->sc_cap & BWI_CAP_CLKMODE) { 1179 if (com->rw_rev >= 10) { 1180 CSR_FILT_SETBITS_4(sc, BWI_CLOCK_INFO, 0xffff, 0x40000); 1181 } else { 1182 struct bwi_clock_freq freq; 1183 1184 bwi_get_clock_freq(sc, &freq); 1185 CSR_WRITE_4(sc, BWI_PLL_ON_DELAY, 1186 howmany(freq.clkfreq_max * 150, 1000000)); 1187 CSR_WRITE_4(sc, BWI_FREQ_SEL_DELAY, 1188 howmany(freq.clkfreq_max * 15, 1000000)); 1189 } 1190 } 1191 1192 return bwi_regwin_switch(sc, old, NULL); 1193} 1194 1195static void 1196bwi_init(struct bwi_softc *sc) 1197{ 1198 struct ieee80211com *ic = &sc->sc_ic; 1199 1200 BWI_LOCK(sc); 1201 bwi_init_statechg(sc, 1); 1202 BWI_UNLOCK(sc); 1203 1204 if (sc->sc_flags & BWI_F_RUNNING) 1205 ieee80211_start_all(ic); /* start all vap's */ 1206} 1207 1208static void 1209bwi_init_statechg(struct bwi_softc *sc, int statechg) 1210{ 1211 struct bwi_mac *mac; 1212 int error; 1213 1214 BWI_ASSERT_LOCKED(sc); 1215 1216 bwi_stop_locked(sc, statechg); 1217 1218 bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST); 1219 1220 /* TODO: 2 MAC */ 1221 1222 mac = &sc->sc_mac[0]; 1223 error = bwi_regwin_switch(sc, &mac->mac_regwin, NULL); 1224 if (error) { 1225 device_printf(sc->sc_dev, "%s: error %d on regwin switch\n", 1226 __func__, error); 1227 goto bad; 1228 } 1229 error = bwi_mac_init(mac); 1230 if (error) { 1231 device_printf(sc->sc_dev, "%s: error %d on MAC init\n", 1232 __func__, error); 1233 goto bad; 1234 } 1235 1236 bwi_bbp_power_on(sc, BWI_CLOCK_MODE_DYN); 1237 1238 bwi_set_bssid(sc, bwi_zero_addr); /* Clear BSSID */ 1239 bwi_set_addr_filter(sc, BWI_ADDR_FILTER_MYADDR, sc->sc_ic.ic_macaddr); 1240 1241 bwi_mac_reset_hwkeys(mac); 1242 1243 if ((mac->mac_flags & BWI_MAC_F_HAS_TXSTATS) 
== 0) { 1244 int i; 1245 1246#define NRETRY 1000 1247 /* 1248 * Drain any possible pending TX status 1249 */ 1250 for (i = 0; i < NRETRY; ++i) { 1251 if ((CSR_READ_4(sc, BWI_TXSTATUS0) & 1252 BWI_TXSTATUS0_VALID) == 0) 1253 break; 1254 CSR_READ_4(sc, BWI_TXSTATUS1); 1255 } 1256 if (i == NRETRY) 1257 device_printf(sc->sc_dev, 1258 "%s: can't drain TX status\n", __func__); 1259#undef NRETRY 1260 } 1261 1262 if (mac->mac_phy.phy_mode == IEEE80211_MODE_11G) 1263 bwi_mac_updateslot(mac, 1); 1264 1265 /* Start MAC */ 1266 error = bwi_mac_start(mac); 1267 if (error) { 1268 device_printf(sc->sc_dev, "%s: error %d starting MAC\n", 1269 __func__, error); 1270 goto bad; 1271 } 1272 1273 /* Clear stop flag before enabling interrupt */ 1274 sc->sc_flags &= ~BWI_F_STOP; 1275 sc->sc_flags |= BWI_F_RUNNING; 1276 callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc); 1277 1278 /* Enable intrs */ 1279 bwi_enable_intrs(sc, BWI_INIT_INTRS); 1280 return; 1281bad: 1282 bwi_stop_locked(sc, 1); 1283} 1284 1285static void 1286bwi_parent(struct ieee80211com *ic) 1287{ 1288 struct bwi_softc *sc = ic->ic_softc; 1289 int startall = 0; 1290 1291 BWI_LOCK(sc); 1292 if (ic->ic_nrunning > 0) { 1293 struct bwi_mac *mac; 1294 int promisc = -1; 1295 1296 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1297 ("current regwin type %d", 1298 sc->sc_cur_regwin->rw_type)); 1299 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1300 1301 if (ic->ic_promisc > 0 && (sc->sc_flags & BWI_F_PROMISC) == 0) { 1302 promisc = 1; 1303 sc->sc_flags |= BWI_F_PROMISC; 1304 } else if (ic->ic_promisc == 0 && 1305 (sc->sc_flags & BWI_F_PROMISC) != 0) { 1306 promisc = 0; 1307 sc->sc_flags &= ~BWI_F_PROMISC; 1308 } 1309 1310 if (promisc >= 0) 1311 bwi_mac_set_promisc(mac, promisc); 1312 } 1313 if (ic->ic_nrunning > 0) { 1314 if ((sc->sc_flags & BWI_F_RUNNING) == 0) { 1315 bwi_init_statechg(sc, 1); 1316 startall = 1; 1317 } 1318 } else if (sc->sc_flags & BWI_F_RUNNING) 1319 bwi_stop_locked(sc, 1); 1320 BWI_UNLOCK(sc); 
1321 if (startall) 1322 ieee80211_start_all(ic); 1323} 1324 1325static int 1326bwi_transmit(struct ieee80211com *ic, struct mbuf *m) 1327{ 1328 struct bwi_softc *sc = ic->ic_softc; 1329 int error; 1330 1331 BWI_LOCK(sc); 1332 if ((sc->sc_flags & BWI_F_RUNNING) == 0) { 1333 BWI_UNLOCK(sc); 1334 return (ENXIO); 1335 } 1336 error = mbufq_enqueue(&sc->sc_snd, m); 1337 if (error) { 1338 BWI_UNLOCK(sc); 1339 return (error); 1340 } 1341 bwi_start_locked(sc); 1342 BWI_UNLOCK(sc); 1343 return (0); 1344} 1345 1346static void 1347bwi_start_locked(struct bwi_softc *sc) 1348{ 1349 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; 1350 struct ieee80211_frame *wh; 1351 struct ieee80211_node *ni; 1352 struct mbuf *m; 1353 int trans, idx; 1354 1355 BWI_ASSERT_LOCKED(sc); 1356 1357 trans = 0; 1358 idx = tbd->tbd_idx; 1359 1360 while (tbd->tbd_buf[idx].tb_mbuf == NULL && 1361 tbd->tbd_used + BWI_TX_NSPRDESC < BWI_TX_NDESC && 1362 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 1363 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 1364 wh = mtod(m, struct ieee80211_frame *); 1365 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0 && 1366 ieee80211_crypto_encap(ni, m) == NULL) { 1367 if_inc_counter(ni->ni_vap->iv_ifp, 1368 IFCOUNTER_OERRORS, 1); 1369 ieee80211_free_node(ni); 1370 m_freem(m); 1371 continue; 1372 } 1373 if (bwi_encap(sc, idx, m, ni) != 0) { 1374 /* 'm' is freed in bwi_encap() if we reach here */ 1375 if (ni != NULL) { 1376 if_inc_counter(ni->ni_vap->iv_ifp, 1377 IFCOUNTER_OERRORS, 1); 1378 ieee80211_free_node(ni); 1379 } else 1380 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 1381 continue; 1382 } 1383 trans = 1; 1384 tbd->tbd_used++; 1385 idx = (idx + 1) % BWI_TX_NDESC; 1386 } 1387 1388 tbd->tbd_idx = idx; 1389 if (trans) 1390 sc->sc_tx_timer = 5; 1391} 1392 1393static int 1394bwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 1395 const struct ieee80211_bpf_params *params) 1396{ 1397 struct ieee80211com *ic = ni->ni_ic; 1398 struct bwi_softc *sc = 
ic->ic_softc; 1399 /* XXX wme? */ 1400 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; 1401 int idx, error; 1402 1403 if ((sc->sc_flags & BWI_F_RUNNING) == 0) { 1404 m_freem(m); 1405 return ENETDOWN; 1406 } 1407 1408 BWI_LOCK(sc); 1409 idx = tbd->tbd_idx; 1410 KASSERT(tbd->tbd_buf[idx].tb_mbuf == NULL, ("slot %d not empty", idx)); 1411 if (params == NULL) { 1412 /* 1413 * Legacy path; interpret frame contents to decide 1414 * precisely how to send the frame. 1415 */ 1416 error = bwi_encap(sc, idx, m, ni); 1417 } else { 1418 /* 1419 * Caller supplied explicit parameters to use in 1420 * sending the frame. 1421 */ 1422 error = bwi_encap_raw(sc, idx, m, ni, params); 1423 } 1424 if (error == 0) { 1425 tbd->tbd_used++; 1426 tbd->tbd_idx = (idx + 1) % BWI_TX_NDESC; 1427 sc->sc_tx_timer = 5; 1428 } 1429 BWI_UNLOCK(sc); 1430 return error; 1431} 1432 1433static void 1434bwi_watchdog(void *arg) 1435{ 1436 struct bwi_softc *sc; 1437 1438 sc = arg; 1439 BWI_ASSERT_LOCKED(sc); 1440 if (sc->sc_tx_timer != 0 && --sc->sc_tx_timer == 0) { 1441 device_printf(sc->sc_dev, "watchdog timeout\n"); 1442 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 1443 taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task); 1444 } 1445 callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc); 1446} 1447 1448static void 1449bwi_stop(struct bwi_softc *sc, int statechg) 1450{ 1451 BWI_LOCK(sc); 1452 bwi_stop_locked(sc, statechg); 1453 BWI_UNLOCK(sc); 1454} 1455 1456static void 1457bwi_stop_locked(struct bwi_softc *sc, int statechg) 1458{ 1459 struct bwi_mac *mac; 1460 int i, error, pwr_off = 0; 1461 1462 BWI_ASSERT_LOCKED(sc); 1463 1464 callout_stop(&sc->sc_calib_ch); 1465 callout_stop(&sc->sc_led_blink_ch); 1466 sc->sc_led_blinking = 0; 1467 sc->sc_flags |= BWI_F_STOP; 1468 1469 if (sc->sc_flags & BWI_F_RUNNING) { 1470 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1471 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 1472 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1473 1474 
bwi_disable_intrs(sc, BWI_ALL_INTRS); 1475 CSR_READ_4(sc, BWI_MAC_INTR_MASK); 1476 bwi_mac_stop(mac); 1477 } 1478 1479 for (i = 0; i < sc->sc_nmac; ++i) { 1480 struct bwi_regwin *old_rw; 1481 1482 mac = &sc->sc_mac[i]; 1483 if ((mac->mac_flags & BWI_MAC_F_INITED) == 0) 1484 continue; 1485 1486 error = bwi_regwin_switch(sc, &mac->mac_regwin, &old_rw); 1487 if (error) 1488 continue; 1489 1490 bwi_mac_shutdown(mac); 1491 pwr_off = 1; 1492 1493 bwi_regwin_switch(sc, old_rw, NULL); 1494 } 1495 1496 if (pwr_off) 1497 bwi_bbp_power_off(sc); 1498 1499 sc->sc_tx_timer = 0; 1500 callout_stop(&sc->sc_watchdog_timer); 1501 sc->sc_flags &= ~BWI_F_RUNNING; 1502} 1503 1504void 1505bwi_intr(void *xsc) 1506{ 1507 struct bwi_softc *sc = xsc; 1508 struct bwi_mac *mac; 1509 uint32_t intr_status; 1510 uint32_t txrx_intr_status[BWI_TXRX_NRING]; 1511 int i, txrx_error, tx = 0, rx_data = -1; 1512 1513 BWI_LOCK(sc); 1514 1515 if ((sc->sc_flags & BWI_F_RUNNING) == 0 || 1516 (sc->sc_flags & BWI_F_STOP)) { 1517 BWI_UNLOCK(sc); 1518 return; 1519 } 1520 /* 1521 * Get interrupt status 1522 */ 1523 intr_status = CSR_READ_4(sc, BWI_MAC_INTR_STATUS); 1524 if (intr_status == 0xffffffff) { /* Not for us */ 1525 BWI_UNLOCK(sc); 1526 return; 1527 } 1528 1529 DPRINTF(sc, BWI_DBG_INTR, "intr status 0x%08x\n", intr_status); 1530 1531 intr_status &= CSR_READ_4(sc, BWI_MAC_INTR_MASK); 1532 if (intr_status == 0) { /* Nothing is interesting */ 1533 BWI_UNLOCK(sc); 1534 return; 1535 } 1536 1537 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1538 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 1539 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1540 1541 txrx_error = 0; 1542 DPRINTF(sc, BWI_DBG_INTR, "%s\n", "TX/RX intr"); 1543 for (i = 0; i < BWI_TXRX_NRING; ++i) { 1544 uint32_t mask; 1545 1546 if (BWI_TXRX_IS_RX(i)) 1547 mask = BWI_TXRX_RX_INTRS; 1548 else 1549 mask = BWI_TXRX_TX_INTRS; 1550 1551 txrx_intr_status[i] = 1552 CSR_READ_4(sc, BWI_TXRX_INTR_STATUS(i)) & mask; 1553 1554 _DPRINTF(sc, 
BWI_DBG_INTR, ", %d 0x%08x", 1555 i, txrx_intr_status[i]); 1556 1557 if (txrx_intr_status[i] & BWI_TXRX_INTR_ERROR) { 1558 device_printf(sc->sc_dev, 1559 "%s: intr fatal TX/RX (%d) error 0x%08x\n", 1560 __func__, i, txrx_intr_status[i]); 1561 txrx_error = 1; 1562 } 1563 } 1564 _DPRINTF(sc, BWI_DBG_INTR, "%s\n", ""); 1565 1566 /* 1567 * Acknowledge interrupt 1568 */ 1569 CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, intr_status); 1570 1571 for (i = 0; i < BWI_TXRX_NRING; ++i) 1572 CSR_WRITE_4(sc, BWI_TXRX_INTR_STATUS(i), txrx_intr_status[i]); 1573 1574 /* Disable all interrupts */ 1575 bwi_disable_intrs(sc, BWI_ALL_INTRS); 1576 1577 /* 1578 * http://bcm-specs.sipsolutions.net/Interrupts 1579 * Says for this bit (0x800): 1580 * "Fatal Error 1581 * 1582 * We got this one while testing things when by accident the 1583 * template ram wasn't set to big endian when it should have 1584 * been after writing the initial values. It keeps on being 1585 * triggered, the only way to stop it seems to shut down the 1586 * chip." 1587 * 1588 * Suggesting that we should never get it and if we do we're not 1589 * feeding TX packets into the MAC correctly if we do... Apparently, 1590 * it is valid only on mac version 5 and higher, but I couldn't 1591 * find a reference for that... Since I see them from time to time 1592 * on my card, this suggests an error in the tx path still... 
1593 */ 1594 if (intr_status & BWI_INTR_PHY_TXERR) { 1595 if (mac->mac_flags & BWI_MAC_F_PHYE_RESET) { 1596 device_printf(sc->sc_dev, "%s: intr PHY TX error\n", 1597 __func__); 1598 taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task); 1599 BWI_UNLOCK(sc); 1600 return; 1601 } 1602 } 1603 1604 if (txrx_error) { 1605 /* TODO: reset device */ 1606 } 1607 1608 if (intr_status & BWI_INTR_TBTT) 1609 bwi_mac_config_ps(mac); 1610 1611 if (intr_status & BWI_INTR_EO_ATIM) 1612 device_printf(sc->sc_dev, "EO_ATIM\n"); 1613 1614 if (intr_status & BWI_INTR_PMQ) { 1615 for (;;) { 1616 if ((CSR_READ_4(sc, BWI_MAC_PS_STATUS) & 0x8) == 0) 1617 break; 1618 } 1619 CSR_WRITE_2(sc, BWI_MAC_PS_STATUS, 0x2); 1620 } 1621 1622 if (intr_status & BWI_INTR_NOISE) 1623 device_printf(sc->sc_dev, "intr noise\n"); 1624 1625 if (txrx_intr_status[0] & BWI_TXRX_INTR_RX) { 1626 rx_data = sc->sc_rxeof(sc); 1627 if (sc->sc_flags & BWI_F_STOP) { 1628 BWI_UNLOCK(sc); 1629 return; 1630 } 1631 } 1632 1633 if (txrx_intr_status[3] & BWI_TXRX_INTR_RX) { 1634 sc->sc_txeof_status(sc); 1635 tx = 1; 1636 } 1637 1638 if (intr_status & BWI_INTR_TX_DONE) { 1639 bwi_txeof(sc); 1640 tx = 1; 1641 } 1642 1643 /* Re-enable interrupts */ 1644 bwi_enable_intrs(sc, BWI_INIT_INTRS); 1645 1646 if (sc->sc_blink_led != NULL && sc->sc_led_blink) { 1647 int evt = BWI_LED_EVENT_NONE; 1648 1649 if (tx && rx_data > 0) { 1650 if (sc->sc_rx_rate > sc->sc_tx_rate) 1651 evt = BWI_LED_EVENT_RX; 1652 else 1653 evt = BWI_LED_EVENT_TX; 1654 } else if (tx) { 1655 evt = BWI_LED_EVENT_TX; 1656 } else if (rx_data > 0) { 1657 evt = BWI_LED_EVENT_RX; 1658 } else if (rx_data == 0) { 1659 evt = BWI_LED_EVENT_POLL; 1660 } 1661 1662 if (evt != BWI_LED_EVENT_NONE) 1663 bwi_led_event(sc, evt); 1664 } 1665 1666 BWI_UNLOCK(sc); 1667} 1668 1669static void 1670bwi_scan_start(struct ieee80211com *ic) 1671{ 1672 struct bwi_softc *sc = ic->ic_softc; 1673 1674 BWI_LOCK(sc); 1675 /* Enable MAC beacon promiscuity */ 1676 CSR_SETBITS_4(sc, BWI_MAC_STATUS, 
BWI_MAC_STATUS_PASS_BCN); 1677 BWI_UNLOCK(sc); 1678} 1679 1680static void 1681bwi_getradiocaps(struct ieee80211com *ic, 1682 int maxchans, int *nchans, struct ieee80211_channel chans[]) 1683{ 1684 struct bwi_softc *sc = ic->ic_softc; 1685 struct bwi_mac *mac; 1686 struct bwi_phy *phy; 1687 uint8_t bands[IEEE80211_MODE_BYTES]; 1688 1689 /* 1690 * XXX First MAC is known to exist 1691 * TODO2 1692 */ 1693 mac = &sc->sc_mac[0]; 1694 phy = &mac->mac_phy; 1695 1696 memset(bands, 0, sizeof(bands)); 1697 switch (phy->phy_mode) { 1698 case IEEE80211_MODE_11G: 1699 setbit(bands, IEEE80211_MODE_11G); 1700 /* FALLTHROUGH */ 1701 case IEEE80211_MODE_11B: 1702 setbit(bands, IEEE80211_MODE_11B); 1703 break; 1704 case IEEE80211_MODE_11A: 1705 /* TODO:11A */ 1706 setbit(bands, IEEE80211_MODE_11A); 1707 device_printf(sc->sc_dev, "no 11a support\n"); 1708 return; 1709 default: 1710 panic("unknown phymode %d\n", phy->phy_mode); 1711 } 1712 1713 ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, 1714 bwi_chan_2ghz, nitems(bwi_chan_2ghz), bands, 0); 1715} 1716 1717static void 1718bwi_set_channel(struct ieee80211com *ic) 1719{ 1720 struct bwi_softc *sc = ic->ic_softc; 1721 struct ieee80211_channel *c = ic->ic_curchan; 1722 struct bwi_mac *mac; 1723 1724 BWI_LOCK(sc); 1725 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1726 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 1727 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1728 bwi_rf_set_chan(mac, ieee80211_chan2ieee(ic, c), 0); 1729 1730 sc->sc_rates = ieee80211_get_ratetable(c); 1731 1732 /* 1733 * Setup radio tap channel freq and flags 1734 */ 1735 sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = 1736 htole16(c->ic_freq); 1737 sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = 1738 htole16(c->ic_flags & 0xffff); 1739 1740 BWI_UNLOCK(sc); 1741} 1742 1743static void 1744bwi_scan_end(struct ieee80211com *ic) 1745{ 1746 struct bwi_softc *sc = ic->ic_softc; 1747 1748 BWI_LOCK(sc); 1749 CSR_CLRBITS_4(sc, 
BWI_MAC_STATUS, BWI_MAC_STATUS_PASS_BCN); 1750 BWI_UNLOCK(sc); 1751} 1752 1753static int 1754bwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1755{ 1756 struct bwi_vap *bvp = BWI_VAP(vap); 1757 struct ieee80211com *ic= vap->iv_ic; 1758 struct bwi_softc *sc = ic->ic_softc; 1759 enum ieee80211_state ostate = vap->iv_state; 1760 struct bwi_mac *mac; 1761 int error; 1762 1763 BWI_LOCK(sc); 1764 1765 callout_stop(&sc->sc_calib_ch); 1766 1767 if (nstate == IEEE80211_S_INIT) 1768 sc->sc_txpwrcb_type = BWI_TXPWR_INIT; 1769 1770 bwi_led_newstate(sc, nstate); 1771 1772 error = bvp->bv_newstate(vap, nstate, arg); 1773 if (error != 0) 1774 goto back; 1775 1776 /* 1777 * Clear the BSSID when we stop a STA 1778 */ 1779 if (vap->iv_opmode == IEEE80211_M_STA) { 1780 if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { 1781 /* 1782 * Clear out the BSSID. If we reassociate to 1783 * the same AP, this will reinialize things 1784 * correctly... 1785 */ 1786 if (ic->ic_opmode == IEEE80211_M_STA && 1787 !(sc->sc_flags & BWI_F_STOP)) 1788 bwi_set_bssid(sc, bwi_zero_addr); 1789 } 1790 } 1791 1792 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 1793 /* Nothing to do */ 1794 } else if (nstate == IEEE80211_S_RUN) { 1795 bwi_set_bssid(sc, vap->iv_bss->ni_bssid); 1796 1797 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1798 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 1799 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1800 1801 /* Initial TX power calibration */ 1802 bwi_mac_calibrate_txpower(mac, BWI_TXPWR_INIT); 1803#ifdef notyet 1804 sc->sc_txpwrcb_type = BWI_TXPWR_FORCE; 1805#else 1806 sc->sc_txpwrcb_type = BWI_TXPWR_CALIB; 1807#endif 1808 1809 callout_reset(&sc->sc_calib_ch, hz, bwi_calibrate, sc); 1810 } 1811back: 1812 BWI_UNLOCK(sc); 1813 1814 return error; 1815} 1816 1817static int 1818bwi_media_change(struct ifnet *ifp) 1819{ 1820 int error = ieee80211_media_change(ifp); 1821 /* NB: only the fixed rate can change and that doesn't 
need a reset */ 1822 return (error == ENETRESET ? 0 : error); 1823} 1824 1825static int 1826bwi_dma_alloc(struct bwi_softc *sc) 1827{ 1828 int error, i, has_txstats; 1829 bus_addr_t lowaddr = 0; 1830 bus_size_t tx_ring_sz, rx_ring_sz, desc_sz = 0; 1831 uint32_t txrx_ctrl_step = 0; 1832 1833 has_txstats = 0; 1834 for (i = 0; i < sc->sc_nmac; ++i) { 1835 if (sc->sc_mac[i].mac_flags & BWI_MAC_F_HAS_TXSTATS) { 1836 has_txstats = 1; 1837 break; 1838 } 1839 } 1840 1841 switch (sc->sc_bus_space) { 1842 case BWI_BUS_SPACE_30BIT: 1843 case BWI_BUS_SPACE_32BIT: 1844 if (sc->sc_bus_space == BWI_BUS_SPACE_30BIT) 1845 lowaddr = BWI_BUS_SPACE_MAXADDR; 1846 else 1847 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1848 desc_sz = sizeof(struct bwi_desc32); 1849 txrx_ctrl_step = 0x20; 1850 1851 sc->sc_init_tx_ring = bwi_init_tx_ring32; 1852 sc->sc_free_tx_ring = bwi_free_tx_ring32; 1853 sc->sc_init_rx_ring = bwi_init_rx_ring32; 1854 sc->sc_free_rx_ring = bwi_free_rx_ring32; 1855 sc->sc_setup_rxdesc = bwi_setup_rx_desc32; 1856 sc->sc_setup_txdesc = bwi_setup_tx_desc32; 1857 sc->sc_rxeof = bwi_rxeof32; 1858 sc->sc_start_tx = bwi_start_tx32; 1859 if (has_txstats) { 1860 sc->sc_init_txstats = bwi_init_txstats32; 1861 sc->sc_free_txstats = bwi_free_txstats32; 1862 sc->sc_txeof_status = bwi_txeof_status32; 1863 } 1864 break; 1865 1866 case BWI_BUS_SPACE_64BIT: 1867 lowaddr = BUS_SPACE_MAXADDR; /* XXX */ 1868 desc_sz = sizeof(struct bwi_desc64); 1869 txrx_ctrl_step = 0x40; 1870 1871 sc->sc_init_tx_ring = bwi_init_tx_ring64; 1872 sc->sc_free_tx_ring = bwi_free_tx_ring64; 1873 sc->sc_init_rx_ring = bwi_init_rx_ring64; 1874 sc->sc_free_rx_ring = bwi_free_rx_ring64; 1875 sc->sc_setup_rxdesc = bwi_setup_rx_desc64; 1876 sc->sc_setup_txdesc = bwi_setup_tx_desc64; 1877 sc->sc_rxeof = bwi_rxeof64; 1878 sc->sc_start_tx = bwi_start_tx64; 1879 if (has_txstats) { 1880 sc->sc_init_txstats = bwi_init_txstats64; 1881 sc->sc_free_txstats = bwi_free_txstats64; 1882 sc->sc_txeof_status = bwi_txeof_status64; 1883 } 1884 
break; 1885 } 1886 1887 KASSERT(lowaddr != 0, ("lowaddr zero")); 1888 KASSERT(desc_sz != 0, ("desc_sz zero")); 1889 KASSERT(txrx_ctrl_step != 0, ("txrx_ctrl_step zero")); 1890 1891 tx_ring_sz = roundup(desc_sz * BWI_TX_NDESC, BWI_RING_ALIGN); 1892 rx_ring_sz = roundup(desc_sz * BWI_RX_NDESC, BWI_RING_ALIGN); 1893 1894 /* 1895 * Create top level DMA tag 1896 */ 1897 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 1898 BWI_ALIGN, 0, /* alignment, bounds */ 1899 lowaddr, /* lowaddr */ 1900 BUS_SPACE_MAXADDR, /* highaddr */ 1901 NULL, NULL, /* filter, filterarg */ 1902 BUS_SPACE_MAXSIZE, /* maxsize */ 1903 BUS_SPACE_UNRESTRICTED, /* nsegments */ 1904 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1905 0, /* flags */ 1906 NULL, NULL, /* lockfunc, lockarg */ 1907 &sc->sc_parent_dtag); 1908 if (error) { 1909 device_printf(sc->sc_dev, "can't create parent DMA tag\n"); 1910 return error; 1911 } 1912 1913#define TXRX_CTRL(idx) (BWI_TXRX_CTRL_BASE + (idx) * txrx_ctrl_step) 1914 1915 /* 1916 * Create TX ring DMA stuffs 1917 */ 1918 error = bus_dma_tag_create(sc->sc_parent_dtag, 1919 BWI_RING_ALIGN, 0, 1920 BUS_SPACE_MAXADDR, 1921 BUS_SPACE_MAXADDR, 1922 NULL, NULL, 1923 tx_ring_sz, 1924 1, 1925 tx_ring_sz, 1926 0, 1927 NULL, NULL, 1928 &sc->sc_txring_dtag); 1929 if (error) { 1930 device_printf(sc->sc_dev, "can't create TX ring DMA tag\n"); 1931 return error; 1932 } 1933 1934 for (i = 0; i < BWI_TX_NRING; ++i) { 1935 error = bwi_dma_ring_alloc(sc, sc->sc_txring_dtag, 1936 &sc->sc_tx_rdata[i], tx_ring_sz, 1937 TXRX_CTRL(i)); 1938 if (error) { 1939 device_printf(sc->sc_dev, "%dth TX ring " 1940 "DMA alloc failed\n", i); 1941 return error; 1942 } 1943 } 1944 1945 /* 1946 * Create RX ring DMA stuffs 1947 */ 1948 error = bus_dma_tag_create(sc->sc_parent_dtag, 1949 BWI_RING_ALIGN, 0, 1950 BUS_SPACE_MAXADDR, 1951 BUS_SPACE_MAXADDR, 1952 NULL, NULL, 1953 rx_ring_sz, 1954 1, 1955 rx_ring_sz, 1956 0, 1957 NULL, NULL, 1958 &sc->sc_rxring_dtag); 1959 if (error) { 1960 
device_printf(sc->sc_dev, "can't create RX ring DMA tag\n"); 1961 return error; 1962 } 1963 1964 error = bwi_dma_ring_alloc(sc, sc->sc_rxring_dtag, &sc->sc_rx_rdata, 1965 rx_ring_sz, TXRX_CTRL(0)); 1966 if (error) { 1967 device_printf(sc->sc_dev, "RX ring DMA alloc failed\n"); 1968 return error; 1969 } 1970 1971 if (has_txstats) { 1972 error = bwi_dma_txstats_alloc(sc, TXRX_CTRL(3), desc_sz); 1973 if (error) { 1974 device_printf(sc->sc_dev, 1975 "TX stats DMA alloc failed\n"); 1976 return error; 1977 } 1978 } 1979 1980#undef TXRX_CTRL 1981 1982 return bwi_dma_mbuf_create(sc); 1983} 1984 1985static void 1986bwi_dma_free(struct bwi_softc *sc) 1987{ 1988 if (sc->sc_txring_dtag != NULL) { 1989 int i; 1990 1991 for (i = 0; i < BWI_TX_NRING; ++i) { 1992 struct bwi_ring_data *rd = &sc->sc_tx_rdata[i]; 1993 1994 if (rd->rdata_desc != NULL) { 1995 bus_dmamap_unload(sc->sc_txring_dtag, 1996 rd->rdata_dmap); 1997 bus_dmamem_free(sc->sc_txring_dtag, 1998 rd->rdata_desc, 1999 rd->rdata_dmap); 2000 } 2001 } 2002 bus_dma_tag_destroy(sc->sc_txring_dtag); 2003 } 2004 2005 if (sc->sc_rxring_dtag != NULL) { 2006 struct bwi_ring_data *rd = &sc->sc_rx_rdata; 2007 2008 if (rd->rdata_desc != NULL) { 2009 bus_dmamap_unload(sc->sc_rxring_dtag, rd->rdata_dmap); 2010 bus_dmamem_free(sc->sc_rxring_dtag, rd->rdata_desc, 2011 rd->rdata_dmap); 2012 } 2013 bus_dma_tag_destroy(sc->sc_rxring_dtag); 2014 } 2015 2016 bwi_dma_txstats_free(sc); 2017 bwi_dma_mbuf_destroy(sc, BWI_TX_NRING, 1); 2018 2019 if (sc->sc_parent_dtag != NULL) 2020 bus_dma_tag_destroy(sc->sc_parent_dtag); 2021} 2022 2023static int 2024bwi_dma_ring_alloc(struct bwi_softc *sc, bus_dma_tag_t dtag, 2025 struct bwi_ring_data *rd, bus_size_t size, 2026 uint32_t txrx_ctrl) 2027{ 2028 int error; 2029 2030 error = bus_dmamem_alloc(dtag, &rd->rdata_desc, 2031 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2032 &rd->rdata_dmap); 2033 if (error) { 2034 device_printf(sc->sc_dev, "can't allocate DMA mem\n"); 2035 return error; 2036 } 2037 2038 error = 
bus_dmamap_load(dtag, rd->rdata_dmap, rd->rdata_desc, size, 2039 bwi_dma_ring_addr, &rd->rdata_paddr, 2040 BUS_DMA_NOWAIT); 2041 if (error) { 2042 device_printf(sc->sc_dev, "can't load DMA mem\n"); 2043 bus_dmamem_free(dtag, rd->rdata_desc, rd->rdata_dmap); 2044 rd->rdata_desc = NULL; 2045 return error; 2046 } 2047 2048 rd->rdata_txrx_ctrl = txrx_ctrl; 2049 return 0; 2050} 2051 2052static int 2053bwi_dma_txstats_alloc(struct bwi_softc *sc, uint32_t ctrl_base, 2054 bus_size_t desc_sz) 2055{ 2056 struct bwi_txstats_data *st; 2057 bus_size_t dma_size; 2058 int error; 2059 2060 st = malloc(sizeof(*st), M_DEVBUF, M_NOWAIT | M_ZERO); 2061 if (st == NULL) { 2062 device_printf(sc->sc_dev, "can't allocate txstats data\n"); 2063 return ENOMEM; 2064 } 2065 sc->sc_txstats = st; 2066 2067 /* 2068 * Create TX stats descriptor DMA stuffs 2069 */ 2070 dma_size = roundup(desc_sz * BWI_TXSTATS_NDESC, BWI_RING_ALIGN); 2071 2072 error = bus_dma_tag_create(sc->sc_parent_dtag, 2073 BWI_RING_ALIGN, 2074 0, 2075 BUS_SPACE_MAXADDR, 2076 BUS_SPACE_MAXADDR, 2077 NULL, NULL, 2078 dma_size, 2079 1, 2080 dma_size, 2081 0, 2082 NULL, NULL, 2083 &st->stats_ring_dtag); 2084 if (error) { 2085 device_printf(sc->sc_dev, "can't create txstats ring " 2086 "DMA tag\n"); 2087 return error; 2088 } 2089 2090 error = bus_dmamem_alloc(st->stats_ring_dtag, &st->stats_ring, 2091 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2092 &st->stats_ring_dmap); 2093 if (error) { 2094 device_printf(sc->sc_dev, "can't allocate txstats ring " 2095 "DMA mem\n"); 2096 bus_dma_tag_destroy(st->stats_ring_dtag); 2097 st->stats_ring_dtag = NULL; 2098 return error; 2099 } 2100 2101 error = bus_dmamap_load(st->stats_ring_dtag, st->stats_ring_dmap, 2102 st->stats_ring, dma_size, 2103 bwi_dma_ring_addr, &st->stats_ring_paddr, 2104 BUS_DMA_NOWAIT); 2105 if (error) { 2106 device_printf(sc->sc_dev, "can't load txstats ring DMA mem\n"); 2107 bus_dmamem_free(st->stats_ring_dtag, st->stats_ring, 2108 st->stats_ring_dmap); 2109 
bus_dma_tag_destroy(st->stats_ring_dtag); 2110 st->stats_ring_dtag = NULL; 2111 return error; 2112 } 2113 2114 /* 2115 * Create TX stats DMA stuffs 2116 */ 2117 dma_size = roundup(sizeof(struct bwi_txstats) * BWI_TXSTATS_NDESC, 2118 BWI_ALIGN); 2119 2120 error = bus_dma_tag_create(sc->sc_parent_dtag, 2121 BWI_ALIGN, 2122 0, 2123 BUS_SPACE_MAXADDR, 2124 BUS_SPACE_MAXADDR, 2125 NULL, NULL, 2126 dma_size, 2127 1, 2128 dma_size, 2129 0, 2130 NULL, NULL, 2131 &st->stats_dtag); 2132 if (error) { 2133 device_printf(sc->sc_dev, "can't create txstats DMA tag\n"); 2134 return error; 2135 } 2136 2137 error = bus_dmamem_alloc(st->stats_dtag, (void **)&st->stats, 2138 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2139 &st->stats_dmap); 2140 if (error) { 2141 device_printf(sc->sc_dev, "can't allocate txstats DMA mem\n"); 2142 bus_dma_tag_destroy(st->stats_dtag); 2143 st->stats_dtag = NULL; 2144 return error; 2145 } 2146 2147 error = bus_dmamap_load(st->stats_dtag, st->stats_dmap, st->stats, 2148 dma_size, bwi_dma_ring_addr, &st->stats_paddr, 2149 BUS_DMA_NOWAIT); 2150 if (error) { 2151 device_printf(sc->sc_dev, "can't load txstats DMA mem\n"); 2152 bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap); 2153 bus_dma_tag_destroy(st->stats_dtag); 2154 st->stats_dtag = NULL; 2155 return error; 2156 } 2157 2158 st->stats_ctrl_base = ctrl_base; 2159 return 0; 2160} 2161 2162static void 2163bwi_dma_txstats_free(struct bwi_softc *sc) 2164{ 2165 struct bwi_txstats_data *st; 2166 2167 if (sc->sc_txstats == NULL) 2168 return; 2169 st = sc->sc_txstats; 2170 2171 if (st->stats_ring_dtag != NULL) { 2172 bus_dmamap_unload(st->stats_ring_dtag, st->stats_ring_dmap); 2173 bus_dmamem_free(st->stats_ring_dtag, st->stats_ring, 2174 st->stats_ring_dmap); 2175 bus_dma_tag_destroy(st->stats_ring_dtag); 2176 } 2177 2178 if (st->stats_dtag != NULL) { 2179 bus_dmamap_unload(st->stats_dtag, st->stats_dmap); 2180 bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap); 2181 bus_dma_tag_destroy(st->stats_dtag); 
2182 } 2183 2184 free(st, M_DEVBUF); 2185} 2186 2187static void 2188bwi_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) 2189{ 2190 KASSERT(nseg == 1, ("too many segments\n")); 2191 *((bus_addr_t *)arg) = seg->ds_addr; 2192} 2193 2194static int 2195bwi_dma_mbuf_create(struct bwi_softc *sc) 2196{ 2197 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; 2198 int i, j, k, ntx, error; 2199 2200 /* 2201 * Create TX/RX mbuf DMA tag 2202 */ 2203 error = bus_dma_tag_create(sc->sc_parent_dtag, 2204 1, 2205 0, 2206 BUS_SPACE_MAXADDR, 2207 BUS_SPACE_MAXADDR, 2208 NULL, NULL, 2209 MCLBYTES, 2210 1, 2211 MCLBYTES, 2212 BUS_DMA_ALLOCNOW, 2213 NULL, NULL, 2214 &sc->sc_buf_dtag); 2215 if (error) { 2216 device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); 2217 return error; 2218 } 2219 2220 ntx = 0; 2221 2222 /* 2223 * Create TX mbuf DMA map 2224 */ 2225 for (i = 0; i < BWI_TX_NRING; ++i) { 2226 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i]; 2227 2228 for (j = 0; j < BWI_TX_NDESC; ++j) { 2229 error = bus_dmamap_create(sc->sc_buf_dtag, 0, 2230 &tbd->tbd_buf[j].tb_dmap); 2231 if (error) { 2232 device_printf(sc->sc_dev, "can't create " 2233 "%dth tbd, %dth DMA map\n", i, j); 2234 2235 ntx = i; 2236 for (k = 0; k < j; ++k) { 2237 bus_dmamap_destroy(sc->sc_buf_dtag, 2238 tbd->tbd_buf[k].tb_dmap); 2239 } 2240 goto fail; 2241 } 2242 } 2243 } 2244 ntx = BWI_TX_NRING; 2245 2246 /* 2247 * Create RX mbuf DMA map and a spare DMA map 2248 */ 2249 error = bus_dmamap_create(sc->sc_buf_dtag, 0, 2250 &rbd->rbd_tmp_dmap); 2251 if (error) { 2252 device_printf(sc->sc_dev, 2253 "can't create spare RX buf DMA map\n"); 2254 goto fail; 2255 } 2256 2257 for (j = 0; j < BWI_RX_NDESC; ++j) { 2258 error = bus_dmamap_create(sc->sc_buf_dtag, 0, 2259 &rbd->rbd_buf[j].rb_dmap); 2260 if (error) { 2261 device_printf(sc->sc_dev, "can't create %dth " 2262 "RX buf DMA map\n", j); 2263 2264 for (k = 0; k < j; ++k) { 2265 bus_dmamap_destroy(sc->sc_buf_dtag, 2266 rbd->rbd_buf[j].rb_dmap); 2267 } 
2268 bus_dmamap_destroy(sc->sc_buf_dtag, 2269 rbd->rbd_tmp_dmap); 2270 goto fail; 2271 } 2272 } 2273 2274 return 0; 2275fail: 2276 bwi_dma_mbuf_destroy(sc, ntx, 0); 2277 return error; 2278} 2279 2280static void 2281bwi_dma_mbuf_destroy(struct bwi_softc *sc, int ntx, int nrx) 2282{ 2283 int i, j; 2284 2285 if (sc->sc_buf_dtag == NULL) 2286 return; 2287 2288 for (i = 0; i < ntx; ++i) { 2289 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i]; 2290 2291 for (j = 0; j < BWI_TX_NDESC; ++j) { 2292 struct bwi_txbuf *tb = &tbd->tbd_buf[j]; 2293 2294 if (tb->tb_mbuf != NULL) { 2295 bus_dmamap_unload(sc->sc_buf_dtag, 2296 tb->tb_dmap); 2297 m_freem(tb->tb_mbuf); 2298 } 2299 if (tb->tb_ni != NULL) 2300 ieee80211_free_node(tb->tb_ni); 2301 bus_dmamap_destroy(sc->sc_buf_dtag, tb->tb_dmap); 2302 } 2303 } 2304 2305 if (nrx) { 2306 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; 2307 2308 bus_dmamap_destroy(sc->sc_buf_dtag, rbd->rbd_tmp_dmap); 2309 for (j = 0; j < BWI_RX_NDESC; ++j) { 2310 struct bwi_rxbuf *rb = &rbd->rbd_buf[j]; 2311 2312 if (rb->rb_mbuf != NULL) { 2313 bus_dmamap_unload(sc->sc_buf_dtag, 2314 rb->rb_dmap); 2315 m_freem(rb->rb_mbuf); 2316 } 2317 bus_dmamap_destroy(sc->sc_buf_dtag, rb->rb_dmap); 2318 } 2319 } 2320 2321 bus_dma_tag_destroy(sc->sc_buf_dtag); 2322 sc->sc_buf_dtag = NULL; 2323} 2324 2325static void 2326bwi_enable_intrs(struct bwi_softc *sc, uint32_t enable_intrs) 2327{ 2328 CSR_SETBITS_4(sc, BWI_MAC_INTR_MASK, enable_intrs); 2329} 2330 2331static void 2332bwi_disable_intrs(struct bwi_softc *sc, uint32_t disable_intrs) 2333{ 2334 CSR_CLRBITS_4(sc, BWI_MAC_INTR_MASK, disable_intrs); 2335} 2336 2337static int 2338bwi_init_tx_ring32(struct bwi_softc *sc, int ring_idx) 2339{ 2340 struct bwi_ring_data *rd; 2341 struct bwi_txbuf_data *tbd; 2342 uint32_t val, addr_hi, addr_lo; 2343 2344 KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx)); 2345 rd = &sc->sc_tx_rdata[ring_idx]; 2346 tbd = &sc->sc_tx_bdata[ring_idx]; 2347 2348 tbd->tbd_idx = 0; 2349 
tbd->tbd_used = 0; 2350 2351 bzero(rd->rdata_desc, sizeof(struct bwi_desc32) * BWI_TX_NDESC); 2352 bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap, 2353 BUS_DMASYNC_PREWRITE); 2354 2355 addr_lo = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_ADDR_MASK); 2356 addr_hi = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_FUNC_MASK); 2357 2358 val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) | 2359 __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX, 2360 BWI_TXRX32_RINGINFO_FUNC_MASK); 2361 CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, val); 2362 2363 val = __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) | 2364 BWI_TXRX32_CTRL_ENABLE; 2365 CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, val); 2366 2367 return 0; 2368} 2369 2370static void 2371bwi_init_rxdesc_ring32(struct bwi_softc *sc, uint32_t ctrl_base, 2372 bus_addr_t paddr, int hdr_size, int ndesc) 2373{ 2374 uint32_t val, addr_hi, addr_lo; 2375 2376 addr_lo = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_ADDR_MASK); 2377 addr_hi = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_FUNC_MASK); 2378 2379 val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) | 2380 __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX, 2381 BWI_TXRX32_RINGINFO_FUNC_MASK); 2382 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_RINGINFO, val); 2383 2384 val = __SHIFTIN(hdr_size, BWI_RX32_CTRL_HDRSZ_MASK) | 2385 __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) | 2386 BWI_TXRX32_CTRL_ENABLE; 2387 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_CTRL, val); 2388 2389 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX, 2390 (ndesc - 1) * sizeof(struct bwi_desc32)); 2391} 2392 2393static int 2394bwi_init_rx_ring32(struct bwi_softc *sc) 2395{ 2396 struct bwi_ring_data *rd = &sc->sc_rx_rdata; 2397 int i, error; 2398 2399 sc->sc_rx_bdata.rbd_idx = 0; 2400 2401 for (i = 0; i < BWI_RX_NDESC; ++i) { 2402 error = bwi_newbuf(sc, i, 1); 2403 if (error) { 2404 device_printf(sc->sc_dev, 2405 "can't allocate %dth RX buffer\n", i); 2406 return error; 2407 } 2408 } 2409 bus_dmamap_sync(sc->sc_rxring_dtag, 
rd->rdata_dmap, 2410 BUS_DMASYNC_PREWRITE); 2411 2412 bwi_init_rxdesc_ring32(sc, rd->rdata_txrx_ctrl, rd->rdata_paddr, 2413 sizeof(struct bwi_rxbuf_hdr), BWI_RX_NDESC); 2414 return 0; 2415} 2416 2417static int 2418bwi_init_txstats32(struct bwi_softc *sc) 2419{ 2420 struct bwi_txstats_data *st = sc->sc_txstats; 2421 bus_addr_t stats_paddr; 2422 int i; 2423 2424 bzero(st->stats, BWI_TXSTATS_NDESC * sizeof(struct bwi_txstats)); 2425 bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_PREWRITE); 2426 2427 st->stats_idx = 0; 2428 2429 stats_paddr = st->stats_paddr; 2430 for (i = 0; i < BWI_TXSTATS_NDESC; ++i) { 2431 bwi_setup_desc32(sc, st->stats_ring, BWI_TXSTATS_NDESC, i, 2432 stats_paddr, sizeof(struct bwi_txstats), 0); 2433 stats_paddr += sizeof(struct bwi_txstats); 2434 } 2435 bus_dmamap_sync(st->stats_ring_dtag, st->stats_ring_dmap, 2436 BUS_DMASYNC_PREWRITE); 2437 2438 bwi_init_rxdesc_ring32(sc, st->stats_ctrl_base, 2439 st->stats_ring_paddr, 0, BWI_TXSTATS_NDESC); 2440 return 0; 2441} 2442 2443static void 2444bwi_setup_rx_desc32(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr, 2445 int buf_len) 2446{ 2447 struct bwi_ring_data *rd = &sc->sc_rx_rdata; 2448 2449 KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx)); 2450 bwi_setup_desc32(sc, rd->rdata_desc, BWI_RX_NDESC, buf_idx, 2451 paddr, buf_len, 0); 2452} 2453 2454static void 2455bwi_setup_tx_desc32(struct bwi_softc *sc, struct bwi_ring_data *rd, 2456 int buf_idx, bus_addr_t paddr, int buf_len) 2457{ 2458 KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx)); 2459 bwi_setup_desc32(sc, rd->rdata_desc, BWI_TX_NDESC, buf_idx, 2460 paddr, buf_len, 1); 2461} 2462 2463static int 2464bwi_init_tx_ring64(struct bwi_softc *sc, int ring_idx) 2465{ 2466 /* TODO:64 */ 2467 return EOPNOTSUPP; 2468} 2469 2470static int 2471bwi_init_rx_ring64(struct bwi_softc *sc) 2472{ 2473 /* TODO:64 */ 2474 return EOPNOTSUPP; 2475} 2476 2477static int 2478bwi_init_txstats64(struct bwi_softc *sc) 2479{ 2480 /* TODO:64 */ 
2481 return EOPNOTSUPP; 2482} 2483 2484static void 2485bwi_setup_rx_desc64(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr, 2486 int buf_len) 2487{ 2488 /* TODO:64 */ 2489} 2490 2491static void 2492bwi_setup_tx_desc64(struct bwi_softc *sc, struct bwi_ring_data *rd, 2493 int buf_idx, bus_addr_t paddr, int buf_len) 2494{ 2495 /* TODO:64 */ 2496} 2497 2498static void 2499bwi_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, 2500 bus_size_t mapsz __unused, int error) 2501{ 2502 if (!error) { 2503 KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); 2504 *((bus_addr_t *)arg) = seg->ds_addr; 2505 } 2506} 2507 2508static int 2509bwi_newbuf(struct bwi_softc *sc, int buf_idx, int init) 2510{ 2511 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; 2512 struct bwi_rxbuf *rxbuf = &rbd->rbd_buf[buf_idx]; 2513 struct bwi_rxbuf_hdr *hdr; 2514 bus_dmamap_t map; 2515 bus_addr_t paddr; 2516 struct mbuf *m; 2517 int error; 2518 2519 KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx)); 2520 2521 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2522 if (m == NULL) { 2523 error = ENOBUFS; 2524 2525 /* 2526 * If the NIC is up and running, we need to: 2527 * - Clear RX buffer's header. 2528 * - Restore RX descriptor settings. 
2529 */ 2530 if (init) 2531 return error; 2532 else 2533 goto back; 2534 } 2535 m->m_len = m->m_pkthdr.len = MCLBYTES; 2536 2537 /* 2538 * Try to load RX buf into temporary DMA map 2539 */ 2540 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, rbd->rbd_tmp_dmap, m, 2541 bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); 2542 if (error) { 2543 m_freem(m); 2544 2545 /* 2546 * See the comment above 2547 */ 2548 if (init) 2549 return error; 2550 else 2551 goto back; 2552 } 2553 2554 if (!init) 2555 bus_dmamap_unload(sc->sc_buf_dtag, rxbuf->rb_dmap); 2556 rxbuf->rb_mbuf = m; 2557 rxbuf->rb_paddr = paddr; 2558 2559 /* 2560 * Swap RX buf's DMA map with the loaded temporary one 2561 */ 2562 map = rxbuf->rb_dmap; 2563 rxbuf->rb_dmap = rbd->rbd_tmp_dmap; 2564 rbd->rbd_tmp_dmap = map; 2565 2566back: 2567 /* 2568 * Clear RX buf header 2569 */ 2570 hdr = mtod(rxbuf->rb_mbuf, struct bwi_rxbuf_hdr *); 2571 bzero(hdr, sizeof(*hdr)); 2572 bus_dmamap_sync(sc->sc_buf_dtag, rxbuf->rb_dmap, BUS_DMASYNC_PREWRITE); 2573 2574 /* 2575 * Setup RX buf descriptor 2576 */ 2577 sc->sc_setup_rxdesc(sc, buf_idx, rxbuf->rb_paddr, 2578 rxbuf->rb_mbuf->m_len - sizeof(*hdr)); 2579 return error; 2580} 2581 2582static void 2583bwi_set_addr_filter(struct bwi_softc *sc, uint16_t addr_ofs, 2584 const uint8_t *addr) 2585{ 2586 int i; 2587 2588 CSR_WRITE_2(sc, BWI_ADDR_FILTER_CTRL, 2589 BWI_ADDR_FILTER_CTRL_SET | addr_ofs); 2590 2591 for (i = 0; i < (IEEE80211_ADDR_LEN / 2); ++i) { 2592 uint16_t addr_val; 2593 2594 addr_val = (uint16_t)addr[i * 2] | 2595 (((uint16_t)addr[(i * 2) + 1]) << 8); 2596 CSR_WRITE_2(sc, BWI_ADDR_FILTER_DATA, addr_val); 2597 } 2598} 2599 2600static int 2601bwi_rxeof(struct bwi_softc *sc, int end_idx) 2602{ 2603 struct bwi_ring_data *rd = &sc->sc_rx_rdata; 2604 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; 2605 struct ieee80211com *ic = &sc->sc_ic; 2606 int idx, rx_data = 0; 2607 2608 idx = rbd->rbd_idx; 2609 while (idx != end_idx) { 2610 struct bwi_rxbuf *rb = &rbd->rbd_buf[idx]; 2611 struct 
bwi_rxbuf_hdr *hdr; 2612 struct ieee80211_frame_min *wh; 2613 struct ieee80211_node *ni; 2614 struct mbuf *m; 2615 uint32_t plcp; 2616 uint16_t flags2; 2617 int buflen, wh_ofs, hdr_extra, rssi, noise, type, rate; 2618 2619 m = rb->rb_mbuf; 2620 bus_dmamap_sync(sc->sc_buf_dtag, rb->rb_dmap, 2621 BUS_DMASYNC_POSTREAD); 2622 2623 if (bwi_newbuf(sc, idx, 0)) { 2624 counter_u64_add(ic->ic_ierrors, 1); 2625 goto next; 2626 } 2627 2628 hdr = mtod(m, struct bwi_rxbuf_hdr *); 2629 flags2 = le16toh(hdr->rxh_flags2); 2630 2631 hdr_extra = 0; 2632 if (flags2 & BWI_RXH_F2_TYPE2FRAME) 2633 hdr_extra = 2; 2634 wh_ofs = hdr_extra + 6; /* XXX magic number */ 2635 2636 buflen = le16toh(hdr->rxh_buflen); 2637 if (buflen < BWI_FRAME_MIN_LEN(wh_ofs)) { 2638 device_printf(sc->sc_dev, 2639 "%s: zero length data, hdr_extra %d\n", 2640 __func__, hdr_extra); 2641 counter_u64_add(ic->ic_ierrors, 1); 2642 m_freem(m); 2643 goto next; 2644 } 2645 2646 bcopy((uint8_t *)(hdr + 1) + hdr_extra, &plcp, sizeof(plcp)); 2647 rssi = bwi_calc_rssi(sc, hdr); 2648 noise = bwi_calc_noise(sc); 2649 2650 m->m_len = m->m_pkthdr.len = buflen + sizeof(*hdr); 2651 m_adj(m, sizeof(*hdr) + wh_ofs); 2652 2653 if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_OFDM) 2654 rate = bwi_plcp2rate(plcp, IEEE80211_T_OFDM); 2655 else 2656 rate = bwi_plcp2rate(plcp, IEEE80211_T_CCK); 2657 2658 /* RX radio tap */ 2659 if (ieee80211_radiotap_active(ic)) 2660 bwi_rx_radiotap(sc, m, hdr, &plcp, rate, rssi, noise); 2661 2662 m_adj(m, -IEEE80211_CRC_LEN); 2663 2664 BWI_UNLOCK(sc); 2665 2666 wh = mtod(m, struct ieee80211_frame_min *); 2667 ni = ieee80211_find_rxnode(ic, wh); 2668 if (ni != NULL) { 2669 type = ieee80211_input(ni, m, rssi - noise, noise); 2670 ieee80211_free_node(ni); 2671 } else 2672 type = ieee80211_input_all(ic, m, rssi - noise, noise); 2673 if (type == IEEE80211_FC0_TYPE_DATA) { 2674 rx_data = 1; 2675 sc->sc_rx_rate = rate; 2676 } 2677 2678 BWI_LOCK(sc); 2679next: 2680 idx = (idx + 1) % BWI_RX_NDESC; 2681 2682 if 
(sc->sc_flags & BWI_F_STOP) { 2683 /* 2684 * Take the fast lane, don't do 2685 * any damage to softc 2686 */ 2687 return -1; 2688 } 2689 } 2690 2691 rbd->rbd_idx = idx; 2692 bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap, 2693 BUS_DMASYNC_PREWRITE); 2694 2695 return rx_data; 2696} 2697 2698static int 2699bwi_rxeof32(struct bwi_softc *sc) 2700{ 2701 uint32_t val, rx_ctrl; 2702 int end_idx, rx_data; 2703 2704 rx_ctrl = sc->sc_rx_rdata.rdata_txrx_ctrl; 2705 2706 val = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS); 2707 end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) / 2708 sizeof(struct bwi_desc32); 2709 2710 rx_data = bwi_rxeof(sc, end_idx); 2711 if (rx_data >= 0) { 2712 CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_INDEX, 2713 end_idx * sizeof(struct bwi_desc32)); 2714 } 2715 return rx_data; 2716} 2717 2718static int 2719bwi_rxeof64(struct bwi_softc *sc) 2720{ 2721 /* TODO:64 */ 2722 return 0; 2723} 2724 2725static void 2726bwi_reset_rx_ring32(struct bwi_softc *sc, uint32_t rx_ctrl) 2727{ 2728 int i; 2729 2730 CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_CTRL, 0); 2731 2732#define NRETRY 10 2733 2734 for (i = 0; i < NRETRY; ++i) { 2735 uint32_t status; 2736 2737 status = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS); 2738 if (__SHIFTOUT(status, BWI_RX32_STATUS_STATE_MASK) == 2739 BWI_RX32_STATUS_STATE_DISABLED) 2740 break; 2741 2742 DELAY(1000); 2743 } 2744 if (i == NRETRY) 2745 device_printf(sc->sc_dev, "reset rx ring timedout\n"); 2746 2747#undef NRETRY 2748 2749 CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_RINGINFO, 0); 2750} 2751 2752static void 2753bwi_free_txstats32(struct bwi_softc *sc) 2754{ 2755 bwi_reset_rx_ring32(sc, sc->sc_txstats->stats_ctrl_base); 2756} 2757 2758static void 2759bwi_free_rx_ring32(struct bwi_softc *sc) 2760{ 2761 struct bwi_ring_data *rd = &sc->sc_rx_rdata; 2762 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; 2763 int i; 2764 2765 bwi_reset_rx_ring32(sc, rd->rdata_txrx_ctrl); 2766 2767 for (i = 0; i < BWI_RX_NDESC; ++i) { 2768 struct bwi_rxbuf *rb = 
&rbd->rbd_buf[i];

		/* Unload the DMA map and free any mbuf still parked in
		 * this RX slot. */
		if (rb->rb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_buf_dtag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}
}

/*
 * Tear down a 32-bit DMA TX ring: wait for the ring's state machine to
 * settle, disable it, then release every mbuf/node reference still held
 * by the ring's buffers.
 */
static void
bwi_free_tx_ring32(struct bwi_softc *sc, int ring_idx)
{
	struct bwi_ring_data *rd;
	struct bwi_txbuf_data *tbd;
	uint32_t state, val;
	int i;

	KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx));
	rd = &sc->sc_tx_rdata[ring_idx];
	tbd = &sc->sc_tx_bdata[ring_idx];

#define NRETRY 10

	/* Poll until the TX engine reports a quiescent state. */
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
		state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
		if (state == BWI_TX32_STATUS_STATE_DISABLED ||
		    state == BWI_TX32_STATUS_STATE_IDLE ||
		    state == BWI_TX32_STATUS_STATE_STOPPED)
			break;

		DELAY(1000);
	}
	if (i == NRETRY) {
		device_printf(sc->sc_dev,
		    "%s: wait for TX ring(%d) stable timed out\n",
		    __func__, ring_idx);
	}

	/* Disable the ring, then wait for the DISABLED state to latch. */
	CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, 0);
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
		state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
		if (state == BWI_TX32_STATUS_STATE_DISABLED)
			break;

		DELAY(1000);
	}
	if (i == NRETRY)
		device_printf(sc->sc_dev, "%s: reset TX ring (%d) timed out\n",
		    __func__, ring_idx);

#undef NRETRY

	DELAY(1000);

	CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, 0);

	/* Drop every buffer and node reference still owned by the ring. */
	for (i = 0; i < BWI_TX_NDESC; ++i) {
		struct bwi_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
		if (tb->tb_ni != NULL) {
			ieee80211_free_node(tb->tb_ni);
			tb->tb_ni = NULL;
		}
	}
}

/* 64-bit DMA variant: not implemented. */
static void
bwi_free_txstats64(struct bwi_softc *sc)
{
	/* TODO:64 */
}

/* 64-bit DMA variant: not implemented. */
static void
bwi_free_rx_ring64(struct bwi_softc *sc)
{
	/* TODO:64 */
}

/* 64-bit DMA variant: not implemented. */
static void
bwi_free_tx_ring64(struct bwi_softc *sc, int ring_idx)
{
	/* TODO:64 */
}

/* XXX does not belong here */
#define IEEE80211_OFDM_PLCP_RATE_MASK	__BITS(3, 0)
#define IEEE80211_OFDM_PLCP_LEN_MASK	__BITS(16, 5)

/*
 * Build the little-endian 32-bit OFDM PLCP word from the PLCP rate code
 * and the frame length.
 */
static __inline void
bwi_ofdm_plcp_header(uint32_t *plcp0, int pkt_len, uint8_t rate)
{
	uint32_t plcp;

	plcp = __SHIFTIN(ieee80211_rate2plcp(rate, IEEE80211_T_OFDM),
	    IEEE80211_OFDM_PLCP_RATE_MASK) |
	    __SHIFTIN(pkt_len, IEEE80211_OFDM_PLCP_LEN_MASK);
	*plcp0 = htole32(plcp);
}

/*
 * Fill in a DS/CCK PLCP header: signal (PLCP rate code), service bits and
 * length.  'rate' is presumably in 500Kbps units (11Mbps == 22) -- the
 * len = bits*2/rate computation and the (11 * 2) check rely on that.
 * The i_crc field is left alone for the hardware to fill in.
 */
static __inline void
bwi_ds_plcp_header(struct ieee80211_ds_plcp_hdr *plcp, int pkt_len,
    uint8_t rate)
{
	int len, service, pkt_bitlen;

	pkt_bitlen = pkt_len * NBBY;
	len = howmany(pkt_bitlen * 2, rate);

	service = IEEE80211_PLCP_SERVICE_LOCKED;
	if (rate == (11 * 2)) {
		int pkt_bitlen1;

		/*
		 * PLCP service field needs to be adjusted,
		 * if TX rate is 11Mbytes/s
		 */
		pkt_bitlen1 = len * 11;
		if (pkt_bitlen1 - pkt_bitlen >= NBBY)
			service |= IEEE80211_PLCP_SERVICE_LENEXT7;
	}

	plcp->i_signal = ieee80211_rate2plcp(rate, IEEE80211_T_CCK);
	plcp->i_service = service;
	plcp->i_length = htole16(len);
	/* NOTE: do NOT touch i_crc */
}

/*
 * Dispatch PLCP header construction by modulation type (OFDM vs DS);
 * any other phytype is a driver bug.
 */
static __inline void
bwi_plcp_header(const struct ieee80211_rate_table *rt,
    void *plcp, int pkt_len, uint8_t rate)
{
	enum ieee80211_phytype modtype;

	/*
	 * Assume caller has zeroed 'plcp'
	 */
	modtype = ieee80211_rate2phytype(rt, rate);
	if (modtype == IEEE80211_T_OFDM)
		bwi_ofdm_plcp_header(plcp, pkt_len, rate);
	else if (modtype == IEEE80211_T_DS)
		bwi_ds_plcp_header(plcp, pkt_len, rate);
	else
		panic("unsupport modulation type %u\n", modtype);
}

/*
 * Encapsulate a frame for normal transmit on the data ring: choose the
 * TX/fallback rates, prepend the device TX header (bwi_txbuf_hdr), load
 * the mbuf for DMA (defragmenting once on EFBIG), fill the descriptor
 * and kick the ring.  On error the mbuf is freed here; on success the
 * ring owns both 'm' and 'ni'.
 * NOTE(review): assumes the driver lock is held by the caller -- confirm
 * against bwi_transmit/bwi_start paths.
 */
static int
bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m,
    struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = &sc->sc_ic;
	struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING];
	struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
	struct bwi_txbuf *tb = &tbd->tbd_buf[idx];
	struct bwi_mac *mac;
	struct bwi_txbuf_hdr *hdr;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp;
	uint8_t rate, rate_fb;
	uint32_t mac_ctrl;
	uint16_t phy_ctrl;
	bus_addr_t paddr;
	int type, ismcast, pkt_len, error, rix;
#if 0
	const uint8_t *p;
	int i;
#endif

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);

	/* Get 802.11 frame len before prepending TX header */
	pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN;

	/*
	 * Find TX rate: mgmt/EAPOL -> mgmtrate, multicast -> mcastrate,
	 * fixed unicast rate if configured, otherwise ask the rate
	 * control module (fallback = one step below the chosen rix).
	 */
	tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
	if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) {
		rate = rate_fb = tp->mgmtrate;
	} else if (ismcast) {
		rate = rate_fb = tp->mcastrate;
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		rate = rate_fb = tp->ucastrate;
	} else {
		rix = ieee80211_ratectl_rate(ni, NULL, pkt_len);
		rate = ni->ni_txrate;

		if (rix > 0) {
			rate_fb = ni->ni_rates.rs_rates[rix-1] &
			    IEEE80211_RATE_VAL;
		} else {
			rate_fb = rate;
		}
	}
	tb->tb_rate[0] = rate;
	tb->tb_rate[1] = rate_fb;
	sc->sc_tx_rate = rate;

	/*
	 * TX radio tap
	 */
	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		/* Short preamble applies to DS rates above 1Mbps only. */
		if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_DS &&
		    (ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
		    rate != (1 * 2)) {
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		}
		sc->sc_tx_th.wt_rate = rate;

		ieee80211_radiotap_tx(vap, m);
	}

	/*
	 * Setup the embedded TX header
	 */
	M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: prepend TX header failed\n",
		    __func__);
		return ENOBUFS;
	}
	hdr = mtod(m, struct bwi_txbuf_hdr *);

	bzero(hdr, sizeof(*hdr));

	bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc));
	bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1));

	/* Unicast frames carry the fallback-rate ACK duration. */
	if (!ismcast) {
		uint16_t dur;

		dur = ieee80211_ack_duration(sc->sc_rates, rate,
		    ic->ic_flags & ~IEEE80211_F_SHPREAMBLE);

		hdr->txh_fb_duration = htole16(dur);
	}

	/* Ring + slot index encoded into txh_id, recovered in _bwi_txeof(). */
	hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) |
	    __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK);

	bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate);
	bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb);

	phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode,
	    BWI_TXH_PHY_C_ANTMODE_MASK);
	if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM)
		phy_ctrl |= BWI_TXH_PHY_C_OFDM;
	else if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && rate != (2 * 1))
		phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE;

	mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG;
	if (!ismcast)
		mac_ctrl |= BWI_TXH_MAC_C_ACK;
	if (ieee80211_rate2phytype(sc->sc_rates, rate_fb) == IEEE80211_T_OFDM)
		mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM;

	hdr->txh_mac_ctrl = htole32(mac_ctrl);
	hdr->txh_phy_ctrl = htole16(phy_ctrl);

	/* Catch any further usage */
	hdr = NULL;
	wh = NULL;

	/* DMA load; EFBIG means "too many segments" -> defrag and retry once. */
	error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
	    bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n",
		    __func__, error);
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m, M_NOWAIT);
		if (m_new == NULL) {
			device_printf(sc->sc_dev,
			    "%s: can't defrag TX buffer\n", __func__);
			error = ENOBUFS;
			goto back;
		} else {
			m = m_new;
		}

		error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
		    bwi_dma_buf_addr, &paddr,
		    BUS_DMA_NOWAIT);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: can't load TX buffer (2) %d\n",
			    __func__, error);
			goto back;
		}
	}
	error = 0;

	bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE);

	/* Ownership of the mbuf and node passes to the ring slot here. */
	tb->tb_mbuf = m;
	tb->tb_ni = ni;

#if 0
	p = mtod(m, const uint8_t *);
	for (i = 0; i < m->m_pkthdr.len; ++i) {
		if (i != 0 && i % 8 == 0)
			printf("\n");
		printf("%02x ", p[i]);
	}
	printf("\n");
#endif
	DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n",
	    idx, pkt_len, m->m_pkthdr.len);

	/* Setup TX descriptor */
	sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
	    BUS_DMASYNC_PREWRITE);

	/* Kick start */
	sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx);

back:
	if (error)
		m_freem(m);
	return error;
}

/*
 * Encapsulate a raw (ic_raw_xmit/bpf-injected) frame.  Same pipeline as
 * bwi_encap(), but rates, preamble and ACK policy come from the caller's
 * ieee80211_bpf_params instead of the rate control module.
 */
static int
bwi_encap_raw(struct bwi_softc *sc, int idx, struct mbuf *m,
    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING];
	struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
	struct bwi_txbuf *tb = &tbd->tbd_buf[idx];
	struct bwi_mac *mac;
	struct bwi_txbuf_hdr *hdr;
	struct ieee80211_frame *wh;
	uint8_t rate, rate_fb;
	uint32_t mac_ctrl;
	uint16_t phy_ctrl;
	bus_addr_t paddr;
	int ismcast, pkt_len, error;

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	wh = mtod(m, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);

	/* Get 802.11 frame len before prepending TX header */
	pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN;

	/*
	 * Find TX rate: caller supplies rate0 (and rate1 when try1 != 0);
	 * invalid rates are rejected with EINVAL and the mbuf is freed.
	 */
	rate = params->ibp_rate0;
	if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
		/* XXX fall back to mcast/mgmt rate? */
		m_freem(m);
		return EINVAL;
	}
	if (params->ibp_try1 != 0) {
		rate_fb = params->ibp_rate1;
		if (!ieee80211_isratevalid(ic->ic_rt, rate_fb)) {
			/* XXX fall back to rate0? */
			m_freem(m);
			return EINVAL;
		}
	} else
		rate_fb = rate;
	tb->tb_rate[0] = rate;
	tb->tb_rate[1] = rate_fb;
	sc->sc_tx_rate = rate;

	/*
	 * TX radio tap
	 */
	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;
		/* XXX IEEE80211_BPF_CRYPTO */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_tx_th.wt_rate = rate;

		ieee80211_radiotap_tx(vap, m);
	}

	/*
	 * Setup the embedded TX header
	 */
	M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: prepend TX header failed\n",
		    __func__);
		return ENOBUFS;
	}
	hdr = mtod(m, struct bwi_txbuf_hdr *);

	bzero(hdr, sizeof(*hdr));

	bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc));
	bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1));

	mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG;
	/* Expect an ACK only for unicast frames without BPF NOACK. */
	if (!ismcast && (params->ibp_flags & IEEE80211_BPF_NOACK) == 0) {
		uint16_t dur;

		dur = ieee80211_ack_duration(sc->sc_rates, rate_fb, 0);

		hdr->txh_fb_duration = htole16(dur);
		mac_ctrl |= BWI_TXH_MAC_C_ACK;
	}

	hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) |
	    __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK);

	bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate);
	bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb);

	phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode,
	    BWI_TXH_PHY_C_ANTMODE_MASK);
	if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) {
		phy_ctrl |= BWI_TXH_PHY_C_OFDM;
		mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM;
	} else if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE;

	hdr->txh_mac_ctrl = htole32(mac_ctrl);
	hdr->txh_phy_ctrl = htole16(phy_ctrl);

	/* Catch any further usage */
	hdr = NULL;
	wh = NULL;

	/* DMA load; on EFBIG defragment once and retry. */
	error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
	    bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		struct mbuf *m_new;

		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't load TX buffer (1) %d\n",
			    __func__, error);
			goto back;
		}
		m_new = m_defrag(m, M_NOWAIT);
		if (m_new == NULL) {
			device_printf(sc->sc_dev,
			    "%s: can't defrag TX buffer\n", __func__);
			error = ENOBUFS;
			goto back;
		}
		m = m_new;
		error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
		    bwi_dma_buf_addr, &paddr,
		    BUS_DMA_NOWAIT);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: can't load TX buffer (2) %d\n",
			    __func__, error);
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE);

	/* Ring slot now owns the mbuf and node reference. */
	tb->tb_mbuf = m;
	tb->tb_ni = ni;

	DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n",
	    idx, pkt_len, m->m_pkthdr.len);

	/* Setup TX descriptor */
	sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
	    BUS_DMASYNC_PREWRITE);

	/* Kick start */
	sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx);
back:
	if (error)
		m_freem(m);
	return error;
}

/*
 * Hand descriptor 'idx' to the 32-bit DMA engine by advancing the TX
 * index register; the register takes a byte offset, hence the
 * sizeof(struct bwi_desc32) scaling.
 */
static void
bwi_start_tx32(struct bwi_softc *sc, uint32_t tx_ctrl, int idx)
{
	idx = (idx + 1) % BWI_TX_NDESC;
	CSR_WRITE_4(sc, tx_ctrl + BWI_TX32_INDEX,
	    idx * sizeof(struct bwi_desc32));
}

/* 64-bit DMA variant: not implemented. */
static void
bwi_start_tx64(struct bwi_softc *sc, uint32_t tx_ctrl, int idx)
{
	/* TODO:64 */
}

/*
 * Process completed TX status entries up to the hardware's current
 * index, acknowledge them back to the device and restart transmission.
 */
static void
bwi_txeof_status32(struct bwi_softc *sc)
{
	uint32_t val, ctrl_base;
	int end_idx;

	ctrl_base = sc->sc_txstats->stats_ctrl_base;

	val = CSR_READ_4(sc, ctrl_base + BWI_RX32_STATUS);
	end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) /
	    sizeof(struct bwi_desc32);

	bwi_txeof_status(sc, end_idx);

	/* Tell hardware how far we have consumed. */
	CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX,
	    end_idx * sizeof(struct bwi_desc32));

	bwi_start_locked(sc);
}

/* 64-bit DMA variant: not implemented. */
static void
bwi_txeof_status64(struct bwi_softc *sc)
{
	/* TODO:64 */
}

/*
 * Complete one transmitted frame identified by the txh_id written in
 * bwi_encap*(): decode ring/slot, unload DMA, feed the result to rate
 * control (unicast/ACK-expected frames only) and release the mbuf/node.
 */
static void
_bwi_txeof(struct bwi_softc *sc, uint16_t tx_id, int acked, int data_txcnt)
{
	struct bwi_txbuf_data *tbd;
	struct bwi_txbuf *tb;
	int ring_idx, buf_idx;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	if (tx_id == 0) {
		device_printf(sc->sc_dev, "%s: zero tx id\n", __func__);
		return;
	}

	ring_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_RING_MASK);
	buf_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_IDX_MASK);

	KASSERT(ring_idx == BWI_TX_DATA_RING, ("ring_idx %d", ring_idx));
	KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx));

	tbd = &sc->sc_tx_bdata[ring_idx];
	KASSERT(tbd->tbd_used > 0, ("tbd_used %d", tbd->tbd_used));
	tbd->tbd_used--;

	tb = &tbd->tbd_buf[buf_idx];
	DPRINTF(sc, BWI_DBG_TXEOF, "txeof idx %d, "
	    "acked %d, data_txcnt %d, ni %p\n",
	    buf_idx, acked, data_txcnt, tb->tb_ni);

	bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap);

	if ((ni = tb->tb_ni) != NULL) {
		const struct bwi_txbuf_hdr *hdr =
		    mtod(tb->tb_mbuf, const struct bwi_txbuf_hdr *);
		vap = ni->ni_vap;

		/* NB: update rate control only for unicast frames */
		if (hdr->txh_mac_ctrl & htole32(BWI_TXH_MAC_C_ACK)) {
			/*
			 * Feed back 'acked and data_txcnt'.  Note that the
			 * generic AMRR code only understands one tx rate
			 * and the estimator doesn't handle real retry counts
			 * well so to avoid over-aggressive downshifting we
			 * treat any number of retries as "1".
			 */
			ieee80211_ratectl_tx_complete(vap, ni,
			    (data_txcnt > 1) ? IEEE80211_RATECTL_TX_SUCCESS :
			        IEEE80211_RATECTL_TX_FAILURE, &acked, NULL);
		}
		/* ieee80211_tx_complete() frees the mbuf and node ref. */
		ieee80211_tx_complete(ni, tb->tb_mbuf, !acked);
		tb->tb_ni = NULL;
	} else
		m_freem(tb->tb_mbuf);
	tb->tb_mbuf = NULL;

	/* Ring drained: stop the TX watchdog. */
	if (tbd->tbd_used == 0)
		sc->sc_tx_timer = 0;
}

/*
 * Walk the TX status descriptor array from the last consumed index up
 * to 'end_idx', completing every entry the hardware no longer marks
 * PENDING.
 */
static void
bwi_txeof_status(struct bwi_softc *sc, int end_idx)
{
	struct bwi_txstats_data *st = sc->sc_txstats;
	int idx;

	bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_POSTREAD);

	idx = st->stats_idx;
	while (idx != end_idx) {
		const struct bwi_txstats *stats = &st->stats[idx];

		if ((stats->txs_flags & BWI_TXS_F_PENDING) == 0) {
			int data_txcnt;

			data_txcnt = __SHIFTOUT(stats->txs_txcnt,
			    BWI_TXS_TXCNT_DATA);
			_bwi_txeof(sc, le16toh(stats->txs_id),
			    stats->txs_flags & BWI_TXS_F_ACKED,
			    data_txcnt);
		}
		idx = (idx + 1) % BWI_TXSTATS_NDESC;
	}
	st->stats_idx = idx;
}

/*
 * Drain the register-based TX status FIFO (BWI_TXSTATUS0/1) until no
 * valid entry remains, then restart transmission.
 */
static void
bwi_txeof(struct bwi_softc *sc)
{

	for (;;) {
		uint32_t tx_status0, tx_status1;
		uint16_t tx_id;
		int data_txcnt;

		tx_status0 = CSR_READ_4(sc, BWI_TXSTATUS0);
		if ((tx_status0 & BWI_TXSTATUS0_VALID) == 0)
			break;
		/*
		 * NOTE(review): tx_status1 is read but never used --
		 * presumably the read is needed to pop the status FIFO;
		 * confirm before removing.
		 */
		tx_status1 = CSR_READ_4(sc, BWI_TXSTATUS1);

		tx_id = __SHIFTOUT(tx_status0, BWI_TXSTATUS0_TXID_MASK);
		data_txcnt = __SHIFTOUT(tx_status0,
		    BWI_TXSTATUS0_DATA_TXCNT_MASK);

		/* Skip AMPDU/still-pending entries. */
		if (tx_status0 & (BWI_TXSTATUS0_AMPDU | BWI_TXSTATUS0_PENDING))
			continue;

		_bwi_txeof(sc, le16toh(tx_id), tx_status0 & BWI_TXSTATUS0_ACKED,
		    data_txcnt);
	}

	bwi_start_locked(sc);
}

/*
 * Power the BBP up and switch it to the requested clock mode.
 */
static int
bwi_bbp_power_on(struct bwi_softc *sc, enum bwi_clock_mode clk_mode)
{
	bwi_power_on(sc, 1);
	return bwi_set_clock_mode(sc, clk_mode);
}

/*
 * Drop the BBP to the slow clock and power it off.
 */
static void
bwi_bbp_power_off(struct bwi_softc *sc)
{
	bwi_set_clock_mode(sc, BWI_CLOCK_MODE_SLOW);
	bwi_power_off(sc, 1);
}

/*
 * Compute sc_pwron_delay (in microseconds, derived from the PLL-on delay
 * register and the minimum clock frequency).  Requires the COM regwin;
 * a no-op when the chip lacks BWI_CAP_CLKMODE.  Restores the previously
 * selected regwin before returning.
 */
static int
bwi_get_pwron_delay(struct bwi_softc *sc)
{
	struct bwi_regwin *com, *old;
	struct bwi_clock_freq freq;
	uint32_t val;
	int error;

	com = &sc->sc_com_regwin;
	KASSERT(BWI_REGWIN_EXIST(com), ("no regwin"));

	if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0)
		return 0;

	error = bwi_regwin_switch(sc, com, &old);
	if (error)
		return error;

	bwi_get_clock_freq(sc, &freq);

	val = CSR_READ_4(sc, BWI_PLL_ON_DELAY);
	sc->sc_pwron_delay = howmany((val + 2) * 1000000, freq.clkfreq_min);
	DPRINTF(sc, BWI_DBG_ATTACH, "power on delay %u\n", sc->sc_pwron_delay);

	return bwi_regwin_switch(sc, old, NULL);
}

/*
 * Enable the bus register window (if not already enabled) and mask its
 * interrupt vector.  Restores the previous regwin on return.
 */
static int
bwi_bus_attach(struct bwi_softc *sc)
{
	struct bwi_regwin *bus, *old;
	int error;

	bus = &sc->sc_bus_regwin;

	error = bwi_regwin_switch(sc, bus, &old);
	if (error)
		return error;

	if (!bwi_regwin_is_enabled(sc, bus))
		bwi_regwin_enable(sc, bus, 0);

	/* Disable interrupts */
	CSR_WRITE_4(sc, BWI_INTRVEC, 0);

	return bwi_regwin_switch(sc, old, NULL);
}

/*
 * Human-readable name of a register window type, for diagnostics.
 * Panics on an unknown type.
 */
static const char *
bwi_regwin_name(const struct bwi_regwin *rw)
{
	switch (rw->rw_type) {
	case BWI_REGWIN_T_COM:
		return "COM";
	case BWI_REGWIN_T_BUSPCI:
		return "PCI";
	case BWI_REGWIN_T_MAC:
		return "MAC";
	case BWI_REGWIN_T_BUSPCIE:
		return "PCIE";
	}
	panic("unknown regwin type 0x%04x\n", rw->rw_type);
	return NULL;
}

/*
 * Return the BWI_STATE_LO disable bit(s) appropriate for this bus
 * revision (read from BWI_ID_LO on every call).
 */
static uint32_t
bwi_regwin_disable_bits(struct bwi_softc *sc)
{
	uint32_t busrev;

	/* XXX cache this */
	busrev = __SHIFTOUT(CSR_READ_4(sc, BWI_ID_LO), BWI_ID_LO_BUSREV_MASK);
	DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT | BWI_DBG_MISC,
	    "bus rev %u\n", busrev);

	if (busrev == BWI_BUSREV_0)
		return BWI_STATE_LO_DISABLE1;
	else if (busrev == BWI_BUSREV_1)
		return BWI_STATE_LO_DISABLE2;
	else
		return (BWI_STATE_LO_DISABLE1 | BWI_STATE_LO_DISABLE2);
}

/*
 * A regwin is enabled iff its clock bit is set and neither reset nor
 * the bus-revision-specific disable bits are.  Returns 1/0.
 */
int
bwi_regwin_is_enabled(struct bwi_softc *sc, struct bwi_regwin *rw)
{
	uint32_t val, disable_bits;

	disable_bits = bwi_regwin_disable_bits(sc);
	val = CSR_READ_4(sc, BWI_STATE_LO);

	if ((val & (BWI_STATE_LO_CLOCK |
	    BWI_STATE_LO_RESET |
	    disable_bits)) == BWI_STATE_LO_CLOCK) {
		DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is enabled\n",
		    bwi_regwin_name(rw));
		return 1;
	} else {
		DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is disabled\n",
		    bwi_regwin_name(rw));
		return 0;
	}
}

/*
 * Disable a register window: stop its normal clock, wait for the core
 * to quiesce (clock-disabled + !BUSY), then put it into reset with the
 * clock gated.  Each state write is followed by a read-back flush plus
 * a short delay -- the ordering here is hardware-mandated; do not
 * reorder.
 */
void
bwi_regwin_disable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
{
	uint32_t state_lo, disable_bits;
	int i;

	state_lo = CSR_READ_4(sc, BWI_STATE_LO);

	/*
	 * If current regwin is in 'reset' state, it was already disabled.
	 */
	if (state_lo & BWI_STATE_LO_RESET) {
		DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT,
		    "%s was already disabled\n", bwi_regwin_name(rw));
		return;
	}

	disable_bits = bwi_regwin_disable_bits(sc);

	/*
	 * Disable normal clock
	 */
	state_lo = BWI_STATE_LO_CLOCK | disable_bits;
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/*
	 * Wait until normal clock is disabled
	 */
#define NRETRY	1000
	for (i = 0; i < NRETRY; ++i) {
		state_lo = CSR_READ_4(sc, BWI_STATE_LO);
		if (state_lo & disable_bits)
			break;
		DELAY(10);
	}
	if (i == NRETRY) {
		device_printf(sc->sc_dev, "%s disable clock timeout\n",
		    bwi_regwin_name(rw));
	}

	/* Wait until the core stops reporting BUSY. */
	for (i = 0; i < NRETRY; ++i) {
		uint32_t state_hi;

		state_hi = CSR_READ_4(sc, BWI_STATE_HI);
		if ((state_hi & BWI_STATE_HI_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (i == NRETRY) {
		device_printf(sc->sc_dev, "%s wait BUSY unset timeout\n",
		    bwi_regwin_name(rw));
	}
#undef NRETRY

	/*
	 * Reset and disable regwin with gated clock
	 */
	state_lo = BWI_STATE_LO_RESET | disable_bits |
	    BWI_STATE_LO_CLOCK | BWI_STATE_LO_GATED_CLOCK |
	    __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);

	/* Reset and disable regwin */
	state_lo = BWI_STATE_LO_RESET | disable_bits |
	    __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);
}

/*
 * Enable a register window: first force it into a known disabled/reset
 * state, then bring it out of reset through gated clock to normal
 * clock, clearing any latched SERROR/inband-error/timeout status on the
 * way.  Write ordering and the read-back flushes are hardware-mandated.
 */
void
bwi_regwin_enable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
{
	uint32_t state_lo, state_hi, imstate;

	bwi_regwin_disable(sc, rw, flags);

	/* Reset regwin with gated clock */
	state_lo = BWI_STATE_LO_RESET |
	    BWI_STATE_LO_CLOCK |
	    BWI_STATE_LO_GATED_CLOCK |
	    __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);

	/* Clear latched slave-error state, if any. */
	state_hi = CSR_READ_4(sc, BWI_STATE_HI);
	if (state_hi & BWI_STATE_HI_SERROR)
		CSR_WRITE_4(sc, BWI_STATE_HI, 0);

	/* Clear latched inband-error/timeout bits, if any. */
	imstate = CSR_READ_4(sc, BWI_IMSTATE);
	if (imstate & (BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT)) {
		imstate &= ~(BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT);
		CSR_WRITE_4(sc, BWI_IMSTATE, imstate);
	}

	/* Enable regwin with gated clock */
	state_lo = BWI_STATE_LO_CLOCK |
	    BWI_STATE_LO_GATED_CLOCK |
	    __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);

	/* Enable regwin with normal clock */
	state_lo = BWI_STATE_LO_CLOCK |
	    __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);
}

/*
 * Program the BSSID: install it in the hardware address filter and
 * write the packed myaddr+bssid template, 32 bits at a time in
 * little-endian byte order, into template memory at offset 0x20.
 */
static void
bwi_set_bssid(struct bwi_softc *sc, const uint8_t *bssid)
{
	struct bwi_mac *mac;
	struct bwi_myaddr_bssid buf;
	const uint8_t *p;
	uint32_t val;
	int n, i;

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	bwi_set_addr_filter(sc, BWI_ADDR_FILTER_BSSID, bssid);

	bcopy(sc->sc_ic.ic_macaddr, buf.myaddr, sizeof(buf.myaddr));
	bcopy(bssid, buf.bssid, sizeof(buf.bssid));

	n = sizeof(buf) / sizeof(val);
	p = (const uint8_t *)&buf;
	for (i = 0; i < n; ++i) {
		int j;

		/* Assemble the next 4 bytes LSB-first. */
		val = 0;
		for (j = 0; j < sizeof(val); ++j)
			val |= ((uint32_t)(*p++)) << (j * 8);

		TMPLT_WRITE_4(mac, 0x20 + (i * sizeof(val)), val);
	}
}

/*
 * net80211 ic_updateslot method: push the current short/long slot
 * setting to the MAC, but only while the device is running.
 */
static void
bwi_updateslot(struct ieee80211com *ic)
{
	struct bwi_softc *sc = ic->ic_softc;
	struct bwi_mac *mac;

	BWI_LOCK(sc);
	if (sc->sc_flags & BWI_F_RUNNING) {
		DPRINTF(sc, BWI_DBG_80211, "%s\n", __func__);

		KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
		    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
		mac = (struct bwi_mac *)sc->sc_cur_regwin;

		bwi_mac_updateslot(mac, (ic->ic_flags & IEEE80211_F_SHSLOT));
	}
	BWI_UNLOCK(sc);
}

/*
 * Periodic (15s) TX power calibration callout.  After the first run the
 * calibration type is switched to BWI_TXPWR_CALIB for subsequent runs.
 * Runs with the driver lock held (callout_init_mtx).
 */
static void
bwi_calibrate(void *xsc)
{
	struct bwi_softc *sc = xsc;
	struct bwi_mac *mac;

	BWI_ASSERT_LOCKED(sc);

	KASSERT(sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR,
	    ("opmode %d", sc->sc_ic.ic_opmode));

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	bwi_mac_calibrate_txpower(mac, sc->sc_txpwrcb_type);
	sc->sc_txpwrcb_type = BWI_TXPWR_CALIB;

	/* XXX 15 seconds */
	callout_reset(&sc->sc_calib_ch, hz * 15, bwi_calibrate, sc);
}

/*
 * RSSI for a received frame; thin wrapper delegating to the RF code of
 * the current MAC regwin.
 */
static int
bwi_calc_rssi(struct bwi_softc *sc, const struct bwi_rxbuf_hdr *hdr)
{
	struct bwi_mac *mac;

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	return bwi_rf_calc_rssi(mac, hdr);
}

/*
 * Noise floor estimate; thin wrapper delegating to the RF code of the
 * current MAC regwin.
 */
static int
bwi_calc_noise(struct bwi_softc *sc)
{
	struct bwi_mac *mac;

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	return bwi_rf_calc_noise(mac);
}

/*
 * Extract the PLCP rate code from the first little-endian PLCP word and
 * map it back to a net80211 rate for the given phytype.
 */
static __inline uint8_t
bwi_plcp2rate(const uint32_t plcp0, enum ieee80211_phytype type)
{
	uint32_t plcp = le32toh(plcp0) & IEEE80211_OFDM_PLCP_RATE_MASK;
	return (ieee80211_plcp2rate(plcp, type));
}

/*
 * Fill in the RX radiotap header from the device RX header and decoded
 * rate/rssi/noise.
 * NOTE(review): htole16() on rxh_flags1 is byte-swap-symmetric for
 * 16-bit values, so this is equivalent to le16toh(); the intent is to
 * test a little-endian flag field.
 */
static void
bwi_rx_radiotap(struct bwi_softc *sc, struct mbuf *m,
    struct bwi_rxbuf_hdr *hdr, const void *plcp, int rate, int rssi, int noise)
{
	const struct ieee80211_frame_min *wh;

	sc->sc_rx_th.wr_flags = IEEE80211_RADIOTAP_F_FCS;
	if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_SHPREAMBLE)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	wh = mtod(m, const struct ieee80211_frame_min *);
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP;

	sc->sc_rx_th.wr_tsf = hdr->rxh_tsf; /* No endian conversion */
	sc->sc_rx_th.wr_rate = rate;
	sc->sc_rx_th.wr_antsignal = rssi;
	sc->sc_rx_th.wr_antnoise = noise;
}

/*
 * Discover the LED configuration: pick a per-vendor (or default) action
 * table, override it with per-LED SPROM GPIO settings (0xff == "use
 * table default"), select the first blink-capable LED as sc_blink_led
 * and initialize the blink callout.
 */
static void
bwi_led_attach(struct bwi_softc *sc)
{
	const uint8_t *led_act = NULL;
	uint16_t gpio, val[BWI_LED_MAX];
	int i;

	for (i = 0; i < nitems(bwi_vendor_led_act); ++i) {
		if (sc->sc_pci_subvid == bwi_vendor_led_act[i].vid) {
			led_act = bwi_vendor_led_act[i].led_act;
			break;
		}
	}
	if (led_act == NULL)
		led_act = bwi_default_led_act;

	gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO01);
	val[0] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_0);
	val[1] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_1);

	gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO23);
	val[2] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_2);
	val[3] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_3);

	for (i = 0; i < BWI_LED_MAX; ++i) {
		struct bwi_led *led = &sc->sc_leds[i];

		if (val[i] == 0xff) {
			led->l_act = led_act[i];
		} else {
			if (val[i] & BWI_LED_ACT_LOW)
				led->l_flags |= BWI_LED_F_ACTLOW;
			led->l_act = __SHIFTOUT(val[i], BWI_LED_ACT_MASK);
		}
		led->l_mask = (1 << i);

		if (led->l_act == BWI_LED_ACT_BLINK_SLOW ||
		    led->l_act == BWI_LED_ACT_BLINK_POLL ||
		    led->l_act == BWI_LED_ACT_BLINK) {
			led->l_flags |= BWI_LED_F_BLINK;
			if (led->l_act == BWI_LED_ACT_BLINK_POLL)
				led->l_flags |= BWI_LED_F_POLLABLE;
			else if (led->l_act == BWI_LED_ACT_BLINK_SLOW)
				led->l_flags |= BWI_LED_F_SLOW;

			/* First blink-capable LED wins. */
			if (sc->sc_blink_led == NULL) {
				sc->sc_blink_led = led;
				if (led->l_flags & BWI_LED_F_SLOW)
					BWI_LED_SLOWDOWN(sc->sc_led_idle);
			}
		}

		DPRINTF(sc, BWI_DBG_LED | BWI_DBG_ATTACH,
		    "%dth led, act %d, lowact %d\n", i,
		    led->l_act, led->l_flags & BWI_LED_F_ACTLOW);
	}
	callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0);
}

/*
 * Set or clear an LED's bit in a GPIO control value, honoring
 * active-low polarity.  Returns the updated value.
 */
static __inline uint16_t
bwi_led_onoff(const struct bwi_led *led, uint16_t val, int on)
{
	if (led->l_flags & BWI_LED_F_ACTLOW)
		on = !on;
	if (on)
		val |= led->l_mask;
	else
		val &= ~led->l_mask;
	return val;
}

/*
 * Update steady-state LEDs on an 802.11 state change.  Blinking LEDs
 * are left to the blink machinery except on transition to INIT, which
 * also cancels any blink in progress.
 */
static void
bwi_led_newstate(struct bwi_softc *sc, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;
	int i;

	if (nstate == IEEE80211_S_INIT) {
		callout_stop(&sc->sc_led_blink_ch);
		sc->sc_led_blinking = 0;
	}

	if ((sc->sc_flags & BWI_F_RUNNING) == 0)
		return;

	val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
	for (i = 0; i < BWI_LED_MAX; ++i) {
		struct bwi_led *led = &sc->sc_leds[i];
		int on;

		if (led->l_act == BWI_LED_ACT_UNKN ||
		    led->l_act == BWI_LED_ACT_NULL)
			continue;

		if ((led->l_flags & BWI_LED_F_BLINK) &&
		    nstate != IEEE80211_S_INIT)
			continue;

		switch (led->l_act) {
		case BWI_LED_ACT_ON:	/* Always on */
			on = 1;
			break;
		case BWI_LED_ACT_OFF:	/* Always off */
		case BWI_LED_ACT_5GHZ:	/* TODO: 11A */
			on = 0;
			break;
		default:
			on = 1;
			switch (nstate) {
			case IEEE80211_S_INIT:
				on = 0;
				break;
			case IEEE80211_S_RUN:
				if (led->l_act == BWI_LED_ACT_11G &&
				    ic->ic_curmode != IEEE80211_MODE_11G)
					on = 0;
				break;
			default:
				if (led->l_act == BWI_LED_ACT_ASSOC)
					on = 0;
				break;
			}
			break;
		}

		val = bwi_led_onoff(led, val, on);
	}
	CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);
}

/*
 * Kick the blink LED for an RX/TX/poll event.  Blink duration is looked
 * up by the last TX/RX rate; a blink already in progress is not
 * restarted.  POLL events are throttled by sc_led_idle.
 */
static void
bwi_led_event(struct bwi_softc *sc, int event)
{
	struct bwi_led *led = sc->sc_blink_led;
	int rate;

	if (event == BWI_LED_EVENT_POLL) {
		if ((led->l_flags & BWI_LED_F_POLLABLE) == 0)
			return;
		if (ticks - sc->sc_led_ticks < sc->sc_led_idle)
			return;
	}

	sc->sc_led_ticks = ticks;
	if (sc->sc_led_blinking)
		return;

	switch (event) {
	case BWI_LED_EVENT_RX:
		rate = sc->sc_rx_rate;
		break;
	case BWI_LED_EVENT_TX:
		rate = sc->sc_tx_rate;
		break;
	case BWI_LED_EVENT_POLL:
		rate = 0;
		break;
	default:
		panic("unknown LED event %d\n", event);
		break;
	}
	bwi_led_blink_start(sc, bwi_led_duration[rate].on_dur,
	    bwi_led_duration[rate].off_dur);
}

/*
 * Begin one blink cycle: turn the LED on now and arm the callout to
 * turn it off after 'on_dur' ticks; 'off_dur' is saved for the second
 * half of the cycle.  Slow LEDs get both durations stretched.
 */
static void
bwi_led_blink_start(struct bwi_softc *sc, int on_dur, int off_dur)
{
	struct bwi_led *led = sc->sc_blink_led;
	uint16_t val;

	val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
	val = bwi_led_onoff(led, val, 1);
	CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);

	if (led->l_flags & BWI_LED_F_SLOW) {
		BWI_LED_SLOWDOWN(on_dur);
		BWI_LED_SLOWDOWN(off_dur);
	}

	sc->sc_led_blinking = 1;
	sc->sc_led_blink_offdur = off_dur;

	callout_reset(&sc->sc_led_blink_ch, on_dur, bwi_led_blink_next, sc);
}

/*
 * Second half of a blink cycle: turn the LED off and schedule the end
 * of the cycle after the saved off-duration.
 */
static void
bwi_led_blink_next(void *xsc)
{
	struct bwi_softc *sc = xsc;
	uint16_t val;

	val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
	val = bwi_led_onoff(sc->sc_blink_led, val, 0);
	CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);

	callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur,
	    bwi_led_blink_end, sc);
}

/* End of a blink cycle: allow the next bwi_led_event() to blink again. */
static void
bwi_led_blink_end(void *xsc)
{
	struct bwi_softc *sc = xsc;
	sc->sc_led_blinking = 0;
}

/*
 * Task-queue handler that reinitializes the device after a fatal error
 * (scheduled elsewhere; 'pending' is the standard taskqueue argument).
 */
static void
bwi_restart(void *xsc, int pending)
{
	struct bwi_softc *sc = xsc;

	device_printf(sc->sc_dev, "%s begin, help!\n", __func__);
	BWI_LOCK(sc);
	bwi_init_statechg(sc, 0);
#if 0
	bwi_start_locked(sc);
#endif
	BWI_UNLOCK(sc);
}
| 385 sc->sc_led_blink = 1; 386 sc->sc_txpwr_calib = 1; 387#ifdef BWI_DEBUG 388 sc->sc_debug = bwi_debug; 389#endif 390 bwi_power_on(sc, 1); 391 392 error = bwi_bbp_attach(sc); 393 if (error) 394 goto fail; 395 396 error = bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST); 397 if (error) 398 goto fail; 399 400 if (BWI_REGWIN_EXIST(&sc->sc_com_regwin)) { 401 error = bwi_set_clock_delay(sc); 402 if (error) 403 goto fail; 404 405 error = bwi_set_clock_mode(sc, BWI_CLOCK_MODE_FAST); 406 if (error) 407 goto fail; 408 409 error = bwi_get_pwron_delay(sc); 410 if (error) 411 goto fail; 412 } 413 414 error = bwi_bus_attach(sc); 415 if (error) 416 goto fail; 417 418 bwi_get_card_flags(sc); 419 420 bwi_led_attach(sc); 421 422 for (i = 0; i < sc->sc_nmac; ++i) { 423 struct bwi_regwin *old; 424 425 mac = &sc->sc_mac[i]; 426 error = bwi_regwin_switch(sc, &mac->mac_regwin, &old); 427 if (error) 428 goto fail; 429 430 error = bwi_mac_lateattach(mac); 431 if (error) 432 goto fail; 433 434 error = bwi_regwin_switch(sc, old, NULL); 435 if (error) 436 goto fail; 437 } 438 439 /* 440 * XXX First MAC is known to exist 441 * TODO2 442 */ 443 mac = &sc->sc_mac[0]; 444 phy = &mac->mac_phy; 445 446 bwi_bbp_power_off(sc); 447 448 error = bwi_dma_alloc(sc); 449 if (error) 450 goto fail; 451 452 error = bwi_mac_fw_alloc(mac); 453 if (error) 454 goto fail; 455 456 callout_init_mtx(&sc->sc_watchdog_timer, &sc->sc_mtx, 0); 457 458 /* 459 * Setup ratesets, phytype, channels and get MAC address 460 */ 461 if (phy->phy_mode == IEEE80211_MODE_11B || 462 phy->phy_mode == IEEE80211_MODE_11G) { 463 if (phy->phy_mode == IEEE80211_MODE_11B) { 464 ic->ic_phytype = IEEE80211_T_DS; 465 } else { 466 ic->ic_phytype = IEEE80211_T_OFDM; 467 } 468 469 bwi_get_eaddr(sc, BWI_SPROM_11BG_EADDR, ic->ic_macaddr); 470 if (IEEE80211_IS_MULTICAST(ic->ic_macaddr)) { 471 bwi_get_eaddr(sc, BWI_SPROM_11A_EADDR, ic->ic_macaddr); 472 if (IEEE80211_IS_MULTICAST(ic->ic_macaddr)) { 473 device_printf(dev, 474 "invalid MAC address: %6D\n", 
475 ic->ic_macaddr, ":"); 476 } 477 } 478 } else if (phy->phy_mode == IEEE80211_MODE_11A) { 479 /* TODO:11A */ 480 error = ENXIO; 481 goto fail; 482 } else { 483 panic("unknown phymode %d\n", phy->phy_mode); 484 } 485 486 /* Get locale */ 487 sc->sc_locale = __SHIFTOUT(bwi_read_sprom(sc, BWI_SPROM_CARD_INFO), 488 BWI_SPROM_CARD_INFO_LOCALE); 489 DPRINTF(sc, BWI_DBG_ATTACH, "locale: %d\n", sc->sc_locale); 490 /* XXX use locale */ 491 bwi_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, 492 ic->ic_channels); 493 494 ic->ic_softc = sc; 495 ic->ic_name = device_get_nameunit(dev); 496 ic->ic_caps = IEEE80211_C_STA | 497 IEEE80211_C_SHSLOT | 498 IEEE80211_C_SHPREAMBLE | 499 IEEE80211_C_WPA | 500 IEEE80211_C_BGSCAN | 501 IEEE80211_C_MONITOR; 502 ic->ic_opmode = IEEE80211_M_STA; 503 ieee80211_ifattach(ic); 504 505 ic->ic_headroom = sizeof(struct bwi_txbuf_hdr); 506 507 /* override default methods */ 508 ic->ic_vap_create = bwi_vap_create; 509 ic->ic_vap_delete = bwi_vap_delete; 510 ic->ic_raw_xmit = bwi_raw_xmit; 511 ic->ic_updateslot = bwi_updateslot; 512 ic->ic_scan_start = bwi_scan_start; 513 ic->ic_scan_end = bwi_scan_end; 514 ic->ic_getradiocaps = bwi_getradiocaps; 515 ic->ic_set_channel = bwi_set_channel; 516 ic->ic_transmit = bwi_transmit; 517 ic->ic_parent = bwi_parent; 518 519 sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); 520 521 ieee80211_radiotap_attach(ic, 522 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 523 BWI_TX_RADIOTAP_PRESENT, 524 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 525 BWI_RX_RADIOTAP_PRESENT); 526 527 /* 528 * Add sysctl nodes 529 */ 530 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 531 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 532 "fw_version", CTLFLAG_RD, &sc->sc_fw_version, 0, 533 "Firmware version"); 534 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 535 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 536 "led_idle", CTLFLAG_RW, &sc->sc_led_idle, 0, 537 "# ticks before LED enters idle state"); 538 
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 539 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 540 "led_blink", CTLFLAG_RW, &sc->sc_led_blink, 0, 541 "Allow LED to blink"); 542 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 543 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 544 "txpwr_calib", CTLFLAG_RW, &sc->sc_txpwr_calib, 0, 545 "Enable software TX power calibration"); 546#ifdef BWI_DEBUG 547 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 548 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 549 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); 550#endif 551 if (bootverbose) 552 ieee80211_announce(ic); 553 554 return (0); 555fail: 556 BWI_LOCK_DESTROY(sc); 557 return (error); 558} 559 560int 561bwi_detach(struct bwi_softc *sc) 562{ 563 struct ieee80211com *ic = &sc->sc_ic; 564 int i; 565 566 bwi_stop(sc, 1); 567 callout_drain(&sc->sc_led_blink_ch); 568 callout_drain(&sc->sc_calib_ch); 569 callout_drain(&sc->sc_watchdog_timer); 570 ieee80211_ifdetach(ic); 571 572 for (i = 0; i < sc->sc_nmac; ++i) 573 bwi_mac_detach(&sc->sc_mac[i]); 574 bwi_dma_free(sc); 575 taskqueue_free(sc->sc_tq); 576 mbufq_drain(&sc->sc_snd); 577 578 BWI_LOCK_DESTROY(sc); 579 580 return (0); 581} 582 583static struct ieee80211vap * 584bwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 585 enum ieee80211_opmode opmode, int flags, 586 const uint8_t bssid[IEEE80211_ADDR_LEN], 587 const uint8_t mac[IEEE80211_ADDR_LEN]) 588{ 589 struct bwi_vap *bvp; 590 struct ieee80211vap *vap; 591 592 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 593 return NULL; 594 bvp = malloc(sizeof(struct bwi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 595 vap = &bvp->bv_vap; 596 /* enable s/w bmiss handling for sta mode */ 597 ieee80211_vap_setup(ic, vap, name, unit, opmode, 598 flags | IEEE80211_CLONE_NOBEACONS, bssid); 599 600 /* override default methods */ 601 bvp->bv_newstate = vap->iv_newstate; 602 vap->iv_newstate = bwi_newstate; 603#if 0 604 vap->iv_update_beacon 
= bwi_beacon_update; 605#endif 606 ieee80211_ratectl_init(vap); 607 608 /* complete setup */ 609 ieee80211_vap_attach(vap, bwi_media_change, ieee80211_media_status, 610 mac); 611 ic->ic_opmode = opmode; 612 return vap; 613} 614 615static void 616bwi_vap_delete(struct ieee80211vap *vap) 617{ 618 struct bwi_vap *bvp = BWI_VAP(vap); 619 620 ieee80211_ratectl_deinit(vap); 621 ieee80211_vap_detach(vap); 622 free(bvp, M_80211_VAP); 623} 624 625void 626bwi_suspend(struct bwi_softc *sc) 627{ 628 bwi_stop(sc, 1); 629} 630 631void 632bwi_resume(struct bwi_softc *sc) 633{ 634 635 if (sc->sc_ic.ic_nrunning > 0) 636 bwi_init(sc); 637} 638 639int 640bwi_shutdown(struct bwi_softc *sc) 641{ 642 bwi_stop(sc, 1); 643 return 0; 644} 645 646static void 647bwi_power_on(struct bwi_softc *sc, int with_pll) 648{ 649 uint32_t gpio_in, gpio_out, gpio_en; 650 uint16_t status; 651 652 gpio_in = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); 653 if (gpio_in & BWI_PCIM_GPIO_PWR_ON) 654 goto back; 655 656 gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); 657 gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4); 658 659 gpio_out |= BWI_PCIM_GPIO_PWR_ON; 660 gpio_en |= BWI_PCIM_GPIO_PWR_ON; 661 if (with_pll) { 662 /* Turn off PLL first */ 663 gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF; 664 gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF; 665 } 666 667 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); 668 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4); 669 DELAY(1000); 670 671 if (with_pll) { 672 /* Turn on PLL */ 673 gpio_out &= ~BWI_PCIM_GPIO_PLL_PWR_OFF; 674 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); 675 DELAY(5000); 676 } 677 678back: 679 /* Clear "Signaled Target Abort" */ 680 status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2); 681 status &= ~PCIM_STATUS_STABORT; 682 pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2); 683} 684 685static int 686bwi_power_off(struct bwi_softc *sc, int with_pll) 687{ 688 uint32_t gpio_out, gpio_en; 689 
690 pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); /* dummy read */ 691 gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); 692 gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4); 693 694 gpio_out &= ~BWI_PCIM_GPIO_PWR_ON; 695 gpio_en |= BWI_PCIM_GPIO_PWR_ON; 696 if (with_pll) { 697 gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF; 698 gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF; 699 } 700 701 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4); 702 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4); 703 return 0; 704} 705 706int 707bwi_regwin_switch(struct bwi_softc *sc, struct bwi_regwin *rw, 708 struct bwi_regwin **old_rw) 709{ 710 int error; 711 712 if (old_rw != NULL) 713 *old_rw = NULL; 714 715 if (!BWI_REGWIN_EXIST(rw)) 716 return EINVAL; 717 718 if (sc->sc_cur_regwin != rw) { 719 error = bwi_regwin_select(sc, rw->rw_id); 720 if (error) { 721 device_printf(sc->sc_dev, "can't select regwin %d\n", 722 rw->rw_id); 723 return error; 724 } 725 } 726 727 if (old_rw != NULL) 728 *old_rw = sc->sc_cur_regwin; 729 sc->sc_cur_regwin = rw; 730 return 0; 731} 732 733static int 734bwi_regwin_select(struct bwi_softc *sc, int id) 735{ 736 uint32_t win = BWI_PCIM_REGWIN(id); 737 int i; 738 739#define RETRY_MAX 50 740 for (i = 0; i < RETRY_MAX; ++i) { 741 pci_write_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, win, 4); 742 if (pci_read_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, 4) == win) 743 return 0; 744 DELAY(10); 745 } 746#undef RETRY_MAX 747 748 return ENXIO; 749} 750 751static void 752bwi_regwin_info(struct bwi_softc *sc, uint16_t *type, uint8_t *rev) 753{ 754 uint32_t val; 755 756 val = CSR_READ_4(sc, BWI_ID_HI); 757 *type = BWI_ID_HI_REGWIN_TYPE(val); 758 *rev = BWI_ID_HI_REGWIN_REV(val); 759 760 DPRINTF(sc, BWI_DBG_ATTACH, "regwin: type 0x%03x, rev %d, " 761 "vendor 0x%04x\n", *type, *rev, 762 __SHIFTOUT(val, BWI_ID_HI_REGWIN_VENDOR_MASK)); 763} 764 765static int 766bwi_bbp_attach(struct bwi_softc *sc) 767{ 768 uint16_t bbp_id, rw_type; 769 uint8_t 
rw_rev; 770 uint32_t info; 771 int error, nregwin, i; 772 773 /* 774 * Get 0th regwin information 775 * NOTE: 0th regwin should exist 776 */ 777 error = bwi_regwin_select(sc, 0); 778 if (error) { 779 device_printf(sc->sc_dev, "can't select regwin 0\n"); 780 return error; 781 } 782 bwi_regwin_info(sc, &rw_type, &rw_rev); 783 784 /* 785 * Find out BBP id 786 */ 787 bbp_id = 0; 788 info = 0; 789 if (rw_type == BWI_REGWIN_T_COM) { 790 info = CSR_READ_4(sc, BWI_INFO); 791 bbp_id = __SHIFTOUT(info, BWI_INFO_BBPID_MASK); 792 793 BWI_CREATE_REGWIN(&sc->sc_com_regwin, 0, rw_type, rw_rev); 794 795 sc->sc_cap = CSR_READ_4(sc, BWI_CAPABILITY); 796 } else { 797 for (i = 0; i < nitems(bwi_bbpid_map); ++i) { 798 if (sc->sc_pci_did >= bwi_bbpid_map[i].did_min && 799 sc->sc_pci_did <= bwi_bbpid_map[i].did_max) { 800 bbp_id = bwi_bbpid_map[i].bbp_id; 801 break; 802 } 803 } 804 if (bbp_id == 0) { 805 device_printf(sc->sc_dev, "no BBP id for device id " 806 "0x%04x\n", sc->sc_pci_did); 807 return ENXIO; 808 } 809 810 info = __SHIFTIN(sc->sc_pci_revid, BWI_INFO_BBPREV_MASK) | 811 __SHIFTIN(0, BWI_INFO_BBPPKG_MASK); 812 } 813 814 /* 815 * Find out number of regwins 816 */ 817 nregwin = 0; 818 if (rw_type == BWI_REGWIN_T_COM && rw_rev >= 4) { 819 nregwin = __SHIFTOUT(info, BWI_INFO_NREGWIN_MASK); 820 } else { 821 for (i = 0; i < nitems(bwi_regwin_count); ++i) { 822 if (bwi_regwin_count[i].bbp_id == bbp_id) { 823 nregwin = bwi_regwin_count[i].nregwin; 824 break; 825 } 826 } 827 if (nregwin == 0) { 828 device_printf(sc->sc_dev, "no number of win for " 829 "BBP id 0x%04x\n", bbp_id); 830 return ENXIO; 831 } 832 } 833 834 /* Record BBP id/rev for later using */ 835 sc->sc_bbp_id = bbp_id; 836 sc->sc_bbp_rev = __SHIFTOUT(info, BWI_INFO_BBPREV_MASK); 837 sc->sc_bbp_pkg = __SHIFTOUT(info, BWI_INFO_BBPPKG_MASK); 838 device_printf(sc->sc_dev, "BBP: id 0x%04x, rev 0x%x, pkg %d\n", 839 sc->sc_bbp_id, sc->sc_bbp_rev, sc->sc_bbp_pkg); 840 841 DPRINTF(sc, BWI_DBG_ATTACH, "nregwin %d, cap 0x%08x\n", 
842 nregwin, sc->sc_cap); 843 844 /* 845 * Create rest of the regwins 846 */ 847 848 /* Don't re-create common regwin, if it is already created */ 849 i = BWI_REGWIN_EXIST(&sc->sc_com_regwin) ? 1 : 0; 850 851 for (; i < nregwin; ++i) { 852 /* 853 * Get regwin information 854 */ 855 error = bwi_regwin_select(sc, i); 856 if (error) { 857 device_printf(sc->sc_dev, 858 "can't select regwin %d\n", i); 859 return error; 860 } 861 bwi_regwin_info(sc, &rw_type, &rw_rev); 862 863 /* 864 * Try attach: 865 * 1) Bus (PCI/PCIE) regwin 866 * 2) MAC regwin 867 * Ignore rest types of regwin 868 */ 869 if (rw_type == BWI_REGWIN_T_BUSPCI || 870 rw_type == BWI_REGWIN_T_BUSPCIE) { 871 if (BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) { 872 device_printf(sc->sc_dev, 873 "bus regwin already exists\n"); 874 } else { 875 BWI_CREATE_REGWIN(&sc->sc_bus_regwin, i, 876 rw_type, rw_rev); 877 } 878 } else if (rw_type == BWI_REGWIN_T_MAC) { 879 /* XXX ignore return value */ 880 bwi_mac_attach(sc, i, rw_rev); 881 } 882 } 883 884 /* At least one MAC shold exist */ 885 if (!BWI_REGWIN_EXIST(&sc->sc_mac[0].mac_regwin)) { 886 device_printf(sc->sc_dev, "no MAC was found\n"); 887 return ENXIO; 888 } 889 KASSERT(sc->sc_nmac > 0, ("no mac's")); 890 891 /* Bus regwin must exist */ 892 if (!BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) { 893 device_printf(sc->sc_dev, "no bus regwin was found\n"); 894 return ENXIO; 895 } 896 897 /* Start with first MAC */ 898 error = bwi_regwin_switch(sc, &sc->sc_mac[0].mac_regwin, NULL); 899 if (error) 900 return error; 901 902 return 0; 903} 904 905int 906bwi_bus_init(struct bwi_softc *sc, struct bwi_mac *mac) 907{ 908 struct bwi_regwin *old, *bus; 909 uint32_t val; 910 int error; 911 912 bus = &sc->sc_bus_regwin; 913 KASSERT(sc->sc_cur_regwin == &mac->mac_regwin, ("not cur regwin")); 914 915 /* 916 * Tell bus to generate requested interrupts 917 */ 918 if (bus->rw_rev < 6 && bus->rw_type == BWI_REGWIN_T_BUSPCI) { 919 /* 920 * NOTE: Read BWI_FLAGS from MAC regwin 921 */ 922 val = 
CSR_READ_4(sc, BWI_FLAGS); 923 924 error = bwi_regwin_switch(sc, bus, &old); 925 if (error) 926 return error; 927 928 CSR_SETBITS_4(sc, BWI_INTRVEC, (val & BWI_FLAGS_INTR_MASK)); 929 } else { 930 uint32_t mac_mask; 931 932 mac_mask = 1 << mac->mac_id; 933 934 error = bwi_regwin_switch(sc, bus, &old); 935 if (error) 936 return error; 937 938 val = pci_read_config(sc->sc_dev, BWI_PCIR_INTCTL, 4); 939 val |= mac_mask << 8; 940 pci_write_config(sc->sc_dev, BWI_PCIR_INTCTL, val, 4); 941 } 942 943 if (sc->sc_flags & BWI_F_BUS_INITED) 944 goto back; 945 946 if (bus->rw_type == BWI_REGWIN_T_BUSPCI) { 947 /* 948 * Enable prefetch and burst 949 */ 950 CSR_SETBITS_4(sc, BWI_BUS_CONFIG, 951 BWI_BUS_CONFIG_PREFETCH | BWI_BUS_CONFIG_BURST); 952 953 if (bus->rw_rev < 5) { 954 struct bwi_regwin *com = &sc->sc_com_regwin; 955 956 /* 957 * Configure timeouts for bus operation 958 */ 959 960 /* 961 * Set service timeout and request timeout 962 */ 963 CSR_SETBITS_4(sc, BWI_CONF_LO, 964 __SHIFTIN(BWI_CONF_LO_SERVTO, BWI_CONF_LO_SERVTO_MASK) | 965 __SHIFTIN(BWI_CONF_LO_REQTO, BWI_CONF_LO_REQTO_MASK)); 966 967 /* 968 * If there is common regwin, we switch to that regwin 969 * and switch back to bus regwin once we have done. 
970 */ 971 if (BWI_REGWIN_EXIST(com)) { 972 error = bwi_regwin_switch(sc, com, NULL); 973 if (error) 974 return error; 975 } 976 977 /* Let bus know what we have changed */ 978 CSR_WRITE_4(sc, BWI_BUS_ADDR, BWI_BUS_ADDR_MAGIC); 979 CSR_READ_4(sc, BWI_BUS_ADDR); /* Flush */ 980 CSR_WRITE_4(sc, BWI_BUS_DATA, 0); 981 CSR_READ_4(sc, BWI_BUS_DATA); /* Flush */ 982 983 if (BWI_REGWIN_EXIST(com)) { 984 error = bwi_regwin_switch(sc, bus, NULL); 985 if (error) 986 return error; 987 } 988 } else if (bus->rw_rev >= 11) { 989 /* 990 * Enable memory read multiple 991 */ 992 CSR_SETBITS_4(sc, BWI_BUS_CONFIG, BWI_BUS_CONFIG_MRM); 993 } 994 } else { 995 /* TODO:PCIE */ 996 } 997 998 sc->sc_flags |= BWI_F_BUS_INITED; 999back: 1000 return bwi_regwin_switch(sc, old, NULL); 1001} 1002 1003static void 1004bwi_get_card_flags(struct bwi_softc *sc) 1005{ 1006#define PCI_VENDOR_APPLE 0x106b 1007#define PCI_VENDOR_DELL 0x1028 1008 sc->sc_card_flags = bwi_read_sprom(sc, BWI_SPROM_CARD_FLAGS); 1009 if (sc->sc_card_flags == 0xffff) 1010 sc->sc_card_flags = 0; 1011 1012 if (sc->sc_pci_subvid == PCI_VENDOR_DELL && 1013 sc->sc_bbp_id == BWI_BBPID_BCM4301 && 1014 sc->sc_pci_revid == 0x74) 1015 sc->sc_card_flags |= BWI_CARD_F_BT_COEXIST; 1016 1017 if (sc->sc_pci_subvid == PCI_VENDOR_APPLE && 1018 sc->sc_pci_subdid == 0x4e && /* XXX */ 1019 sc->sc_pci_revid > 0x40) 1020 sc->sc_card_flags |= BWI_CARD_F_PA_GPIO9; 1021 1022 DPRINTF(sc, BWI_DBG_ATTACH, "card flags 0x%04x\n", sc->sc_card_flags); 1023#undef PCI_VENDOR_DELL 1024#undef PCI_VENDOR_APPLE 1025} 1026 1027static void 1028bwi_get_eaddr(struct bwi_softc *sc, uint16_t eaddr_ofs, uint8_t *eaddr) 1029{ 1030 int i; 1031 1032 for (i = 0; i < 3; ++i) { 1033 *((uint16_t *)eaddr + i) = 1034 htobe16(bwi_read_sprom(sc, eaddr_ofs + 2 * i)); 1035 } 1036} 1037 1038static void 1039bwi_get_clock_freq(struct bwi_softc *sc, struct bwi_clock_freq *freq) 1040{ 1041 struct bwi_regwin *com; 1042 uint32_t val; 1043 u_int div; 1044 int src; 1045 1046 bzero(freq, 
sizeof(*freq)); 1047 com = &sc->sc_com_regwin; 1048 1049 KASSERT(BWI_REGWIN_EXIST(com), ("regwin does not exist")); 1050 KASSERT(sc->sc_cur_regwin == com, ("wrong regwin")); 1051 KASSERT(sc->sc_cap & BWI_CAP_CLKMODE, ("wrong clock mode")); 1052 1053 /* 1054 * Calculate clock frequency 1055 */ 1056 src = -1; 1057 div = 0; 1058 if (com->rw_rev < 6) { 1059 val = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4); 1060 if (val & BWI_PCIM_GPIO_OUT_CLKSRC) { 1061 src = BWI_CLKSRC_PCI; 1062 div = 64; 1063 } else { 1064 src = BWI_CLKSRC_CS_OSC; 1065 div = 32; 1066 } 1067 } else if (com->rw_rev < 10) { 1068 val = CSR_READ_4(sc, BWI_CLOCK_CTRL); 1069 1070 src = __SHIFTOUT(val, BWI_CLOCK_CTRL_CLKSRC); 1071 if (src == BWI_CLKSRC_LP_OSC) { 1072 div = 1; 1073 } else { 1074 div = (__SHIFTOUT(val, BWI_CLOCK_CTRL_FDIV) + 1) << 2; 1075 1076 /* Unknown source */ 1077 if (src >= BWI_CLKSRC_MAX) 1078 src = BWI_CLKSRC_CS_OSC; 1079 } 1080 } else { 1081 val = CSR_READ_4(sc, BWI_CLOCK_INFO); 1082 1083 src = BWI_CLKSRC_CS_OSC; 1084 div = (__SHIFTOUT(val, BWI_CLOCK_INFO_FDIV) + 1) << 2; 1085 } 1086 1087 KASSERT(src >= 0 && src < BWI_CLKSRC_MAX, ("bad src %d", src)); 1088 KASSERT(div != 0, ("div zero")); 1089 1090 DPRINTF(sc, BWI_DBG_ATTACH, "clksrc %s\n", 1091 src == BWI_CLKSRC_PCI ? "PCI" : 1092 (src == BWI_CLKSRC_LP_OSC ? 
"LP_OSC" : "CS_OSC")); 1093 1094 freq->clkfreq_min = bwi_clkfreq[src].freq_min / div; 1095 freq->clkfreq_max = bwi_clkfreq[src].freq_max / div; 1096 1097 DPRINTF(sc, BWI_DBG_ATTACH, "clkfreq min %u, max %u\n", 1098 freq->clkfreq_min, freq->clkfreq_max); 1099} 1100 1101static int 1102bwi_set_clock_mode(struct bwi_softc *sc, enum bwi_clock_mode clk_mode) 1103{ 1104 struct bwi_regwin *old, *com; 1105 uint32_t clk_ctrl, clk_src; 1106 int error, pwr_off = 0; 1107 1108 com = &sc->sc_com_regwin; 1109 if (!BWI_REGWIN_EXIST(com)) 1110 return 0; 1111 1112 if (com->rw_rev >= 10 || com->rw_rev < 6) 1113 return 0; 1114 1115 /* 1116 * For common regwin whose rev is [6, 10), the chip 1117 * must be capable to change clock mode. 1118 */ 1119 if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0) 1120 return 0; 1121 1122 error = bwi_regwin_switch(sc, com, &old); 1123 if (error) 1124 return error; 1125 1126 if (clk_mode == BWI_CLOCK_MODE_FAST) 1127 bwi_power_on(sc, 0); /* Don't turn on PLL */ 1128 1129 clk_ctrl = CSR_READ_4(sc, BWI_CLOCK_CTRL); 1130 clk_src = __SHIFTOUT(clk_ctrl, BWI_CLOCK_CTRL_CLKSRC); 1131 1132 switch (clk_mode) { 1133 case BWI_CLOCK_MODE_FAST: 1134 clk_ctrl &= ~BWI_CLOCK_CTRL_SLOW; 1135 clk_ctrl |= BWI_CLOCK_CTRL_IGNPLL; 1136 break; 1137 case BWI_CLOCK_MODE_SLOW: 1138 clk_ctrl |= BWI_CLOCK_CTRL_SLOW; 1139 break; 1140 case BWI_CLOCK_MODE_DYN: 1141 clk_ctrl &= ~(BWI_CLOCK_CTRL_SLOW | 1142 BWI_CLOCK_CTRL_IGNPLL | 1143 BWI_CLOCK_CTRL_NODYN); 1144 if (clk_src != BWI_CLKSRC_CS_OSC) { 1145 clk_ctrl |= BWI_CLOCK_CTRL_NODYN; 1146 pwr_off = 1; 1147 } 1148 break; 1149 } 1150 CSR_WRITE_4(sc, BWI_CLOCK_CTRL, clk_ctrl); 1151 1152 if (pwr_off) 1153 bwi_power_off(sc, 0); /* Leave PLL as it is */ 1154 1155 return bwi_regwin_switch(sc, old, NULL); 1156} 1157 1158static int 1159bwi_set_clock_delay(struct bwi_softc *sc) 1160{ 1161 struct bwi_regwin *old, *com; 1162 int error; 1163 1164 com = &sc->sc_com_regwin; 1165 if (!BWI_REGWIN_EXIST(com)) 1166 return 0; 1167 1168 error = 
bwi_regwin_switch(sc, com, &old); 1169 if (error) 1170 return error; 1171 1172 if (sc->sc_bbp_id == BWI_BBPID_BCM4321) { 1173 if (sc->sc_bbp_rev == 0) 1174 CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC0); 1175 else if (sc->sc_bbp_rev == 1) 1176 CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC1); 1177 } 1178 1179 if (sc->sc_cap & BWI_CAP_CLKMODE) { 1180 if (com->rw_rev >= 10) { 1181 CSR_FILT_SETBITS_4(sc, BWI_CLOCK_INFO, 0xffff, 0x40000); 1182 } else { 1183 struct bwi_clock_freq freq; 1184 1185 bwi_get_clock_freq(sc, &freq); 1186 CSR_WRITE_4(sc, BWI_PLL_ON_DELAY, 1187 howmany(freq.clkfreq_max * 150, 1000000)); 1188 CSR_WRITE_4(sc, BWI_FREQ_SEL_DELAY, 1189 howmany(freq.clkfreq_max * 15, 1000000)); 1190 } 1191 } 1192 1193 return bwi_regwin_switch(sc, old, NULL); 1194} 1195 1196static void 1197bwi_init(struct bwi_softc *sc) 1198{ 1199 struct ieee80211com *ic = &sc->sc_ic; 1200 1201 BWI_LOCK(sc); 1202 bwi_init_statechg(sc, 1); 1203 BWI_UNLOCK(sc); 1204 1205 if (sc->sc_flags & BWI_F_RUNNING) 1206 ieee80211_start_all(ic); /* start all vap's */ 1207} 1208 1209static void 1210bwi_init_statechg(struct bwi_softc *sc, int statechg) 1211{ 1212 struct bwi_mac *mac; 1213 int error; 1214 1215 BWI_ASSERT_LOCKED(sc); 1216 1217 bwi_stop_locked(sc, statechg); 1218 1219 bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST); 1220 1221 /* TODO: 2 MAC */ 1222 1223 mac = &sc->sc_mac[0]; 1224 error = bwi_regwin_switch(sc, &mac->mac_regwin, NULL); 1225 if (error) { 1226 device_printf(sc->sc_dev, "%s: error %d on regwin switch\n", 1227 __func__, error); 1228 goto bad; 1229 } 1230 error = bwi_mac_init(mac); 1231 if (error) { 1232 device_printf(sc->sc_dev, "%s: error %d on MAC init\n", 1233 __func__, error); 1234 goto bad; 1235 } 1236 1237 bwi_bbp_power_on(sc, BWI_CLOCK_MODE_DYN); 1238 1239 bwi_set_bssid(sc, bwi_zero_addr); /* Clear BSSID */ 1240 bwi_set_addr_filter(sc, BWI_ADDR_FILTER_MYADDR, sc->sc_ic.ic_macaddr); 1241 1242 bwi_mac_reset_hwkeys(mac); 1243 1244 if ((mac->mac_flags & BWI_MAC_F_HAS_TXSTATS) 
== 0) { 1245 int i; 1246 1247#define NRETRY 1000 1248 /* 1249 * Drain any possible pending TX status 1250 */ 1251 for (i = 0; i < NRETRY; ++i) { 1252 if ((CSR_READ_4(sc, BWI_TXSTATUS0) & 1253 BWI_TXSTATUS0_VALID) == 0) 1254 break; 1255 CSR_READ_4(sc, BWI_TXSTATUS1); 1256 } 1257 if (i == NRETRY) 1258 device_printf(sc->sc_dev, 1259 "%s: can't drain TX status\n", __func__); 1260#undef NRETRY 1261 } 1262 1263 if (mac->mac_phy.phy_mode == IEEE80211_MODE_11G) 1264 bwi_mac_updateslot(mac, 1); 1265 1266 /* Start MAC */ 1267 error = bwi_mac_start(mac); 1268 if (error) { 1269 device_printf(sc->sc_dev, "%s: error %d starting MAC\n", 1270 __func__, error); 1271 goto bad; 1272 } 1273 1274 /* Clear stop flag before enabling interrupt */ 1275 sc->sc_flags &= ~BWI_F_STOP; 1276 sc->sc_flags |= BWI_F_RUNNING; 1277 callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc); 1278 1279 /* Enable intrs */ 1280 bwi_enable_intrs(sc, BWI_INIT_INTRS); 1281 return; 1282bad: 1283 bwi_stop_locked(sc, 1); 1284} 1285 1286static void 1287bwi_parent(struct ieee80211com *ic) 1288{ 1289 struct bwi_softc *sc = ic->ic_softc; 1290 int startall = 0; 1291 1292 BWI_LOCK(sc); 1293 if (ic->ic_nrunning > 0) { 1294 struct bwi_mac *mac; 1295 int promisc = -1; 1296 1297 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1298 ("current regwin type %d", 1299 sc->sc_cur_regwin->rw_type)); 1300 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1301 1302 if (ic->ic_promisc > 0 && (sc->sc_flags & BWI_F_PROMISC) == 0) { 1303 promisc = 1; 1304 sc->sc_flags |= BWI_F_PROMISC; 1305 } else if (ic->ic_promisc == 0 && 1306 (sc->sc_flags & BWI_F_PROMISC) != 0) { 1307 promisc = 0; 1308 sc->sc_flags &= ~BWI_F_PROMISC; 1309 } 1310 1311 if (promisc >= 0) 1312 bwi_mac_set_promisc(mac, promisc); 1313 } 1314 if (ic->ic_nrunning > 0) { 1315 if ((sc->sc_flags & BWI_F_RUNNING) == 0) { 1316 bwi_init_statechg(sc, 1); 1317 startall = 1; 1318 } 1319 } else if (sc->sc_flags & BWI_F_RUNNING) 1320 bwi_stop_locked(sc, 1); 1321 BWI_UNLOCK(sc); 
1322 if (startall) 1323 ieee80211_start_all(ic); 1324} 1325 1326static int 1327bwi_transmit(struct ieee80211com *ic, struct mbuf *m) 1328{ 1329 struct bwi_softc *sc = ic->ic_softc; 1330 int error; 1331 1332 BWI_LOCK(sc); 1333 if ((sc->sc_flags & BWI_F_RUNNING) == 0) { 1334 BWI_UNLOCK(sc); 1335 return (ENXIO); 1336 } 1337 error = mbufq_enqueue(&sc->sc_snd, m); 1338 if (error) { 1339 BWI_UNLOCK(sc); 1340 return (error); 1341 } 1342 bwi_start_locked(sc); 1343 BWI_UNLOCK(sc); 1344 return (0); 1345} 1346 1347static void 1348bwi_start_locked(struct bwi_softc *sc) 1349{ 1350 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; 1351 struct ieee80211_frame *wh; 1352 struct ieee80211_node *ni; 1353 struct mbuf *m; 1354 int trans, idx; 1355 1356 BWI_ASSERT_LOCKED(sc); 1357 1358 trans = 0; 1359 idx = tbd->tbd_idx; 1360 1361 while (tbd->tbd_buf[idx].tb_mbuf == NULL && 1362 tbd->tbd_used + BWI_TX_NSPRDESC < BWI_TX_NDESC && 1363 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 1364 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 1365 wh = mtod(m, struct ieee80211_frame *); 1366 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0 && 1367 ieee80211_crypto_encap(ni, m) == NULL) { 1368 if_inc_counter(ni->ni_vap->iv_ifp, 1369 IFCOUNTER_OERRORS, 1); 1370 ieee80211_free_node(ni); 1371 m_freem(m); 1372 continue; 1373 } 1374 if (bwi_encap(sc, idx, m, ni) != 0) { 1375 /* 'm' is freed in bwi_encap() if we reach here */ 1376 if (ni != NULL) { 1377 if_inc_counter(ni->ni_vap->iv_ifp, 1378 IFCOUNTER_OERRORS, 1); 1379 ieee80211_free_node(ni); 1380 } else 1381 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 1382 continue; 1383 } 1384 trans = 1; 1385 tbd->tbd_used++; 1386 idx = (idx + 1) % BWI_TX_NDESC; 1387 } 1388 1389 tbd->tbd_idx = idx; 1390 if (trans) 1391 sc->sc_tx_timer = 5; 1392} 1393 1394static int 1395bwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 1396 const struct ieee80211_bpf_params *params) 1397{ 1398 struct ieee80211com *ic = ni->ni_ic; 1399 struct bwi_softc *sc = 
ic->ic_softc; 1400 /* XXX wme? */ 1401 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; 1402 int idx, error; 1403 1404 if ((sc->sc_flags & BWI_F_RUNNING) == 0) { 1405 m_freem(m); 1406 return ENETDOWN; 1407 } 1408 1409 BWI_LOCK(sc); 1410 idx = tbd->tbd_idx; 1411 KASSERT(tbd->tbd_buf[idx].tb_mbuf == NULL, ("slot %d not empty", idx)); 1412 if (params == NULL) { 1413 /* 1414 * Legacy path; interpret frame contents to decide 1415 * precisely how to send the frame. 1416 */ 1417 error = bwi_encap(sc, idx, m, ni); 1418 } else { 1419 /* 1420 * Caller supplied explicit parameters to use in 1421 * sending the frame. 1422 */ 1423 error = bwi_encap_raw(sc, idx, m, ni, params); 1424 } 1425 if (error == 0) { 1426 tbd->tbd_used++; 1427 tbd->tbd_idx = (idx + 1) % BWI_TX_NDESC; 1428 sc->sc_tx_timer = 5; 1429 } 1430 BWI_UNLOCK(sc); 1431 return error; 1432} 1433 1434static void 1435bwi_watchdog(void *arg) 1436{ 1437 struct bwi_softc *sc; 1438 1439 sc = arg; 1440 BWI_ASSERT_LOCKED(sc); 1441 if (sc->sc_tx_timer != 0 && --sc->sc_tx_timer == 0) { 1442 device_printf(sc->sc_dev, "watchdog timeout\n"); 1443 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 1444 taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task); 1445 } 1446 callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc); 1447} 1448 1449static void 1450bwi_stop(struct bwi_softc *sc, int statechg) 1451{ 1452 BWI_LOCK(sc); 1453 bwi_stop_locked(sc, statechg); 1454 BWI_UNLOCK(sc); 1455} 1456 1457static void 1458bwi_stop_locked(struct bwi_softc *sc, int statechg) 1459{ 1460 struct bwi_mac *mac; 1461 int i, error, pwr_off = 0; 1462 1463 BWI_ASSERT_LOCKED(sc); 1464 1465 callout_stop(&sc->sc_calib_ch); 1466 callout_stop(&sc->sc_led_blink_ch); 1467 sc->sc_led_blinking = 0; 1468 sc->sc_flags |= BWI_F_STOP; 1469 1470 if (sc->sc_flags & BWI_F_RUNNING) { 1471 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1472 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 1473 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1474 1475 
bwi_disable_intrs(sc, BWI_ALL_INTRS); 1476 CSR_READ_4(sc, BWI_MAC_INTR_MASK); 1477 bwi_mac_stop(mac); 1478 } 1479 1480 for (i = 0; i < sc->sc_nmac; ++i) { 1481 struct bwi_regwin *old_rw; 1482 1483 mac = &sc->sc_mac[i]; 1484 if ((mac->mac_flags & BWI_MAC_F_INITED) == 0) 1485 continue; 1486 1487 error = bwi_regwin_switch(sc, &mac->mac_regwin, &old_rw); 1488 if (error) 1489 continue; 1490 1491 bwi_mac_shutdown(mac); 1492 pwr_off = 1; 1493 1494 bwi_regwin_switch(sc, old_rw, NULL); 1495 } 1496 1497 if (pwr_off) 1498 bwi_bbp_power_off(sc); 1499 1500 sc->sc_tx_timer = 0; 1501 callout_stop(&sc->sc_watchdog_timer); 1502 sc->sc_flags &= ~BWI_F_RUNNING; 1503} 1504 1505void 1506bwi_intr(void *xsc) 1507{ 1508 struct bwi_softc *sc = xsc; 1509 struct bwi_mac *mac; 1510 uint32_t intr_status; 1511 uint32_t txrx_intr_status[BWI_TXRX_NRING]; 1512 int i, txrx_error, tx = 0, rx_data = -1; 1513 1514 BWI_LOCK(sc); 1515 1516 if ((sc->sc_flags & BWI_F_RUNNING) == 0 || 1517 (sc->sc_flags & BWI_F_STOP)) { 1518 BWI_UNLOCK(sc); 1519 return; 1520 } 1521 /* 1522 * Get interrupt status 1523 */ 1524 intr_status = CSR_READ_4(sc, BWI_MAC_INTR_STATUS); 1525 if (intr_status == 0xffffffff) { /* Not for us */ 1526 BWI_UNLOCK(sc); 1527 return; 1528 } 1529 1530 DPRINTF(sc, BWI_DBG_INTR, "intr status 0x%08x\n", intr_status); 1531 1532 intr_status &= CSR_READ_4(sc, BWI_MAC_INTR_MASK); 1533 if (intr_status == 0) { /* Nothing is interesting */ 1534 BWI_UNLOCK(sc); 1535 return; 1536 } 1537 1538 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 1539 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 1540 mac = (struct bwi_mac *)sc->sc_cur_regwin; 1541 1542 txrx_error = 0; 1543 DPRINTF(sc, BWI_DBG_INTR, "%s\n", "TX/RX intr"); 1544 for (i = 0; i < BWI_TXRX_NRING; ++i) { 1545 uint32_t mask; 1546 1547 if (BWI_TXRX_IS_RX(i)) 1548 mask = BWI_TXRX_RX_INTRS; 1549 else 1550 mask = BWI_TXRX_TX_INTRS; 1551 1552 txrx_intr_status[i] = 1553 CSR_READ_4(sc, BWI_TXRX_INTR_STATUS(i)) & mask; 1554 1555 _DPRINTF(sc, 
BWI_DBG_INTR, ", %d 0x%08x", 1556 i, txrx_intr_status[i]); 1557 1558 if (txrx_intr_status[i] & BWI_TXRX_INTR_ERROR) { 1559 device_printf(sc->sc_dev, 1560 "%s: intr fatal TX/RX (%d) error 0x%08x\n", 1561 __func__, i, txrx_intr_status[i]); 1562 txrx_error = 1; 1563 } 1564 } 1565 _DPRINTF(sc, BWI_DBG_INTR, "%s\n", ""); 1566 1567 /* 1568 * Acknowledge interrupt 1569 */ 1570 CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, intr_status); 1571 1572 for (i = 0; i < BWI_TXRX_NRING; ++i) 1573 CSR_WRITE_4(sc, BWI_TXRX_INTR_STATUS(i), txrx_intr_status[i]); 1574 1575 /* Disable all interrupts */ 1576 bwi_disable_intrs(sc, BWI_ALL_INTRS); 1577 1578 /* 1579 * http://bcm-specs.sipsolutions.net/Interrupts 1580 * Says for this bit (0x800): 1581 * "Fatal Error 1582 * 1583 * We got this one while testing things when by accident the 1584 * template ram wasn't set to big endian when it should have 1585 * been after writing the initial values. It keeps on being 1586 * triggered, the only way to stop it seems to shut down the 1587 * chip." 1588 * 1589 * Suggesting that we should never get it and if we do we're not 1590 * feeding TX packets into the MAC correctly if we do... Apparently, 1591 * it is valid only on mac version 5 and higher, but I couldn't 1592 * find a reference for that... Since I see them from time to time 1593 * on my card, this suggests an error in the tx path still... 
1594 */ 1595 if (intr_status & BWI_INTR_PHY_TXERR) { 1596 if (mac->mac_flags & BWI_MAC_F_PHYE_RESET) { 1597 device_printf(sc->sc_dev, "%s: intr PHY TX error\n", 1598 __func__); 1599 taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task); 1600 BWI_UNLOCK(sc); 1601 return; 1602 } 1603 } 1604 1605 if (txrx_error) { 1606 /* TODO: reset device */ 1607 } 1608 1609 if (intr_status & BWI_INTR_TBTT) 1610 bwi_mac_config_ps(mac); 1611 1612 if (intr_status & BWI_INTR_EO_ATIM) 1613 device_printf(sc->sc_dev, "EO_ATIM\n"); 1614 1615 if (intr_status & BWI_INTR_PMQ) { 1616 for (;;) { 1617 if ((CSR_READ_4(sc, BWI_MAC_PS_STATUS) & 0x8) == 0) 1618 break; 1619 } 1620 CSR_WRITE_2(sc, BWI_MAC_PS_STATUS, 0x2); 1621 } 1622 1623 if (intr_status & BWI_INTR_NOISE) 1624 device_printf(sc->sc_dev, "intr noise\n"); 1625 1626 if (txrx_intr_status[0] & BWI_TXRX_INTR_RX) { 1627 rx_data = sc->sc_rxeof(sc); 1628 if (sc->sc_flags & BWI_F_STOP) { 1629 BWI_UNLOCK(sc); 1630 return; 1631 } 1632 } 1633 1634 if (txrx_intr_status[3] & BWI_TXRX_INTR_RX) { 1635 sc->sc_txeof_status(sc); 1636 tx = 1; 1637 } 1638 1639 if (intr_status & BWI_INTR_TX_DONE) { 1640 bwi_txeof(sc); 1641 tx = 1; 1642 } 1643 1644 /* Re-enable interrupts */ 1645 bwi_enable_intrs(sc, BWI_INIT_INTRS); 1646 1647 if (sc->sc_blink_led != NULL && sc->sc_led_blink) { 1648 int evt = BWI_LED_EVENT_NONE; 1649 1650 if (tx && rx_data > 0) { 1651 if (sc->sc_rx_rate > sc->sc_tx_rate) 1652 evt = BWI_LED_EVENT_RX; 1653 else 1654 evt = BWI_LED_EVENT_TX; 1655 } else if (tx) { 1656 evt = BWI_LED_EVENT_TX; 1657 } else if (rx_data > 0) { 1658 evt = BWI_LED_EVENT_RX; 1659 } else if (rx_data == 0) { 1660 evt = BWI_LED_EVENT_POLL; 1661 } 1662 1663 if (evt != BWI_LED_EVENT_NONE) 1664 bwi_led_event(sc, evt); 1665 } 1666 1667 BWI_UNLOCK(sc); 1668} 1669 1670static void 1671bwi_scan_start(struct ieee80211com *ic) 1672{ 1673 struct bwi_softc *sc = ic->ic_softc; 1674 1675 BWI_LOCK(sc); 1676 /* Enable MAC beacon promiscuity */ 1677 CSR_SETBITS_4(sc, BWI_MAC_STATUS, 
	    BWI_MAC_STATUS_PASS_BCN);
	BWI_UNLOCK(sc);
}

/*
 * Fill in the channel list supported by the radio.  Only the 2GHz
 * (11b/11g) path is populated; 11a hardware is recognized but rejected
 * with a diagnostic (TODO:11A below).
 */
static void
bwi_getradiocaps(struct ieee80211com *ic,
    int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct bwi_softc *sc = ic->ic_softc;
	struct bwi_mac *mac;
	struct bwi_phy *phy;
	uint8_t bands[IEEE80211_MODE_BYTES];

	/*
	 * XXX First MAC is known to exist
	 * TODO2
	 */
	mac = &sc->sc_mac[0];
	phy = &mac->mac_phy;

	memset(bands, 0, sizeof(bands));
	switch (phy->phy_mode) {
	case IEEE80211_MODE_11G:
		setbit(bands, IEEE80211_MODE_11G);
		/* FALLTHROUGH */
	case IEEE80211_MODE_11B:
		setbit(bands, IEEE80211_MODE_11B);
		break;
	case IEEE80211_MODE_11A:
		/* TODO:11A */
		setbit(bands, IEEE80211_MODE_11A);
		device_printf(sc->sc_dev, "no 11a support\n");
		return;
	default:
		panic("unknown phymode %d\n", phy->phy_mode);
	}

	ieee80211_add_channel_list_2ghz(chans, maxchans, nchans,
	    bwi_chan_2ghz, nitems(bwi_chan_2ghz), bands, 0);
}

/*
 * net80211 ic_set_channel method: retune the RF to ic_curchan and
 * refresh the cached rate table and the radiotap channel freq/flags.
 */
static void
bwi_set_channel(struct ieee80211com *ic)
{
	struct bwi_softc *sc = ic->ic_softc;
	struct ieee80211_channel *c = ic->ic_curchan;
	struct bwi_mac *mac;

	BWI_LOCK(sc);
	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;
	bwi_rf_set_chan(mac, ieee80211_chan2ieee(ic, c), 0);

	sc->sc_rates = ieee80211_get_ratetable(c);

	/*
	 * Setup radio tap channel freq and flags
	 */
	sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
	    htole16(c->ic_freq);
	sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
	    htole16(c->ic_flags & 0xffff);

	BWI_UNLOCK(sc);
}

/* Undo bwi_scan_start(): stop passing up beacons from foreign BSSes. */
static void
bwi_scan_end(struct ieee80211com *ic)
{
	struct bwi_softc *sc = ic->ic_softc;

	BWI_LOCK(sc);
	CSR_CLRBITS_4(sc,
	    BWI_MAC_STATUS, BWI_MAC_STATUS_PASS_BCN);
	BWI_UNLOCK(sc);
}

/*
 * 802.11 state-machine hook.  Chains to the net80211 handler saved in
 * bv_newstate and layers on driver chores: stopping the calibration
 * callout, LED state, clearing/setting the hardware BSSID and kicking
 * off the initial TX power calibration when entering RUN.
 */
static int
bwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct bwi_vap *bvp = BWI_VAP(vap);
	struct ieee80211com *ic= vap->iv_ic;
	struct bwi_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	struct bwi_mac *mac;
	int error;

	BWI_LOCK(sc);

	callout_stop(&sc->sc_calib_ch);

	if (nstate == IEEE80211_S_INIT)
		sc->sc_txpwrcb_type = BWI_TXPWR_INIT;

	bwi_led_newstate(sc, nstate);

	error = bvp->bv_newstate(vap, nstate, arg);
	if (error != 0)
		goto back;

	/*
	 * Clear the BSSID when we stop a STA
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
			/*
			 * Clear out the BSSID.  If we reassociate to
			 * the same AP, this will reinitialize things
			 * correctly...
			 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    !(sc->sc_flags & BWI_F_STOP))
				bwi_set_bssid(sc, bwi_zero_addr);
		}
	}

	if (vap->iv_opmode == IEEE80211_M_MONITOR) {
		/* Nothing to do */
	} else if (nstate == IEEE80211_S_RUN) {
		bwi_set_bssid(sc, vap->iv_bss->ni_bssid);

		KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
		    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
		mac = (struct bwi_mac *)sc->sc_cur_regwin;

		/* Initial TX power calibration */
		bwi_mac_calibrate_txpower(mac, BWI_TXPWR_INIT);
#ifdef notyet
		sc->sc_txpwrcb_type = BWI_TXPWR_FORCE;
#else
		sc->sc_txpwrcb_type = BWI_TXPWR_CALIB;
#endif

		callout_reset(&sc->sc_calib_ch, hz, bwi_calibrate, sc);
	}
back:
	BWI_UNLOCK(sc);

	return error;
}

static int
bwi_media_change(struct ifnet *ifp)
{
	int error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't
need a reset */ 1823 return (error == ENETRESET ? 0 : error); 1824} 1825 1826static int 1827bwi_dma_alloc(struct bwi_softc *sc) 1828{ 1829 int error, i, has_txstats; 1830 bus_addr_t lowaddr = 0; 1831 bus_size_t tx_ring_sz, rx_ring_sz, desc_sz = 0; 1832 uint32_t txrx_ctrl_step = 0; 1833 1834 has_txstats = 0; 1835 for (i = 0; i < sc->sc_nmac; ++i) { 1836 if (sc->sc_mac[i].mac_flags & BWI_MAC_F_HAS_TXSTATS) { 1837 has_txstats = 1; 1838 break; 1839 } 1840 } 1841 1842 switch (sc->sc_bus_space) { 1843 case BWI_BUS_SPACE_30BIT: 1844 case BWI_BUS_SPACE_32BIT: 1845 if (sc->sc_bus_space == BWI_BUS_SPACE_30BIT) 1846 lowaddr = BWI_BUS_SPACE_MAXADDR; 1847 else 1848 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1849 desc_sz = sizeof(struct bwi_desc32); 1850 txrx_ctrl_step = 0x20; 1851 1852 sc->sc_init_tx_ring = bwi_init_tx_ring32; 1853 sc->sc_free_tx_ring = bwi_free_tx_ring32; 1854 sc->sc_init_rx_ring = bwi_init_rx_ring32; 1855 sc->sc_free_rx_ring = bwi_free_rx_ring32; 1856 sc->sc_setup_rxdesc = bwi_setup_rx_desc32; 1857 sc->sc_setup_txdesc = bwi_setup_tx_desc32; 1858 sc->sc_rxeof = bwi_rxeof32; 1859 sc->sc_start_tx = bwi_start_tx32; 1860 if (has_txstats) { 1861 sc->sc_init_txstats = bwi_init_txstats32; 1862 sc->sc_free_txstats = bwi_free_txstats32; 1863 sc->sc_txeof_status = bwi_txeof_status32; 1864 } 1865 break; 1866 1867 case BWI_BUS_SPACE_64BIT: 1868 lowaddr = BUS_SPACE_MAXADDR; /* XXX */ 1869 desc_sz = sizeof(struct bwi_desc64); 1870 txrx_ctrl_step = 0x40; 1871 1872 sc->sc_init_tx_ring = bwi_init_tx_ring64; 1873 sc->sc_free_tx_ring = bwi_free_tx_ring64; 1874 sc->sc_init_rx_ring = bwi_init_rx_ring64; 1875 sc->sc_free_rx_ring = bwi_free_rx_ring64; 1876 sc->sc_setup_rxdesc = bwi_setup_rx_desc64; 1877 sc->sc_setup_txdesc = bwi_setup_tx_desc64; 1878 sc->sc_rxeof = bwi_rxeof64; 1879 sc->sc_start_tx = bwi_start_tx64; 1880 if (has_txstats) { 1881 sc->sc_init_txstats = bwi_init_txstats64; 1882 sc->sc_free_txstats = bwi_free_txstats64; 1883 sc->sc_txeof_status = bwi_txeof_status64; 1884 } 1885 
break; 1886 } 1887 1888 KASSERT(lowaddr != 0, ("lowaddr zero")); 1889 KASSERT(desc_sz != 0, ("desc_sz zero")); 1890 KASSERT(txrx_ctrl_step != 0, ("txrx_ctrl_step zero")); 1891 1892 tx_ring_sz = roundup(desc_sz * BWI_TX_NDESC, BWI_RING_ALIGN); 1893 rx_ring_sz = roundup(desc_sz * BWI_RX_NDESC, BWI_RING_ALIGN); 1894 1895 /* 1896 * Create top level DMA tag 1897 */ 1898 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 1899 BWI_ALIGN, 0, /* alignment, bounds */ 1900 lowaddr, /* lowaddr */ 1901 BUS_SPACE_MAXADDR, /* highaddr */ 1902 NULL, NULL, /* filter, filterarg */ 1903 BUS_SPACE_MAXSIZE, /* maxsize */ 1904 BUS_SPACE_UNRESTRICTED, /* nsegments */ 1905 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1906 0, /* flags */ 1907 NULL, NULL, /* lockfunc, lockarg */ 1908 &sc->sc_parent_dtag); 1909 if (error) { 1910 device_printf(sc->sc_dev, "can't create parent DMA tag\n"); 1911 return error; 1912 } 1913 1914#define TXRX_CTRL(idx) (BWI_TXRX_CTRL_BASE + (idx) * txrx_ctrl_step) 1915 1916 /* 1917 * Create TX ring DMA stuffs 1918 */ 1919 error = bus_dma_tag_create(sc->sc_parent_dtag, 1920 BWI_RING_ALIGN, 0, 1921 BUS_SPACE_MAXADDR, 1922 BUS_SPACE_MAXADDR, 1923 NULL, NULL, 1924 tx_ring_sz, 1925 1, 1926 tx_ring_sz, 1927 0, 1928 NULL, NULL, 1929 &sc->sc_txring_dtag); 1930 if (error) { 1931 device_printf(sc->sc_dev, "can't create TX ring DMA tag\n"); 1932 return error; 1933 } 1934 1935 for (i = 0; i < BWI_TX_NRING; ++i) { 1936 error = bwi_dma_ring_alloc(sc, sc->sc_txring_dtag, 1937 &sc->sc_tx_rdata[i], tx_ring_sz, 1938 TXRX_CTRL(i)); 1939 if (error) { 1940 device_printf(sc->sc_dev, "%dth TX ring " 1941 "DMA alloc failed\n", i); 1942 return error; 1943 } 1944 } 1945 1946 /* 1947 * Create RX ring DMA stuffs 1948 */ 1949 error = bus_dma_tag_create(sc->sc_parent_dtag, 1950 BWI_RING_ALIGN, 0, 1951 BUS_SPACE_MAXADDR, 1952 BUS_SPACE_MAXADDR, 1953 NULL, NULL, 1954 rx_ring_sz, 1955 1, 1956 rx_ring_sz, 1957 0, 1958 NULL, NULL, 1959 &sc->sc_rxring_dtag); 1960 if (error) { 1961 
device_printf(sc->sc_dev, "can't create RX ring DMA tag\n"); 1962 return error; 1963 } 1964 1965 error = bwi_dma_ring_alloc(sc, sc->sc_rxring_dtag, &sc->sc_rx_rdata, 1966 rx_ring_sz, TXRX_CTRL(0)); 1967 if (error) { 1968 device_printf(sc->sc_dev, "RX ring DMA alloc failed\n"); 1969 return error; 1970 } 1971 1972 if (has_txstats) { 1973 error = bwi_dma_txstats_alloc(sc, TXRX_CTRL(3), desc_sz); 1974 if (error) { 1975 device_printf(sc->sc_dev, 1976 "TX stats DMA alloc failed\n"); 1977 return error; 1978 } 1979 } 1980 1981#undef TXRX_CTRL 1982 1983 return bwi_dma_mbuf_create(sc); 1984} 1985 1986static void 1987bwi_dma_free(struct bwi_softc *sc) 1988{ 1989 if (sc->sc_txring_dtag != NULL) { 1990 int i; 1991 1992 for (i = 0; i < BWI_TX_NRING; ++i) { 1993 struct bwi_ring_data *rd = &sc->sc_tx_rdata[i]; 1994 1995 if (rd->rdata_desc != NULL) { 1996 bus_dmamap_unload(sc->sc_txring_dtag, 1997 rd->rdata_dmap); 1998 bus_dmamem_free(sc->sc_txring_dtag, 1999 rd->rdata_desc, 2000 rd->rdata_dmap); 2001 } 2002 } 2003 bus_dma_tag_destroy(sc->sc_txring_dtag); 2004 } 2005 2006 if (sc->sc_rxring_dtag != NULL) { 2007 struct bwi_ring_data *rd = &sc->sc_rx_rdata; 2008 2009 if (rd->rdata_desc != NULL) { 2010 bus_dmamap_unload(sc->sc_rxring_dtag, rd->rdata_dmap); 2011 bus_dmamem_free(sc->sc_rxring_dtag, rd->rdata_desc, 2012 rd->rdata_dmap); 2013 } 2014 bus_dma_tag_destroy(sc->sc_rxring_dtag); 2015 } 2016 2017 bwi_dma_txstats_free(sc); 2018 bwi_dma_mbuf_destroy(sc, BWI_TX_NRING, 1); 2019 2020 if (sc->sc_parent_dtag != NULL) 2021 bus_dma_tag_destroy(sc->sc_parent_dtag); 2022} 2023 2024static int 2025bwi_dma_ring_alloc(struct bwi_softc *sc, bus_dma_tag_t dtag, 2026 struct bwi_ring_data *rd, bus_size_t size, 2027 uint32_t txrx_ctrl) 2028{ 2029 int error; 2030 2031 error = bus_dmamem_alloc(dtag, &rd->rdata_desc, 2032 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2033 &rd->rdata_dmap); 2034 if (error) { 2035 device_printf(sc->sc_dev, "can't allocate DMA mem\n"); 2036 return error; 2037 } 2038 2039 error = 
bus_dmamap_load(dtag, rd->rdata_dmap, rd->rdata_desc, size, 2040 bwi_dma_ring_addr, &rd->rdata_paddr, 2041 BUS_DMA_NOWAIT); 2042 if (error) { 2043 device_printf(sc->sc_dev, "can't load DMA mem\n"); 2044 bus_dmamem_free(dtag, rd->rdata_desc, rd->rdata_dmap); 2045 rd->rdata_desc = NULL; 2046 return error; 2047 } 2048 2049 rd->rdata_txrx_ctrl = txrx_ctrl; 2050 return 0; 2051} 2052 2053static int 2054bwi_dma_txstats_alloc(struct bwi_softc *sc, uint32_t ctrl_base, 2055 bus_size_t desc_sz) 2056{ 2057 struct bwi_txstats_data *st; 2058 bus_size_t dma_size; 2059 int error; 2060 2061 st = malloc(sizeof(*st), M_DEVBUF, M_NOWAIT | M_ZERO); 2062 if (st == NULL) { 2063 device_printf(sc->sc_dev, "can't allocate txstats data\n"); 2064 return ENOMEM; 2065 } 2066 sc->sc_txstats = st; 2067 2068 /* 2069 * Create TX stats descriptor DMA stuffs 2070 */ 2071 dma_size = roundup(desc_sz * BWI_TXSTATS_NDESC, BWI_RING_ALIGN); 2072 2073 error = bus_dma_tag_create(sc->sc_parent_dtag, 2074 BWI_RING_ALIGN, 2075 0, 2076 BUS_SPACE_MAXADDR, 2077 BUS_SPACE_MAXADDR, 2078 NULL, NULL, 2079 dma_size, 2080 1, 2081 dma_size, 2082 0, 2083 NULL, NULL, 2084 &st->stats_ring_dtag); 2085 if (error) { 2086 device_printf(sc->sc_dev, "can't create txstats ring " 2087 "DMA tag\n"); 2088 return error; 2089 } 2090 2091 error = bus_dmamem_alloc(st->stats_ring_dtag, &st->stats_ring, 2092 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2093 &st->stats_ring_dmap); 2094 if (error) { 2095 device_printf(sc->sc_dev, "can't allocate txstats ring " 2096 "DMA mem\n"); 2097 bus_dma_tag_destroy(st->stats_ring_dtag); 2098 st->stats_ring_dtag = NULL; 2099 return error; 2100 } 2101 2102 error = bus_dmamap_load(st->stats_ring_dtag, st->stats_ring_dmap, 2103 st->stats_ring, dma_size, 2104 bwi_dma_ring_addr, &st->stats_ring_paddr, 2105 BUS_DMA_NOWAIT); 2106 if (error) { 2107 device_printf(sc->sc_dev, "can't load txstats ring DMA mem\n"); 2108 bus_dmamem_free(st->stats_ring_dtag, st->stats_ring, 2109 st->stats_ring_dmap); 2110 
bus_dma_tag_destroy(st->stats_ring_dtag); 2111 st->stats_ring_dtag = NULL; 2112 return error; 2113 } 2114 2115 /* 2116 * Create TX stats DMA stuffs 2117 */ 2118 dma_size = roundup(sizeof(struct bwi_txstats) * BWI_TXSTATS_NDESC, 2119 BWI_ALIGN); 2120 2121 error = bus_dma_tag_create(sc->sc_parent_dtag, 2122 BWI_ALIGN, 2123 0, 2124 BUS_SPACE_MAXADDR, 2125 BUS_SPACE_MAXADDR, 2126 NULL, NULL, 2127 dma_size, 2128 1, 2129 dma_size, 2130 0, 2131 NULL, NULL, 2132 &st->stats_dtag); 2133 if (error) { 2134 device_printf(sc->sc_dev, "can't create txstats DMA tag\n"); 2135 return error; 2136 } 2137 2138 error = bus_dmamem_alloc(st->stats_dtag, (void **)&st->stats, 2139 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2140 &st->stats_dmap); 2141 if (error) { 2142 device_printf(sc->sc_dev, "can't allocate txstats DMA mem\n"); 2143 bus_dma_tag_destroy(st->stats_dtag); 2144 st->stats_dtag = NULL; 2145 return error; 2146 } 2147 2148 error = bus_dmamap_load(st->stats_dtag, st->stats_dmap, st->stats, 2149 dma_size, bwi_dma_ring_addr, &st->stats_paddr, 2150 BUS_DMA_NOWAIT); 2151 if (error) { 2152 device_printf(sc->sc_dev, "can't load txstats DMA mem\n"); 2153 bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap); 2154 bus_dma_tag_destroy(st->stats_dtag); 2155 st->stats_dtag = NULL; 2156 return error; 2157 } 2158 2159 st->stats_ctrl_base = ctrl_base; 2160 return 0; 2161} 2162 2163static void 2164bwi_dma_txstats_free(struct bwi_softc *sc) 2165{ 2166 struct bwi_txstats_data *st; 2167 2168 if (sc->sc_txstats == NULL) 2169 return; 2170 st = sc->sc_txstats; 2171 2172 if (st->stats_ring_dtag != NULL) { 2173 bus_dmamap_unload(st->stats_ring_dtag, st->stats_ring_dmap); 2174 bus_dmamem_free(st->stats_ring_dtag, st->stats_ring, 2175 st->stats_ring_dmap); 2176 bus_dma_tag_destroy(st->stats_ring_dtag); 2177 } 2178 2179 if (st->stats_dtag != NULL) { 2180 bus_dmamap_unload(st->stats_dtag, st->stats_dmap); 2181 bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap); 2182 bus_dma_tag_destroy(st->stats_dtag); 
2183 } 2184 2185 free(st, M_DEVBUF); 2186} 2187 2188static void 2189bwi_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) 2190{ 2191 KASSERT(nseg == 1, ("too many segments\n")); 2192 *((bus_addr_t *)arg) = seg->ds_addr; 2193} 2194 2195static int 2196bwi_dma_mbuf_create(struct bwi_softc *sc) 2197{ 2198 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata; 2199 int i, j, k, ntx, error; 2200 2201 /* 2202 * Create TX/RX mbuf DMA tag 2203 */ 2204 error = bus_dma_tag_create(sc->sc_parent_dtag, 2205 1, 2206 0, 2207 BUS_SPACE_MAXADDR, 2208 BUS_SPACE_MAXADDR, 2209 NULL, NULL, 2210 MCLBYTES, 2211 1, 2212 MCLBYTES, 2213 BUS_DMA_ALLOCNOW, 2214 NULL, NULL, 2215 &sc->sc_buf_dtag); 2216 if (error) { 2217 device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); 2218 return error; 2219 } 2220 2221 ntx = 0; 2222 2223 /* 2224 * Create TX mbuf DMA map 2225 */ 2226 for (i = 0; i < BWI_TX_NRING; ++i) { 2227 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i]; 2228 2229 for (j = 0; j < BWI_TX_NDESC; ++j) { 2230 error = bus_dmamap_create(sc->sc_buf_dtag, 0, 2231 &tbd->tbd_buf[j].tb_dmap); 2232 if (error) { 2233 device_printf(sc->sc_dev, "can't create " 2234 "%dth tbd, %dth DMA map\n", i, j); 2235 2236 ntx = i; 2237 for (k = 0; k < j; ++k) { 2238 bus_dmamap_destroy(sc->sc_buf_dtag, 2239 tbd->tbd_buf[k].tb_dmap); 2240 } 2241 goto fail; 2242 } 2243 } 2244 } 2245 ntx = BWI_TX_NRING; 2246 2247 /* 2248 * Create RX mbuf DMA map and a spare DMA map 2249 */ 2250 error = bus_dmamap_create(sc->sc_buf_dtag, 0, 2251 &rbd->rbd_tmp_dmap); 2252 if (error) { 2253 device_printf(sc->sc_dev, 2254 "can't create spare RX buf DMA map\n"); 2255 goto fail; 2256 } 2257 2258 for (j = 0; j < BWI_RX_NDESC; ++j) { 2259 error = bus_dmamap_create(sc->sc_buf_dtag, 0, 2260 &rbd->rbd_buf[j].rb_dmap); 2261 if (error) { 2262 device_printf(sc->sc_dev, "can't create %dth " 2263 "RX buf DMA map\n", j); 2264 2265 for (k = 0; k < j; ++k) { 2266 bus_dmamap_destroy(sc->sc_buf_dtag, 2267 rbd->rbd_buf[j].rb_dmap); 2268 } 
			bus_dmamap_destroy(sc->sc_buf_dtag,
			    rbd->rbd_tmp_dmap);
			goto fail;
		}
	}

	return 0;
fail:
	bwi_dma_mbuf_destroy(sc, ntx, 0);
	return error;
}

/*
 * Destroy mbuf DMA maps, unloading and freeing any mbufs still
 * attached.  'ntx' is the number of TX rings whose maps exist; 'nrx'
 * is a flag selecting whether the RX maps (and the spare) exist.
 * Finally destroys the shared mbuf DMA tag.
 */
static void
bwi_dma_mbuf_destroy(struct bwi_softc *sc, int ntx, int nrx)
{
	int i, j;

	if (sc->sc_buf_dtag == NULL)
		return;

	for (i = 0; i < ntx; ++i) {
		struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i];

		for (j = 0; j < BWI_TX_NDESC; ++j) {
			struct bwi_txbuf *tb = &tbd->tbd_buf[j];

			if (tb->tb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_buf_dtag,
				    tb->tb_dmap);
				m_freem(tb->tb_mbuf);
			}
			if (tb->tb_ni != NULL)
				ieee80211_free_node(tb->tb_ni);
			bus_dmamap_destroy(sc->sc_buf_dtag, tb->tb_dmap);
		}
	}

	if (nrx) {
		struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;

		bus_dmamap_destroy(sc->sc_buf_dtag, rbd->rbd_tmp_dmap);
		for (j = 0; j < BWI_RX_NDESC; ++j) {
			struct bwi_rxbuf *rb = &rbd->rbd_buf[j];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_buf_dtag,
				    rb->rb_dmap);
				m_freem(rb->rb_mbuf);
			}
			bus_dmamap_destroy(sc->sc_buf_dtag, rb->rb_dmap);
		}
	}

	bus_dma_tag_destroy(sc->sc_buf_dtag);
	sc->sc_buf_dtag = NULL;
}

/* Unmask the given bits in the MAC interrupt mask register. */
static void
bwi_enable_intrs(struct bwi_softc *sc, uint32_t enable_intrs)
{
	CSR_SETBITS_4(sc, BWI_MAC_INTR_MASK, enable_intrs);
}

/* Mask the given bits in the MAC interrupt mask register. */
static void
bwi_disable_intrs(struct bwi_softc *sc, uint32_t disable_intrs)
{
	CSR_CLRBITS_4(sc, BWI_MAC_INTR_MASK, disable_intrs);
}

/*
 * Reset one 32-bit TX ring: clear the host descriptors, then program
 * the ring base (low bits via RINGINFO, high bits + enable via CTRL)
 * into the ring's control registers.
 */
static int
bwi_init_tx_ring32(struct bwi_softc *sc, int ring_idx)
{
	struct bwi_ring_data *rd;
	struct bwi_txbuf_data *tbd;
	uint32_t val, addr_hi, addr_lo;

	KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx));
	rd = &sc->sc_tx_rdata[ring_idx];
	tbd = &sc->sc_tx_bdata[ring_idx];

	tbd->tbd_idx = 0;
	tbd->tbd_used = 0;

	bzero(rd->rdata_desc, sizeof(struct bwi_desc32) * BWI_TX_NDESC);
	bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
	    BUS_DMASYNC_PREWRITE);

	/* Split the ring bus address into the RINGINFO/CTRL fields */
	addr_lo = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_ADDR_MASK);
	addr_hi = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_FUNC_MASK);

	val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) |
	    __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX,
	    BWI_TXRX32_RINGINFO_FUNC_MASK);
	CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, val);

	val = __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) |
	    BWI_TXRX32_CTRL_ENABLE;
	CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, val);

	return 0;
}

/*
 * Program a 32-bit RX-style ring (also used for the TX status ring):
 * ring base, per-buffer header size, enable bit and the initial RX
 * index (last descriptor, in bytes).
 */
static void
bwi_init_rxdesc_ring32(struct bwi_softc *sc, uint32_t ctrl_base,
		       bus_addr_t paddr, int hdr_size, int ndesc)
{
	uint32_t val, addr_hi, addr_lo;

	addr_lo = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_ADDR_MASK);
	addr_hi = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_FUNC_MASK);

	val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) |
	    __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX,
	    BWI_TXRX32_RINGINFO_FUNC_MASK);
	CSR_WRITE_4(sc, ctrl_base + BWI_RX32_RINGINFO, val);

	val = __SHIFTIN(hdr_size, BWI_RX32_CTRL_HDRSZ_MASK) |
	    __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) |
	    BWI_TXRX32_CTRL_ENABLE;
	CSR_WRITE_4(sc, ctrl_base + BWI_RX32_CTRL, val);

	CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX,
	    (ndesc - 1) * sizeof(struct bwi_desc32));
}

/*
 * Populate the 32-bit RX ring with fresh mbufs and hand it to the
 * hardware.
 */
static int
bwi_init_rx_ring32(struct bwi_softc *sc)
{
	struct bwi_ring_data *rd = &sc->sc_rx_rdata;
	int i, error;

	sc->sc_rx_bdata.rbd_idx = 0;

	for (i = 0; i < BWI_RX_NDESC; ++i) {
		error = bwi_newbuf(sc, i, 1);
		if (error) {
			device_printf(sc->sc_dev,
			    "can't allocate %dth RX buffer\n", i);
			return error;
		}
	}
	bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap,
	    BUS_DMASYNC_PREWRITE);

	bwi_init_rxdesc_ring32(sc, rd->rdata_txrx_ctrl, rd->rdata_paddr,
	    sizeof(struct bwi_rxbuf_hdr), BWI_RX_NDESC);
	return 0;
}

/*
 * Initialize the 32-bit TX status ring: zero the status records, point
 * each ring descriptor at one record, then enable the ring (it is
 * programmed like an RX ring with no per-buffer header).
 */
static int
bwi_init_txstats32(struct bwi_softc *sc)
{
	struct bwi_txstats_data *st = sc->sc_txstats;
	bus_addr_t stats_paddr;
	int i;

	bzero(st->stats, BWI_TXSTATS_NDESC * sizeof(struct bwi_txstats));
	bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_PREWRITE);

	st->stats_idx = 0;

	stats_paddr = st->stats_paddr;
	for (i = 0; i < BWI_TXSTATS_NDESC; ++i) {
		bwi_setup_desc32(sc, st->stats_ring, BWI_TXSTATS_NDESC, i,
		    stats_paddr, sizeof(struct bwi_txstats), 0);
		stats_paddr += sizeof(struct bwi_txstats);
	}
	bus_dmamap_sync(st->stats_ring_dtag, st->stats_ring_dmap,
	    BUS_DMASYNC_PREWRITE);

	bwi_init_rxdesc_ring32(sc, st->stats_ctrl_base,
	    st->stats_ring_paddr, 0, BWI_TXSTATS_NDESC);
	return 0;
}

/* Fill one 32-bit RX descriptor for the buffer at buf_idx. */
static void
bwi_setup_rx_desc32(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr,
		    int buf_len)
{
	struct bwi_ring_data *rd = &sc->sc_rx_rdata;

	KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx));
	bwi_setup_desc32(sc, rd->rdata_desc, BWI_RX_NDESC, buf_idx,
	    paddr, buf_len, 0);
}

/* Fill one 32-bit TX descriptor for the buffer at buf_idx. */
static void
bwi_setup_tx_desc32(struct bwi_softc *sc, struct bwi_ring_data *rd,
		    int buf_idx, bus_addr_t paddr, int buf_len)
{
	KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx));
	bwi_setup_desc32(sc, rd->rdata_desc, BWI_TX_NDESC, buf_idx,
	    paddr, buf_len, 1);
}

static int
bwi_init_tx_ring64(struct bwi_softc *sc, int ring_idx)
{
	/* TODO:64 */
	return EOPNOTSUPP;
}

static int
bwi_init_rx_ring64(struct bwi_softc *sc)
{
	/* TODO:64 */
	return EOPNOTSUPP;
}

static int
bwi_init_txstats64(struct bwi_softc *sc)
{
	/* TODO:64 */
	return EOPNOTSUPP;
}

static void
bwi_setup_rx_desc64(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr,
		    int buf_len)
{
	/* TODO:64 */
}

static void
bwi_setup_tx_desc64(struct bwi_softc *sc, struct bwi_ring_data *rd,
		    int buf_idx, bus_addr_t paddr, int buf_len)
{
	/* TODO:64 */
}

/* bus_dmamap_load_mbuf callback: store the single segment's address. */
static void
bwi_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg,
		 bus_size_t mapsz __unused, int error)
{
	if (!error) {
		KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
		*((bus_addr_t *)arg) = seg->ds_addr;
	}
}

/*
 * Attach a fresh mbuf cluster to RX slot buf_idx and rewrite its
 * descriptor.  'init' is set during ring initialization: in that case
 * a failure is returned directly; at run time ('init' == 0) failures
 * fall through to 'back' so the old mbuf is recycled and the
 * descriptor restored, keeping the ring full.
 */
static int
bwi_newbuf(struct bwi_softc *sc, int buf_idx, int init)
{
	struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
	struct bwi_rxbuf *rxbuf = &rbd->rbd_buf[buf_idx];
	struct bwi_rxbuf_hdr *hdr;
	bus_dmamap_t map;
	bus_addr_t paddr;
	struct mbuf *m;
	int error;

	KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx));

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		error = ENOBUFS;

		/*
		 * If the NIC is up and running, we need to:
		 * - Clear RX buffer's header.
		 * - Restore RX descriptor settings.
		 */
		if (init)
			return error;
		else
			goto back;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Try to load RX buf into temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, rbd->rbd_tmp_dmap, m,
	    bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);

		/*
		 * See the comment above
		 */
		if (init)
			return error;
		else
			goto back;
	}

	if (!init)
		bus_dmamap_unload(sc->sc_buf_dtag, rxbuf->rb_dmap);
	rxbuf->rb_mbuf = m;
	rxbuf->rb_paddr = paddr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	map = rxbuf->rb_dmap;
	rxbuf->rb_dmap = rbd->rbd_tmp_dmap;
	rbd->rbd_tmp_dmap = map;

back:
	/*
	 * Clear RX buf header
	 */
	hdr = mtod(rxbuf->rb_mbuf, struct bwi_rxbuf_hdr *);
	bzero(hdr, sizeof(*hdr));
	bus_dmamap_sync(sc->sc_buf_dtag, rxbuf->rb_dmap, BUS_DMASYNC_PREWRITE);

	/*
	 * Setup RX buf descriptor
	 */
	sc->sc_setup_rxdesc(sc, buf_idx, rxbuf->rb_paddr,
	    rxbuf->rb_mbuf->m_len - sizeof(*hdr));
	return error;
}

/*
 * Write an IEEE 802.11 address into one of the chip's address filter
 * slots, 16 bits at a time (low byte first) through the filter
 * CTRL/DATA register pair.
 */
static void
bwi_set_addr_filter(struct bwi_softc *sc, uint16_t addr_ofs,
		    const uint8_t *addr)
{
	int i;

	CSR_WRITE_2(sc, BWI_ADDR_FILTER_CTRL,
	    BWI_ADDR_FILTER_CTRL_SET | addr_ofs);

	for (i = 0; i < (IEEE80211_ADDR_LEN / 2); ++i) {
		uint16_t addr_val;

		addr_val = (uint16_t)addr[i * 2] |
		    (((uint16_t)addr[(i * 2) + 1]) << 8);
		CSR_WRITE_2(sc, BWI_ADDR_FILTER_DATA, addr_val);
	}
}

/*
 * Drain received frames from the RX ring up to (not including)
 * end_idx and pass them to net80211.  The softc lock is dropped around
 * ieee80211_input*().  Returns 1 if at least one data frame was seen,
 * 0 otherwise, or -1 if the device was stopped while the lock was
 * dropped (caller must not touch the ring registers in that case).
 */
static int
bwi_rxeof(struct bwi_softc *sc, int end_idx)
{
	struct bwi_ring_data *rd = &sc->sc_rx_rdata;
	struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
	struct ieee80211com *ic = &sc->sc_ic;
	int idx, rx_data = 0;

	idx = rbd->rbd_idx;
	while (idx != end_idx) {
		struct bwi_rxbuf *rb = &rbd->rbd_buf[idx];
		struct bwi_rxbuf_hdr *hdr;
		struct ieee80211_frame_min *wh;
		struct ieee80211_node *ni;
		struct mbuf *m;
		uint32_t plcp;
		uint16_t flags2;
		int buflen, wh_ofs, hdr_extra, rssi, noise, type, rate;

		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_buf_dtag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Replenish the slot first; on failure drop this frame */
		if (bwi_newbuf(sc, idx, 0)) {
			counter_u64_add(ic->ic_ierrors, 1);
			goto next;
		}

		hdr = mtod(m, struct bwi_rxbuf_hdr *);
		flags2 = le16toh(hdr->rxh_flags2);

		hdr_extra = 0;
		if (flags2 & BWI_RXH_F2_TYPE2FRAME)
			hdr_extra = 2;
		wh_ofs = hdr_extra + 6;	/* XXX magic number */

		buflen = le16toh(hdr->rxh_buflen);
		if (buflen < BWI_FRAME_MIN_LEN(wh_ofs)) {
			device_printf(sc->sc_dev,
			    "%s: zero length data, hdr_extra %d\n",
			    __func__, hdr_extra);
			counter_u64_add(ic->ic_ierrors, 1);
			m_freem(m);
			goto next;
		}

		/* PLCP header immediately follows the RX header */
		bcopy((uint8_t *)(hdr + 1) + hdr_extra, &plcp, sizeof(plcp));
		rssi = bwi_calc_rssi(sc, hdr);
		noise = bwi_calc_noise(sc);

		m->m_len = m->m_pkthdr.len = buflen + sizeof(*hdr);
		m_adj(m, sizeof(*hdr) + wh_ofs);

		/*
		 * NOTE(review): le16toh() is presumably intended here;
		 * htole16() performs the identical byte swap on both
		 * endiannesses, so behavior is unaffected.
		 */
		if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_OFDM)
			rate = bwi_plcp2rate(plcp, IEEE80211_T_OFDM);
		else
			rate = bwi_plcp2rate(plcp, IEEE80211_T_CCK);

		/* RX radio tap */
		if (ieee80211_radiotap_active(ic))
			bwi_rx_radiotap(sc, m, hdr, &plcp, rate, rssi, noise);

		m_adj(m, -IEEE80211_CRC_LEN);

		BWI_UNLOCK(sc);

		wh = mtod(m, struct ieee80211_frame_min *);
		ni = ieee80211_find_rxnode(ic, wh);
		if (ni != NULL) {
			type = ieee80211_input(ni, m, rssi - noise, noise);
			ieee80211_free_node(ni);
		} else
			type = ieee80211_input_all(ic, m, rssi - noise, noise);
		if (type == IEEE80211_FC0_TYPE_DATA) {
			rx_data = 1;
			sc->sc_rx_rate = rate;
		}

		BWI_LOCK(sc);
next:
		idx = (idx + 1) % BWI_RX_NDESC;

		if (sc->sc_flags & BWI_F_STOP) {
			/*
			 * Take the fast lane, don't do
			 * any damage to softc
			 */
			return -1;
		}
	}

	rbd->rbd_idx = idx;
	bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap,
	    BUS_DMASYNC_PREWRITE);

	return rx_data;
}

/*
 * 32-bit RX completion: read the hardware RX index, drain up to it and
 * acknowledge by writing the index back (skipped when bwi_rxeof()
 * reports the device was stopped).
 */
static int
bwi_rxeof32(struct bwi_softc *sc)
{
	uint32_t val, rx_ctrl;
	int end_idx, rx_data;

	rx_ctrl = sc->sc_rx_rdata.rdata_txrx_ctrl;

	val = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS);
	end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) /
	    sizeof(struct bwi_desc32);

	rx_data = bwi_rxeof(sc, end_idx);
	if (rx_data >= 0) {
		CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_INDEX,
		    end_idx * sizeof(struct bwi_desc32));
	}
	return rx_data;
}

static int
bwi_rxeof64(struct bwi_softc *sc)
{
	/* TODO:64 */
	return 0;
}

/*
 * Disable a 32-bit RX-style ring and poll (up to 10ms) until the
 * hardware reports the DISABLED state, then clear the ring base.
 */
static void
bwi_reset_rx_ring32(struct bwi_softc *sc, uint32_t rx_ctrl)
{
	int i;

	CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_CTRL, 0);

#define NRETRY 10

	for (i = 0; i < NRETRY; ++i) {
		uint32_t status;

		status = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS);
		if (__SHIFTOUT(status, BWI_RX32_STATUS_STATE_MASK) ==
		    BWI_RX32_STATUS_STATE_DISABLED)
			break;

		DELAY(1000);
	}
	if (i == NRETRY)
		device_printf(sc->sc_dev, "reset rx ring timedout\n");

#undef NRETRY

	CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_RINGINFO, 0);
}

/* The TX status ring is an RX-style ring; reset it the same way. */
static void
bwi_free_txstats32(struct bwi_softc *sc)
{
	bwi_reset_rx_ring32(sc, sc->sc_txstats->stats_ctrl_base);
}

/* Stop the RX ring and release all attached RX mbufs. */
static void
bwi_free_rx_ring32(struct bwi_softc *sc)
{
	struct bwi_ring_data *rd = &sc->sc_rx_rdata;
	struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
	int i;

	bwi_reset_rx_ring32(sc, rd->rdata_txrx_ctrl);

	for (i = 0; i < BWI_RX_NDESC; ++i) {
		struct bwi_rxbuf *rb = &rbd->rbd_buf[i];

		if (rb->rb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_buf_dtag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}
}

/*
 * Quiesce and disable a 32-bit TX ring, then free any mbufs and node
 * references still attached to its buffers.  Each hardware poll waits
 * up to 10ms.
 */
static void
bwi_free_tx_ring32(struct bwi_softc *sc, int ring_idx)
{
	struct bwi_ring_data *rd;
	struct bwi_txbuf_data *tbd;
	uint32_t state, val;
	int i;

	KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx));
	rd = &sc->sc_tx_rdata[ring_idx];
	tbd = &sc->sc_tx_bdata[ring_idx];

#define NRETRY 10

	/* Wait for the ring to reach a stable state before disabling */
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
		state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
		if (state == BWI_TX32_STATUS_STATE_DISABLED ||
		    state == BWI_TX32_STATUS_STATE_IDLE ||
		    state == BWI_TX32_STATUS_STATE_STOPPED)
			break;

		DELAY(1000);
	}
	if (i == NRETRY) {
		device_printf(sc->sc_dev,
		    "%s: wait for TX ring(%d) stable timed out\n",
		    __func__, ring_idx);
	}

	CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, 0);
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
		state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
		if (state == BWI_TX32_STATUS_STATE_DISABLED)
			break;

		DELAY(1000);
	}
	if (i == NRETRY)
		device_printf(sc->sc_dev, "%s: reset TX ring (%d) timed out\n",
		    __func__, ring_idx);

#undef NRETRY

	DELAY(1000);

	CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, 0);

	for (i = 0; i < BWI_TX_NDESC; ++i) {
		struct bwi_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
		if (tb->tb_ni != NULL) {
			ieee80211_free_node(tb->tb_ni);
			tb->tb_ni = NULL;
		}
	}
}

static void
bwi_free_txstats64(struct bwi_softc *sc)
{
	/* TODO:64 */
}

static void
bwi_free_rx_ring64(struct bwi_softc *sc)
{
	/* TODO:64 */
}

static void
bwi_free_tx_ring64(struct bwi_softc *sc, int ring_idx)
{
	/* TODO:64 */
}

/* XXX does not belong here */
#define	IEEE80211_OFDM_PLCP_RATE_MASK	__BITS(3, 0)
#define	IEEE80211_OFDM_PLCP_LEN_MASK	__BITS(16, 5)

/* Build a little-endian OFDM PLCP header word (rate + length fields). */
static __inline void
bwi_ofdm_plcp_header(uint32_t *plcp0, int pkt_len, uint8_t rate)
{
	uint32_t plcp;

	plcp = __SHIFTIN(ieee80211_rate2plcp(rate, IEEE80211_T_OFDM),
	    IEEE80211_OFDM_PLCP_RATE_MASK) |
	    __SHIFTIN(pkt_len, IEEE80211_OFDM_PLCP_LEN_MASK);
	*plcp0 = htole32(plcp);
}

/*
 * Build a DS (CCK/DSSS) PLCP header.  'rate' is in units of 500Kbps
 * (so 11Mbps == 22); length is the transmit duration in microseconds,
 * with the service LENEXT7 bit compensating for rounding at 11Mbps.
 * The i_crc field is left untouched.
 */
static __inline void
bwi_ds_plcp_header(struct ieee80211_ds_plcp_hdr *plcp, int pkt_len,
		   uint8_t rate)
{
	int len, service, pkt_bitlen;

	pkt_bitlen = pkt_len * NBBY;
	len = howmany(pkt_bitlen * 2, rate);

	service = IEEE80211_PLCP_SERVICE_LOCKED;
	if (rate == (11 * 2)) {
		int pkt_bitlen1;

		/*
		 * PLCP service field needs to be adjusted,
		 * if TX rate is 11Mbytes/s
		 */
		pkt_bitlen1 = len * 11;
		if (pkt_bitlen1 - pkt_bitlen >= NBBY)
			service |= IEEE80211_PLCP_SERVICE_LENEXT7;
	}

	plcp->i_signal = ieee80211_rate2plcp(rate, IEEE80211_T_CCK);
	plcp->i_service = service;
	plcp->i_length = htole16(len);
	/* NOTE: do NOT touch i_crc */
}

/* Dispatch to the OFDM or DS PLCP builder based on the rate's phytype. */
static __inline void
bwi_plcp_header(const struct ieee80211_rate_table *rt,
		void *plcp, int pkt_len, uint8_t rate)
{
	enum ieee80211_phytype modtype;

	/*
	 * Assume caller has zeroed 'plcp'
	 */
	modtype = ieee80211_rate2phytype(rt, rate);
	if (modtype == IEEE80211_T_OFDM)
		bwi_ofdm_plcp_header(plcp, pkt_len, rate);
	else if (modtype == IEEE80211_T_DS)
		bwi_ds_plcp_header(plcp, pkt_len, rate);
	else
		panic("unsupport modulation type %u\n", modtype);
2920} 2921 2922static int 2923bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m, 2924 struct ieee80211_node *ni) 2925{ 2926 struct ieee80211vap *vap = ni->ni_vap; 2927 struct ieee80211com *ic = &sc->sc_ic; 2928 struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING]; 2929 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; 2930 struct bwi_txbuf *tb = &tbd->tbd_buf[idx]; 2931 struct bwi_mac *mac; 2932 struct bwi_txbuf_hdr *hdr; 2933 struct ieee80211_frame *wh; 2934 const struct ieee80211_txparam *tp; 2935 uint8_t rate, rate_fb; 2936 uint32_t mac_ctrl; 2937 uint16_t phy_ctrl; 2938 bus_addr_t paddr; 2939 int type, ismcast, pkt_len, error, rix; 2940#if 0 2941 const uint8_t *p; 2942 int i; 2943#endif 2944 2945 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 2946 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 2947 mac = (struct bwi_mac *)sc->sc_cur_regwin; 2948 2949 wh = mtod(m, struct ieee80211_frame *); 2950 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2951 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2952 2953 /* Get 802.11 frame len before prepending TX header */ 2954 pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN; 2955 2956 /* 2957 * Find TX rate 2958 */ 2959 tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; 2960 if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) { 2961 rate = rate_fb = tp->mgmtrate; 2962 } else if (ismcast) { 2963 rate = rate_fb = tp->mcastrate; 2964 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 2965 rate = rate_fb = tp->ucastrate; 2966 } else { 2967 rix = ieee80211_ratectl_rate(ni, NULL, pkt_len); 2968 rate = ni->ni_txrate; 2969 2970 if (rix > 0) { 2971 rate_fb = ni->ni_rates.rs_rates[rix-1] & 2972 IEEE80211_RATE_VAL; 2973 } else { 2974 rate_fb = rate; 2975 } 2976 } 2977 tb->tb_rate[0] = rate; 2978 tb->tb_rate[1] = rate_fb; 2979 sc->sc_tx_rate = rate; 2980 2981 /* 2982 * TX radio tap 2983 */ 2984 if (ieee80211_radiotap_active_vap(vap)) { 2985 sc->sc_tx_th.wt_flags = 0; 
2986 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 2987 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2988 if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_DS && 2989 (ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 2990 rate != (1 * 2)) { 2991 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2992 } 2993 sc->sc_tx_th.wt_rate = rate; 2994 2995 ieee80211_radiotap_tx(vap, m); 2996 } 2997 2998 /* 2999 * Setup the embedded TX header 3000 */ 3001 M_PREPEND(m, sizeof(*hdr), M_NOWAIT); 3002 if (m == NULL) { 3003 device_printf(sc->sc_dev, "%s: prepend TX header failed\n", 3004 __func__); 3005 return ENOBUFS; 3006 } 3007 hdr = mtod(m, struct bwi_txbuf_hdr *); 3008 3009 bzero(hdr, sizeof(*hdr)); 3010 3011 bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc)); 3012 bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1)); 3013 3014 if (!ismcast) { 3015 uint16_t dur; 3016 3017 dur = ieee80211_ack_duration(sc->sc_rates, rate, 3018 ic->ic_flags & ~IEEE80211_F_SHPREAMBLE); 3019 3020 hdr->txh_fb_duration = htole16(dur); 3021 } 3022 3023 hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) | 3024 __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK); 3025 3026 bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate); 3027 bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb); 3028 3029 phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode, 3030 BWI_TXH_PHY_C_ANTMODE_MASK); 3031 if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) 3032 phy_ctrl |= BWI_TXH_PHY_C_OFDM; 3033 else if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && rate != (2 * 1)) 3034 phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE; 3035 3036 mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG; 3037 if (!ismcast) 3038 mac_ctrl |= BWI_TXH_MAC_C_ACK; 3039 if (ieee80211_rate2phytype(sc->sc_rates, rate_fb) == IEEE80211_T_OFDM) 3040 mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM; 3041 3042 hdr->txh_mac_ctrl = htole32(mac_ctrl); 3043 hdr->txh_phy_ctrl = htole16(phy_ctrl); 3044 3045 /* Catch any further usage */ 3046 hdr 
= NULL; 3047 wh = NULL; 3048 3049 /* DMA load */ 3050 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, 3051 bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); 3052 if (error && error != EFBIG) { 3053 device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", 3054 __func__, error); 3055 goto back; 3056 } 3057 3058 if (error) { /* error == EFBIG */ 3059 struct mbuf *m_new; 3060 3061 m_new = m_defrag(m, M_NOWAIT); 3062 if (m_new == NULL) { 3063 device_printf(sc->sc_dev, 3064 "%s: can't defrag TX buffer\n", __func__); 3065 error = ENOBUFS; 3066 goto back; 3067 } else { 3068 m = m_new; 3069 } 3070 3071 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, 3072 bwi_dma_buf_addr, &paddr, 3073 BUS_DMA_NOWAIT); 3074 if (error) { 3075 device_printf(sc->sc_dev, 3076 "%s: can't load TX buffer (2) %d\n", 3077 __func__, error); 3078 goto back; 3079 } 3080 } 3081 error = 0; 3082 3083 bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE); 3084 3085 tb->tb_mbuf = m; 3086 tb->tb_ni = ni; 3087 3088#if 0 3089 p = mtod(m, const uint8_t *); 3090 for (i = 0; i < m->m_pkthdr.len; ++i) { 3091 if (i != 0 && i % 8 == 0) 3092 printf("\n"); 3093 printf("%02x ", p[i]); 3094 } 3095 printf("\n"); 3096#endif 3097 DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n", 3098 idx, pkt_len, m->m_pkthdr.len); 3099 3100 /* Setup TX descriptor */ 3101 sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len); 3102 bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap, 3103 BUS_DMASYNC_PREWRITE); 3104 3105 /* Kick start */ 3106 sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx); 3107 3108back: 3109 if (error) 3110 m_freem(m); 3111 return error; 3112} 3113 3114static int 3115bwi_encap_raw(struct bwi_softc *sc, int idx, struct mbuf *m, 3116 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 3117{ 3118 struct ieee80211vap *vap = ni->ni_vap; 3119 struct ieee80211com *ic = ni->ni_ic; 3120 struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING]; 3121 struct 
bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING]; 3122 struct bwi_txbuf *tb = &tbd->tbd_buf[idx]; 3123 struct bwi_mac *mac; 3124 struct bwi_txbuf_hdr *hdr; 3125 struct ieee80211_frame *wh; 3126 uint8_t rate, rate_fb; 3127 uint32_t mac_ctrl; 3128 uint16_t phy_ctrl; 3129 bus_addr_t paddr; 3130 int ismcast, pkt_len, error; 3131 3132 KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC, 3133 ("current regwin type %d", sc->sc_cur_regwin->rw_type)); 3134 mac = (struct bwi_mac *)sc->sc_cur_regwin; 3135 3136 wh = mtod(m, struct ieee80211_frame *); 3137 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 3138 3139 /* Get 802.11 frame len before prepending TX header */ 3140 pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN; 3141 3142 /* 3143 * Find TX rate 3144 */ 3145 rate = params->ibp_rate0; 3146 if (!ieee80211_isratevalid(ic->ic_rt, rate)) { 3147 /* XXX fall back to mcast/mgmt rate? */ 3148 m_freem(m); 3149 return EINVAL; 3150 } 3151 if (params->ibp_try1 != 0) { 3152 rate_fb = params->ibp_rate1; 3153 if (!ieee80211_isratevalid(ic->ic_rt, rate_fb)) { 3154 /* XXX fall back to rate0? 
*/ 3155 m_freem(m); 3156 return EINVAL; 3157 } 3158 } else 3159 rate_fb = rate; 3160 tb->tb_rate[0] = rate; 3161 tb->tb_rate[1] = rate_fb; 3162 sc->sc_tx_rate = rate; 3163 3164 /* 3165 * TX radio tap 3166 */ 3167 if (ieee80211_radiotap_active_vap(vap)) { 3168 sc->sc_tx_th.wt_flags = 0; 3169 /* XXX IEEE80211_BPF_CRYPTO */ 3170 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 3171 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3172 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 3173 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3174 sc->sc_tx_th.wt_rate = rate; 3175 3176 ieee80211_radiotap_tx(vap, m); 3177 } 3178 3179 /* 3180 * Setup the embedded TX header 3181 */ 3182 M_PREPEND(m, sizeof(*hdr), M_NOWAIT); 3183 if (m == NULL) { 3184 device_printf(sc->sc_dev, "%s: prepend TX header failed\n", 3185 __func__); 3186 return ENOBUFS; 3187 } 3188 hdr = mtod(m, struct bwi_txbuf_hdr *); 3189 3190 bzero(hdr, sizeof(*hdr)); 3191 3192 bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc)); 3193 bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1)); 3194 3195 mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG; 3196 if (!ismcast && (params->ibp_flags & IEEE80211_BPF_NOACK) == 0) { 3197 uint16_t dur; 3198 3199 dur = ieee80211_ack_duration(sc->sc_rates, rate_fb, 0); 3200 3201 hdr->txh_fb_duration = htole16(dur); 3202 mac_ctrl |= BWI_TXH_MAC_C_ACK; 3203 } 3204 3205 hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) | 3206 __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK); 3207 3208 bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate); 3209 bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb); 3210 3211 phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode, 3212 BWI_TXH_PHY_C_ANTMODE_MASK); 3213 if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) { 3214 phy_ctrl |= BWI_TXH_PHY_C_OFDM; 3215 mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM; 3216 } else if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 3217 phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE; 3218 3219 
hdr->txh_mac_ctrl = htole32(mac_ctrl); 3220 hdr->txh_phy_ctrl = htole16(phy_ctrl); 3221 3222 /* Catch any further usage */ 3223 hdr = NULL; 3224 wh = NULL; 3225 3226 /* DMA load */ 3227 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, 3228 bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); 3229 if (error != 0) { 3230 struct mbuf *m_new; 3231 3232 if (error != EFBIG) { 3233 device_printf(sc->sc_dev, 3234 "%s: can't load TX buffer (1) %d\n", 3235 __func__, error); 3236 goto back; 3237 } 3238 m_new = m_defrag(m, M_NOWAIT); 3239 if (m_new == NULL) { 3240 device_printf(sc->sc_dev, 3241 "%s: can't defrag TX buffer\n", __func__); 3242 error = ENOBUFS; 3243 goto back; 3244 } 3245 m = m_new; 3246 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m, 3247 bwi_dma_buf_addr, &paddr, 3248 BUS_DMA_NOWAIT); 3249 if (error) { 3250 device_printf(sc->sc_dev, 3251 "%s: can't load TX buffer (2) %d\n", 3252 __func__, error); 3253 goto back; 3254 } 3255 } 3256 3257 bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE); 3258 3259 tb->tb_mbuf = m; 3260 tb->tb_ni = ni; 3261 3262 DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n", 3263 idx, pkt_len, m->m_pkthdr.len); 3264 3265 /* Setup TX descriptor */ 3266 sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len); 3267 bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap, 3268 BUS_DMASYNC_PREWRITE); 3269 3270 /* Kick start */ 3271 sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx); 3272back: 3273 if (error) 3274 m_freem(m); 3275 return error; 3276} 3277 3278static void 3279bwi_start_tx32(struct bwi_softc *sc, uint32_t tx_ctrl, int idx) 3280{ 3281 idx = (idx + 1) % BWI_TX_NDESC; 3282 CSR_WRITE_4(sc, tx_ctrl + BWI_TX32_INDEX, 3283 idx * sizeof(struct bwi_desc32)); 3284} 3285 3286static void 3287bwi_start_tx64(struct bwi_softc *sc, uint32_t tx_ctrl, int idx) 3288{ 3289 /* TODO:64 */ 3290} 3291 3292static void 3293bwi_txeof_status32(struct bwi_softc *sc) 3294{ 3295 uint32_t val, ctrl_base; 3296 int end_idx; 3297 3298 
ctrl_base = sc->sc_txstats->stats_ctrl_base; 3299 3300 val = CSR_READ_4(sc, ctrl_base + BWI_RX32_STATUS); 3301 end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) / 3302 sizeof(struct bwi_desc32); 3303 3304 bwi_txeof_status(sc, end_idx); 3305 3306 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX, 3307 end_idx * sizeof(struct bwi_desc32)); 3308 3309 bwi_start_locked(sc); 3310} 3311 3312static void 3313bwi_txeof_status64(struct bwi_softc *sc) 3314{ 3315 /* TODO:64 */ 3316} 3317 3318static void 3319_bwi_txeof(struct bwi_softc *sc, uint16_t tx_id, int acked, int data_txcnt) 3320{ 3321 struct bwi_txbuf_data *tbd; 3322 struct bwi_txbuf *tb; 3323 int ring_idx, buf_idx; 3324 struct ieee80211_node *ni; 3325 struct ieee80211vap *vap; 3326 3327 if (tx_id == 0) { 3328 device_printf(sc->sc_dev, "%s: zero tx id\n", __func__); 3329 return; 3330 } 3331 3332 ring_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_RING_MASK); 3333 buf_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_IDX_MASK); 3334 3335 KASSERT(ring_idx == BWI_TX_DATA_RING, ("ring_idx %d", ring_idx)); 3336 KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx)); 3337 3338 tbd = &sc->sc_tx_bdata[ring_idx]; 3339 KASSERT(tbd->tbd_used > 0, ("tbd_used %d", tbd->tbd_used)); 3340 tbd->tbd_used--; 3341 3342 tb = &tbd->tbd_buf[buf_idx]; 3343 DPRINTF(sc, BWI_DBG_TXEOF, "txeof idx %d, " 3344 "acked %d, data_txcnt %d, ni %p\n", 3345 buf_idx, acked, data_txcnt, tb->tb_ni); 3346 3347 bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap); 3348 3349 if ((ni = tb->tb_ni) != NULL) { 3350 const struct bwi_txbuf_hdr *hdr = 3351 mtod(tb->tb_mbuf, const struct bwi_txbuf_hdr *); 3352 vap = ni->ni_vap; 3353 3354 /* NB: update rate control only for unicast frames */ 3355 if (hdr->txh_mac_ctrl & htole32(BWI_TXH_MAC_C_ACK)) { 3356 /* 3357 * Feed back 'acked and data_txcnt'. 
Note that the 3358 * generic AMRR code only understands one tx rate 3359 * and the estimator doesn't handle real retry counts 3360 * well so to avoid over-aggressive downshifting we 3361 * treat any number of retries as "1". 3362 */ 3363 ieee80211_ratectl_tx_complete(vap, ni, 3364 (data_txcnt > 1) ? IEEE80211_RATECTL_TX_SUCCESS : 3365 IEEE80211_RATECTL_TX_FAILURE, &acked, NULL); 3366 } 3367 ieee80211_tx_complete(ni, tb->tb_mbuf, !acked); 3368 tb->tb_ni = NULL; 3369 } else 3370 m_freem(tb->tb_mbuf); 3371 tb->tb_mbuf = NULL; 3372 3373 if (tbd->tbd_used == 0) 3374 sc->sc_tx_timer = 0; 3375} 3376 3377static void 3378bwi_txeof_status(struct bwi_softc *sc, int end_idx) 3379{ 3380 struct bwi_txstats_data *st = sc->sc_txstats; 3381 int idx; 3382 3383 bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_POSTREAD); 3384 3385 idx = st->stats_idx; 3386 while (idx != end_idx) { 3387 const struct bwi_txstats *stats = &st->stats[idx]; 3388 3389 if ((stats->txs_flags & BWI_TXS_F_PENDING) == 0) { 3390 int data_txcnt; 3391 3392 data_txcnt = __SHIFTOUT(stats->txs_txcnt, 3393 BWI_TXS_TXCNT_DATA); 3394 _bwi_txeof(sc, le16toh(stats->txs_id), 3395 stats->txs_flags & BWI_TXS_F_ACKED, 3396 data_txcnt); 3397 } 3398 idx = (idx + 1) % BWI_TXSTATS_NDESC; 3399 } 3400 st->stats_idx = idx; 3401} 3402 3403static void 3404bwi_txeof(struct bwi_softc *sc) 3405{ 3406 3407 for (;;) { 3408 uint32_t tx_status0, tx_status1; 3409 uint16_t tx_id; 3410 int data_txcnt; 3411 3412 tx_status0 = CSR_READ_4(sc, BWI_TXSTATUS0); 3413 if ((tx_status0 & BWI_TXSTATUS0_VALID) == 0) 3414 break; 3415 tx_status1 = CSR_READ_4(sc, BWI_TXSTATUS1); 3416 3417 tx_id = __SHIFTOUT(tx_status0, BWI_TXSTATUS0_TXID_MASK); 3418 data_txcnt = __SHIFTOUT(tx_status0, 3419 BWI_TXSTATUS0_DATA_TXCNT_MASK); 3420 3421 if (tx_status0 & (BWI_TXSTATUS0_AMPDU | BWI_TXSTATUS0_PENDING)) 3422 continue; 3423 3424 _bwi_txeof(sc, le16toh(tx_id), tx_status0 & BWI_TXSTATUS0_ACKED, 3425 data_txcnt); 3426 } 3427 3428 bwi_start_locked(sc); 3429} 3430 
/* Power up the baseband and select the requested clock mode. */
static int
bwi_bbp_power_on(struct bwi_softc *sc, enum bwi_clock_mode clk_mode)
{
	bwi_power_on(sc, 1);
	return bwi_set_clock_mode(sc, clk_mode);
}

/* Drop the baseband to slow clock and power it off. */
static void
bwi_bbp_power_off(struct bwi_softc *sc)
{
	bwi_set_clock_mode(sc, BWI_CLOCK_MODE_SLOW);
	bwi_power_off(sc, 1);
}

/*
 * Compute sc_pwron_delay from the PLL-on delay register and the minimum
 * clock frequency.  Requires the common regwin; a no-op on chips without
 * clock-mode capability.  Temporarily switches regwins.
 */
static int
bwi_get_pwron_delay(struct bwi_softc *sc)
{
	struct bwi_regwin *com, *old;
	struct bwi_clock_freq freq;
	uint32_t val;
	int error;

	com = &sc->sc_com_regwin;
	KASSERT(BWI_REGWIN_EXIST(com), ("no regwin"));

	if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0)
		return 0;

	error = bwi_regwin_switch(sc, com, &old);
	if (error)
		return error;

	bwi_get_clock_freq(sc, &freq);

	val = CSR_READ_4(sc, BWI_PLL_ON_DELAY);
	/* (val + 2) PLL clocks, converted to microseconds (rounded up) */
	sc->sc_pwron_delay = howmany((val + 2) * 1000000, freq.clkfreq_min);
	DPRINTF(sc, BWI_DBG_ATTACH, "power on delay %u\n", sc->sc_pwron_delay);

	return bwi_regwin_switch(sc, old, NULL);
}

/*
 * Enable the bus regwin if needed and mask its interrupt vector.
 * Temporarily switches regwins; restores the previous one on return.
 */
static int
bwi_bus_attach(struct bwi_softc *sc)
{
	struct bwi_regwin *bus, *old;
	int error;

	bus = &sc->sc_bus_regwin;

	error = bwi_regwin_switch(sc, bus, &old);
	if (error)
		return error;

	if (!bwi_regwin_is_enabled(sc, bus))
		bwi_regwin_enable(sc, bus, 0);

	/* Disable interrupts */
	CSR_WRITE_4(sc, BWI_INTRVEC, 0);

	return bwi_regwin_switch(sc, old, NULL);
}

/* Human-readable name of a register window, for diagnostics. */
static const char *
bwi_regwin_name(const struct bwi_regwin *rw)
{
	switch (rw->rw_type) {
	case BWI_REGWIN_T_COM:
		return "COM";
	case BWI_REGWIN_T_BUSPCI:
		return "PCI";
	case BWI_REGWIN_T_MAC:
		return "MAC";
	case BWI_REGWIN_T_BUSPCIE:
		return "PCIE";
	}
	panic("unknown regwin type 0x%04x\n", rw->rw_type);
	return NULL;
}

/*
 * Return the BWI_STATE_LO disable bit(s) appropriate for this bus
 * revision (rev 0 and 1 each use a single, different bit; later
 * revisions use both).
 */
static uint32_t
bwi_regwin_disable_bits(struct bwi_softc *sc)
{
	uint32_t busrev;

	/* XXX cache this */
	busrev = __SHIFTOUT(CSR_READ_4(sc, BWI_ID_LO), BWI_ID_LO_BUSREV_MASK);
	DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT | BWI_DBG_MISC,
		"bus rev %u\n", busrev);

	if (busrev == BWI_BUSREV_0)
		return BWI_STATE_LO_DISABLE1;
	else if (busrev == BWI_BUSREV_1)
		return BWI_STATE_LO_DISABLE2;
	else
		return (BWI_STATE_LO_DISABLE1 | BWI_STATE_LO_DISABLE2);
}

/*
 * A regwin is enabled iff STATE_LO has the clock bit set and neither
 * reset nor the revision-specific disable bits.
 */
int
bwi_regwin_is_enabled(struct bwi_softc *sc, struct bwi_regwin *rw)
{
	uint32_t val, disable_bits;

	disable_bits = bwi_regwin_disable_bits(sc);
	val = CSR_READ_4(sc, BWI_STATE_LO);

	if ((val & (BWI_STATE_LO_CLOCK |
		    BWI_STATE_LO_RESET |
		    disable_bits)) == BWI_STATE_LO_CLOCK) {
		DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is enabled\n",
			bwi_regwin_name(rw));
		return 1;
	} else {
		DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is disabled\n",
			bwi_regwin_name(rw));
		return 0;
	}
}

/*
 * Disable a regwin: shut off its normal clock, wait for the disable to
 * take and the window to go non-busy, then put it in reset with a gated
 * clock and finally in plain reset.  Each STATE_LO write is flushed by
 * a read-back plus a short delay.  Timeouts only warn; the sequence
 * proceeds regardless.
 */
void
bwi_regwin_disable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
{
	uint32_t state_lo, disable_bits;
	int i;

	state_lo = CSR_READ_4(sc, BWI_STATE_LO);

	/*
	 * If current regwin is in 'reset' state, it was already disabled.
	 */
	if (state_lo & BWI_STATE_LO_RESET) {
		DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT,
			"%s was already disabled\n", bwi_regwin_name(rw));
		return;
	}

	disable_bits = bwi_regwin_disable_bits(sc);

	/*
	 * Disable normal clock
	 */
	state_lo = BWI_STATE_LO_CLOCK | disable_bits;
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/*
	 * Wait until normal clock is disabled
	 */
#define NRETRY	1000
	for (i = 0; i < NRETRY; ++i) {
		state_lo = CSR_READ_4(sc, BWI_STATE_LO);
		if (state_lo & disable_bits)
			break;
		DELAY(10);
	}
	if (i == NRETRY) {
		device_printf(sc->sc_dev, "%s disable clock timeout\n",
			      bwi_regwin_name(rw));
	}

	/* Wait for the window to finish any in-flight operation */
	for (i = 0; i < NRETRY; ++i) {
		uint32_t state_hi;

		state_hi = CSR_READ_4(sc, BWI_STATE_HI);
		if ((state_hi & BWI_STATE_HI_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (i == NRETRY) {
		device_printf(sc->sc_dev, "%s wait BUSY unset timeout\n",
			      bwi_regwin_name(rw));
	}
#undef NRETRY

	/*
	 * Reset and disable regwin with gated clock
	 */
	state_lo = BWI_STATE_LO_RESET | disable_bits |
		   BWI_STATE_LO_CLOCK | BWI_STATE_LO_GATED_CLOCK |
		   __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);

	/* Reset and disable regwin */
	state_lo = BWI_STATE_LO_RESET | disable_bits |
		   __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);
}

/*
 * Enable a regwin: first force it into a known disabled state, then
 * bring it out of reset under a gated clock, clear any latched error
 * state, and finally run it on the normal clock.  Every STATE_LO write
 * is flushed by a read-back plus a short delay.
 */
void
bwi_regwin_enable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
{
	uint32_t state_lo, state_hi, imstate;

	bwi_regwin_disable(sc, rw, flags);

	/* Reset regwin with gated clock */
	state_lo = BWI_STATE_LO_RESET |
		   BWI_STATE_LO_CLOCK |
		   BWI_STATE_LO_GATED_CLOCK |
		   __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);

	/* Clear a latched system error, if any */
	state_hi = CSR_READ_4(sc, BWI_STATE_HI);
	if (state_hi & BWI_STATE_HI_SERROR)
		CSR_WRITE_4(sc, BWI_STATE_HI, 0);

	/* Clear latched in-band error/timeout indications */
	imstate = CSR_READ_4(sc, BWI_IMSTATE);
	if (imstate & (BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT)) {
		imstate &= ~(BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT);
		CSR_WRITE_4(sc, BWI_IMSTATE, imstate);
	}

	/* Enable regwin with gated clock */
	state_lo = BWI_STATE_LO_CLOCK |
		   BWI_STATE_LO_GATED_CLOCK |
		   __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);

	/* Enable regwin with normal clock */
	state_lo = BWI_STATE_LO_CLOCK |
		   __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
	CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);

	/* Flush pending bus write */
	CSR_READ_4(sc, BWI_STATE_LO);
	DELAY(1);
}

/*
 * Program the BSSID: install it in the hardware address filter, then
 * write the concatenated (myaddr, bssid) pair into the template RAM at
 * offset 0x20, packed into little-endian 32-bit words byte by byte.
 */
static void
bwi_set_bssid(struct bwi_softc *sc, const uint8_t *bssid)
{
	struct bwi_mac *mac;
	struct bwi_myaddr_bssid buf;
	const uint8_t *p;
	uint32_t val;
	int n, i;

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	bwi_set_addr_filter(sc, BWI_ADDR_FILTER_BSSID, bssid);

	bcopy(sc->sc_ic.ic_macaddr, buf.myaddr, sizeof(buf.myaddr));
	bcopy(bssid, buf.bssid, sizeof(buf.bssid));

	n = sizeof(buf) / sizeof(val);
	p = (const uint8_t *)&buf;
	for (i = 0; i < n; ++i) {
		int j;

		val = 0;
		for (j = 0; j < sizeof(val); ++j)
			val |= ((uint32_t)(*p++)) << (j * 8);

		TMPLT_WRITE_4(mac, 0x20 + (i * sizeof(val)), val);
	}
}

/*
 * net80211 updateslot hook: push the current short/long slot setting
 * down to the MAC if the interface is running.
 */
static void
bwi_updateslot(struct ieee80211com *ic)
{
	struct bwi_softc *sc = ic->ic_softc;
	struct bwi_mac *mac;

	BWI_LOCK(sc);
	if (sc->sc_flags & BWI_F_RUNNING) {
		DPRINTF(sc, BWI_DBG_80211, "%s\n", __func__);

		KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
		    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
		mac = (struct bwi_mac *)sc->sc_cur_regwin;

		bwi_mac_updateslot(mac, (ic->ic_flags & IEEE80211_F_SHSLOT));
	}
	BWI_UNLOCK(sc);
}

/*
 * Periodic TX power calibration callout (self-rearming every 15s).
 * Runs with the driver lock held via callout_init_mtx.
 */
static void
bwi_calibrate(void *xsc)
{
	struct bwi_softc *sc = xsc;
	struct bwi_mac *mac;

	BWI_ASSERT_LOCKED(sc);

	KASSERT(sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR,
	    ("opmode %d", sc->sc_ic.ic_opmode));

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	bwi_mac_calibrate_txpower(mac, sc->sc_txpwrcb_type);
	sc->sc_txpwrcb_type = BWI_TXPWR_CALIB;

	/* XXX 15 seconds */
	callout_reset(&sc->sc_calib_ch, hz * 15, bwi_calibrate, sc);
}

/* Delegate RSSI calculation for a received frame to the RF code. */
static int
bwi_calc_rssi(struct bwi_softc *sc, const struct bwi_rxbuf_hdr *hdr)
{
	struct bwi_mac *mac;

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	return bwi_rf_calc_rssi(mac, hdr);
}

/* Delegate noise-floor calculation to the RF code. */
static int
bwi_calc_noise(struct bwi_softc *sc)
{
	struct bwi_mac *mac;

	KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
	    ("current regwin type %d", sc->sc_cur_regwin->rw_type));
	mac = (struct bwi_mac *)sc->sc_cur_regwin;

	return bwi_rf_calc_noise(mac);
}

/*
 * Extract the rate code from the first (little-endian) PLCP word and
 * map it back to a net80211 rate for the given modulation type.
 */
static __inline uint8_t
bwi_plcp2rate(const uint32_t plcp0, enum ieee80211_phytype type)
{
	uint32_t plcp = le32toh(plcp0) & IEEE80211_OFDM_PLCP_RATE_MASK;
	return (ieee80211_plcp2rate(plcp, type));
}

/*
 * Fill the RX radiotap header from the hardware RX header and the
 * values already computed by the RX path.
 */
static void
bwi_rx_radiotap(struct bwi_softc *sc, struct mbuf *m,
	struct bwi_rxbuf_hdr *hdr, const void *plcp, int rate, int rssi,
	int noise)
{
	const struct ieee80211_frame_min *wh;

	sc->sc_rx_th.wr_flags = IEEE80211_RADIOTAP_F_FCS;
	/*
	 * NOTE(review): htole16() is applied to a value read from the
	 * chip rather than le16toh(); equivalent on little-endian hosts
	 * but worth confirming the intent.
	 */
	if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_SHPREAMBLE)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	wh = mtod(m, const struct ieee80211_frame_min *);
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP;

	sc->sc_rx_th.wr_tsf = hdr->rxh_tsf; /* No endian conversion */
	sc->sc_rx_th.wr_rate = rate;
	sc->sc_rx_th.wr_antsignal = rssi;
	sc->sc_rx_th.wr_antnoise = noise;
}

/*
 * Discover the LED wiring: pick per-vendor (or default) LED actions,
 * override them with per-LED SPROM GPIO settings when programmed
 * (0xff means "not programmed"), and remember the first blink-capable
 * LED for activity blinking.
 */
static void
bwi_led_attach(struct bwi_softc *sc)
{
	const uint8_t *led_act = NULL;
	uint16_t gpio, val[BWI_LED_MAX];
	int i;

	for (i = 0; i < nitems(bwi_vendor_led_act); ++i) {
		if (sc->sc_pci_subvid == bwi_vendor_led_act[i].vid) {
			led_act = bwi_vendor_led_act[i].led_act;
			break;
		}
	}
	if (led_act == NULL)
		led_act = bwi_default_led_act;

	gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO01);
	val[0] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_0);
	val[1] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_1);

	gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO23);
	val[2] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_2);
	val[3] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_3);

	for (i = 0; i < BWI_LED_MAX; ++i) {
		struct bwi_led *led = &sc->sc_leds[i];

		if (val[i] == 0xff) {
			led->l_act = led_act[i];
		} else {
			if (val[i] & BWI_LED_ACT_LOW)
				led->l_flags |= BWI_LED_F_ACTLOW;
			led->l_act = __SHIFTOUT(val[i], BWI_LED_ACT_MASK);
		}
		led->l_mask = (1 << i);

		if (led->l_act == BWI_LED_ACT_BLINK_SLOW ||
		    led->l_act == BWI_LED_ACT_BLINK_POLL ||
		    led->l_act == BWI_LED_ACT_BLINK) {
			led->l_flags |= BWI_LED_F_BLINK;
			if (led->l_act == BWI_LED_ACT_BLINK_POLL)
				led->l_flags |= BWI_LED_F_POLLABLE;
			else if (led->l_act == BWI_LED_ACT_BLINK_SLOW)
				led->l_flags |= BWI_LED_F_SLOW;

			if (sc->sc_blink_led == NULL) {
				sc->sc_blink_led = led;
				if (led->l_flags & BWI_LED_F_SLOW)
					BWI_LED_SLOWDOWN(sc->sc_led_idle);
			}
		}

		DPRINTF(sc, BWI_DBG_LED | BWI_DBG_ATTACH,
			"%dth led, act %d, lowact %d\n", i,
			led->l_act, led->l_flags & BWI_LED_F_ACTLOW);
	}
	callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0);
}

/*
 * Set or clear a LED's bit in a GPIO value, honouring active-low
 * wiring.  Returns the updated value.
 */
static __inline uint16_t
bwi_led_onoff(const struct bwi_led *led, uint16_t val, int on)
{
	if (led->l_flags & BWI_LED_F_ACTLOW)
		on = !on;
	if (on)
		val |= led->l_mask;
	else
		val &= ~led->l_mask;
	return val;
}

/*
 * Drive steady (non-blinking) LEDs according to the new net80211
 * state; blinking is cancelled when entering INIT.  Blink-capable LEDs
 * are otherwise left to the blink machinery.
 */
static void
bwi_led_newstate(struct bwi_softc *sc, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;
	int i;

	if (nstate == IEEE80211_S_INIT) {
		callout_stop(&sc->sc_led_blink_ch);
		sc->sc_led_blinking = 0;
	}

	if ((sc->sc_flags & BWI_F_RUNNING) == 0)
		return;

	val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
	for (i = 0; i < BWI_LED_MAX; ++i) {
		struct bwi_led *led = &sc->sc_leds[i];
		int on;

		if (led->l_act == BWI_LED_ACT_UNKN ||
		    led->l_act == BWI_LED_ACT_NULL)
			continue;

		if ((led->l_flags & BWI_LED_F_BLINK) &&
		    nstate != IEEE80211_S_INIT)
			continue;

		switch (led->l_act) {
		case BWI_LED_ACT_ON:	/* Always on */
			on = 1;
			break;
		case BWI_LED_ACT_OFF:	/* Always off */
		case BWI_LED_ACT_5GHZ:	/* TODO: 11A */
			on = 0;
			break;
		default:
			on = 1;
			switch (nstate) {
			case IEEE80211_S_INIT:
				on = 0;
				break;
			case IEEE80211_S_RUN:
				if (led->l_act == BWI_LED_ACT_11G &&
				    ic->ic_curmode != IEEE80211_MODE_11G)
					on = 0;
				break;
			default:
				if (led->l_act == BWI_LED_ACT_ASSOC)
					on = 0;
				break;
			}
			break;
		}

		val = bwi_led_onoff(led, val, on);
	}
	CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);
}

/*
 * Start an activity blink on the designated blink LED, rate-limited
 * by sc_led_idle for POLL events.  Blink durations are indexed by the
 * last TX/RX rate.
 * NOTE(review): sc->sc_blink_led is dereferenced without a NULL check;
 * presumably callers only raise events when a blink LED exists —
 * confirm.
 */
static void
bwi_led_event(struct bwi_softc *sc, int event)
{
	struct bwi_led *led = sc->sc_blink_led;
	int rate;

	if (event == BWI_LED_EVENT_POLL) {
		if ((led->l_flags & BWI_LED_F_POLLABLE) == 0)
			return;
		if (ticks - sc->sc_led_ticks < sc->sc_led_idle)
			return;
	}

	sc->sc_led_ticks = ticks;
	if (sc->sc_led_blinking)
		return;

	switch (event) {
	case BWI_LED_EVENT_RX:
		rate = sc->sc_rx_rate;
		break;
	case BWI_LED_EVENT_TX:
		rate = sc->sc_tx_rate;
		break;
	case BWI_LED_EVENT_POLL:
		rate = 0;
		break;
	default:
		panic("unknown LED event %d\n", event);
		break;
	}
	bwi_led_blink_start(sc, bwi_led_duration[rate].on_dur,
			    bwi_led_duration[rate].off_dur);
}

/*
 * Turn the blink LED on and schedule the off-phase.  Slow LEDs get
 * their durations stretched by BWI_LED_SLOWDOWN.
 */
static void
bwi_led_blink_start(struct bwi_softc *sc, int on_dur, int off_dur)
{
	struct bwi_led *led = sc->sc_blink_led;
	uint16_t val;

	val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
	val = bwi_led_onoff(led, val, 1);
	CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);

	if (led->l_flags & BWI_LED_F_SLOW) {
		BWI_LED_SLOWDOWN(on_dur);
		BWI_LED_SLOWDOWN(off_dur);
	}

	sc->sc_led_blinking = 1;
	sc->sc_led_blink_offdur = off_dur;

	callout_reset(&sc->sc_led_blink_ch, on_dur, bwi_led_blink_next, sc);
}

/* Blink callout, phase 2: LED off, then schedule end of blink. */
static void
bwi_led_blink_next(void *xsc)
{
	struct bwi_softc *sc = xsc;
	uint16_t val;

	val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
	val = bwi_led_onoff(sc->sc_blink_led, val, 0);
	CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);

	callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur,
		      bwi_led_blink_end, sc);
}

/* Blink callout, phase 3: blink finished, allow a new one. */
static void
bwi_led_blink_end(void *xsc)
{
	struct bwi_softc *sc = xsc;
	sc->sc_led_blinking = 0;
}

/*
 * Taskqueue handler: reinitialize the device after a fatal condition,
 * without restarting traffic (see the #if 0 below).
 */
static void
bwi_restart(void *xsc, int pending)
{
	struct bwi_softc *sc = xsc;

	device_printf(sc->sc_dev, "%s begin, help!\n", __func__);
	BWI_LOCK(sc);
	bwi_init_statechg(sc, 0);
#if 0
	bwi_start_locked(sc);
#endif
	BWI_UNLOCK(sc);
}
|