• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /freebsd-12-stable/sys/dev/vnic/

Lines Matching defs:bgx

79 static int bgx_init_phy(struct bgx *);
81 static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
85 static void bgx_get_qlm_mode(struct bgx *);
86 static void bgx_init_hw(struct bgx *);
87 static int bgx_lmac_enable(struct bgx *, uint8_t);
88 static void bgx_lmac_disable(struct bgx *, uint8_t);
104 "bgx",
138 struct bgx *bgx;
144 bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
145 bgx->dev = dev;
148 lmac->bgx = bgx;
153 bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
155 if (bgx->reg_base == NULL) {
161 bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
163 bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;
165 bgx_vnic[bgx->bgx_id] = bgx;
166 bgx_get_qlm_mode(bgx);
168 err = bgx_init_phy(bgx);
172 bgx_init_hw(bgx);
175 for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
176 err = bgx_lmac_enable(bgx, lmacid);
179 bgx->bgx_id, lmacid);
187 bgx_vnic[bgx->bgx_id] = NULL;
189 rman_get_rid(bgx->reg_base), bgx->reg_base);
191 free(bgx, M_BGX);
201 struct bgx *bgx;
205 bgx = lmac->bgx;
207 for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
208 bgx_lmac_disable(bgx, lmacid);
210 bgx_vnic[bgx->bgx_id] = NULL;
212 rman_get_rid(bgx->reg_base), bgx->reg_base);
213 free(bgx, M_BGX);
221 bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
227 return (bus_read_8(bgx->reg_base, addr));
231 bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
237 bus_write_8(bgx->reg_base, addr, val);
241 bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
247 bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
251 bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
258 reg_val = bgx_reg_read(bgx, lmac, reg);
289 struct bgx *bgx;
291 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
292 if (bgx != NULL)
293 return (bgx->lmac_count);
303 struct bgx *bgx;
306 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
307 if (bgx == NULL)
310 lmac = &bgx->lmac[lmacid];
319 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
321 if (bgx != NULL)
322 return (bgx->lmac[lmacid].mac);
330 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
332 if (bgx == NULL)
335 memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
341 struct bgx *bgx = lmac->bgx;
346 cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
348 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
350 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
351 misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
368 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
369 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
377 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
378 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
386 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
388 bgx_reg_write(bgx, lmac->lmacid,
391 bgx_reg_write(bgx, lmac->lmacid,
397 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
398 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
400 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
404 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
456 struct bgx *bgx;
458 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
459 if (bgx == NULL)
464 return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
470 struct bgx *bgx;
472 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
473 if (bgx == NULL)
476 return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
480 bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
484 while (bgx->lmac[lmac].dmac > 0) {
485 offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
487 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
488 bgx->lmac[lmac].dmac--;
496 struct bgx *bgx;
503 bgx = bgx_vnic[bgx_idx];
505 if (bgx == NULL) {
512 if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
513 device_printf(bgx->dev,
519 if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
520 bgx->lmac[lmac].dmac = 1;
522 offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
524 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
525 bgx->lmac[lmac].dmac++;
527 bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
537 struct bgx *bgx;
541 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
542 if (bgx == NULL)
545 lmac = &bgx->lmac[lmac_idx];
547 cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
552 bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
554 cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
559 bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
564 bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
568 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
570 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
573 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
575 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
578 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
581 bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
582 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
584 device_printf(bgx->dev, "BGX PCS reset not completed\n");
589 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
592 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
594 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
596 device_printf(bgx->dev, "BGX AN_CPT not completed\n");
604 bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
609 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
610 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
612 device_printf(bgx->dev, "BGX SPU reset not completed\n");
617 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
619 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
621 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
623 if (bgx->lmac_type != BGX_MODE_RXAUI) {
624 bgx_reg_modify(bgx, lmacid,
627 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
632 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
633 bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
634 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
635 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
636 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
637 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
639 if (bgx->use_training) {
640 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
641 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
642 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
644 bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
649 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
652 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
654 bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
657 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
659 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
661 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
662 if (bgx->lmac_type == BGX_MODE_10G_KR)
664 else if (bgx->lmac_type == BGX_MODE_40G_KR)
669 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
671 cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
673 bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
676 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
678 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
680 bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
682 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
685 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
688 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
690 bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
698 struct bgx *bgx = lmac->bgx;
700 int lmac_type = bgx->lmac_type;
703 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
704 if (bgx->use_training) {
705 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
708 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
709 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
711 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
717 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
719 device_printf(bgx->dev, "BGX SPU reset not completed\n");
725 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
727 device_printf(bgx->dev,
732 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
734 device_printf(bgx->dev,
741 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
742 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
743 device_printf(bgx->dev, "Receive fault, retry training\n");
744 if (bgx->use_training) {
745 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
748 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
749 cfg = bgx_reg_read(bgx, lmacid,
752 bgx_reg_write(bgx, lmacid,
761 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
763 device_printf(bgx->dev, "SMU RX link not okay\n");
768 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
770 device_printf(bgx->dev, "SMU RX not idle\n");
775 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
777 device_printf(bgx->dev, "SMU TX not idle\n");
781 if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
783 device_printf(bgx->dev, "Receive fault\n");
788 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
789 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
791 device_printf(bgx->dev, "SPU receive link down\n");
795 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
797 bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
810 bgx_reg_modify(lmac->bgx, lmac->lmacid,
812 bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
815 link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
818 if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
837 bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
843 lmac = &bgx->lmac[lmacid];
844 lmac->bgx = bgx;
846 if (bgx->lmac_type == BGX_MODE_SGMII) {
848 if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
852 if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
857 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
859 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
860 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
862 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
864 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
865 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
869 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
873 bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
876 bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);
878 if ((bgx->lmac_type != BGX_MODE_XFI) &&
879 (bgx->lmac_type != BGX_MODE_XAUI) &&
880 (bgx->lmac_type != BGX_MODE_XLAUI) &&
881 (bgx->lmac_type != BGX_MODE_40G_KR) &&
882 (bgx->lmac_type != BGX_MODE_10G_KR)) {
884 device_printf(bgx->dev,
891 device_printf(bgx->dev,
912 bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
917 lmac = &bgx->lmac[lmacid];
923 cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
925 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
926 bgx_flush_dmac_addrs(bgx, lmacid);
928 if ((bgx->lmac_type != BGX_MODE_XFI) &&
929 (bgx->lmac_type != BGX_MODE_XLAUI) &&
930 (bgx->lmac_type != BGX_MODE_40G_KR) &&
931 (bgx->lmac_type != BGX_MODE_10G_KR)) {
933 device_printf(bgx->dev,
939 device_printf(bgx->dev,
948 bgx_set_num_ports(struct bgx *bgx)
952 switch (bgx->qlm_mode) {
954 bgx->lmac_count = 4;
955 bgx->lmac_type = BGX_MODE_SGMII;
956 bgx->lane_to_sds = 0;
959 bgx->lmac_count = 1;
960 bgx->lmac_type = BGX_MODE_XAUI;
961 bgx->lane_to_sds = 0xE4;
964 bgx->lmac_count = 2;
965 bgx->lmac_type = BGX_MODE_RXAUI;
966 bgx->lane_to_sds = 0xE4;
969 bgx->lmac_count = 4;
970 bgx->lmac_type = BGX_MODE_XFI;
971 bgx->lane_to_sds = 0;
974 bgx->lmac_count = 1;
975 bgx->lmac_type = BGX_MODE_XLAUI;
976 bgx->lane_to_sds = 0xE4;
979 bgx->lmac_count = 4;
980 bgx->lmac_type = BGX_MODE_10G_KR;
981 bgx->lane_to_sds = 0;
982 bgx->use_training = 1;
985 bgx->lmac_count = 1;
986 bgx->lmac_type = BGX_MODE_40G_KR;
987 bgx->lane_to_sds = 0xE4;
988 bgx->use_training = 1;
991 bgx->lmac_count = 0;
1000 lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
1002 bgx->lmac_count = lmac_count;
1006 bgx_init_hw(struct bgx *bgx)
1010 bgx_set_num_ports(bgx);
1012 bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
1013 if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
1014 device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);
1017 for (i = 0; i < bgx->lmac_count; i++) {
1018 if (bgx->lmac_type == BGX_MODE_RXAUI) {
1020 bgx->lane_to_sds = 0x0e;
1022 bgx->lane_to_sds = 0x04;
1023 bgx_reg_write(bgx, i, BGX_CMRX_CFG,
1024 (bgx->lmac_type << 8) | bgx->lane_to_sds);
1027 bgx_reg_write(bgx, i, BGX_CMRX_CFG,
1028 (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
1029 bgx->lmac[i].lmacid_bd = lmac_count;
1033 bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
1034 bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
1037 for (i = 0; i < bgx->lmac_count; i++) {
1038 bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
1045 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
1049 bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
1053 bgx_get_qlm_mode(struct bgx *bgx)
1055 device_t dev = bgx->dev;
1062 lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
1065 train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
1070 bgx->qlm_mode = QLM_MODE_SGMII;
1073 bgx->bgx_id);
1077 bgx->qlm_mode = QLM_MODE_XAUI_1X4;
1080 bgx->bgx_id);
1084 bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
1087 bgx->bgx_id);
1092 bgx->qlm_mode = QLM_MODE_XFI_4X1;
1095 bgx->bgx_id);
1098 bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
1101 bgx->bgx_id);
1107 bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
1110 bgx->bgx_id);
1113 bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
1116 bgx->bgx_id);
1121 bgx->qlm_mode = QLM_MODE_SGMII;
1124 bgx->bgx_id);
1130 bgx_init_phy(struct bgx *bgx)
1137 err = bgx_fdt_init_phy(bgx);