Lines matching refs: mcs (Linux: drivers/net/ethernet/marvell/octeontx2/af/mcs.c)

13 #include "mcs.h"
27 void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
32 stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
35 stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
38 stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
41 stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
44 stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
47 stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
50 stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
53 stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
56 stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
59 stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
62 stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
65 stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
68 stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
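The TX SecY stat reads above (and the RX, flow-id, port, SA and SC getters that follow) all share one shape: compute a per-id CSR offset, then latch the counter with mcs_reg_read(). A minimal sketch of that pattern, with an illustrative base and stride (the real MCSX_CSE_* offset math is elided from this listing):

static inline u64 mcs_read_stat(struct mcs *mcs, u64 base, int id)
{
        /* Illustrative layout: one 64-bit counter per id, 8 bytes apart. */
        u64 reg = base + (id * 0x8ull);

        return mcs_reg_read(mcs, reg);
}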
71 void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
76 stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
79 stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
82 stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
85 stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
88 stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
91 stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
94 stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
97 stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
100 stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
103 stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
106 stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
109 stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
112 stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
115 stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
118 stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
121 stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
124 stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
126 if (mcs->hw->mcs_blks > 1) {
128 stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
132 void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
142 stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
145 void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
152 stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
155 stats->parser_err_cnt = mcs_reg_read(mcs, reg);
156 if (mcs->hw->mcs_blks > 1) {
158 stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
162 stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
165 stats->parser_err_cnt = mcs_reg_read(mcs, reg);
168 stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
172 void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
178 stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
181 stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
184 stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
187 stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
190 stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
193 stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
196 stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
200 void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
207 stats->hit_cnt = mcs_reg_read(mcs, reg);
210 stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
213 stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
216 stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
219 stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
221 if (mcs->hw->mcs_blks > 1) {
223 stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
226 stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
228 if (mcs->hw->mcs_blks == 1) {
230 stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
233 stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
237 stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
240 stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
242 if (mcs->hw->mcs_blks == 1) {
244 stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
247 stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
252 void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
266 mcs_reg_write(mcs, reg, BIT_ULL(0));
270 mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
274 mcs_get_rx_secy_stats(mcs, &secy_st, id);
276 mcs_get_tx_secy_stats(mcs, &secy_st, id);
279 mcs_get_sc_stats(mcs, &sc_st, id, dir);
282 mcs_get_sa_stats(mcs, &sa_st, id, dir);
285 mcs_get_port_stats(mcs, &port_st, id, dir);
289 mcs_reg_write(mcs, reg, 0x0);
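mcs_clear_stats() arms a clear strobe with BIT_ULL(0), reads the selected stats group back so the hardware zeroes it, then writes the strobe back to 0x0. Hypothetical usage that wipes every group shown above for one resource id:

static void mcs_wipe_stats(struct mcs *mcs, u8 id, int dir)
{
        mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
        mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
        mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
        mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
        /* Port stats are cleared the same way; the type name is elided here. */
}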
292 int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
298 map = &mcs->rx;
300 map = &mcs->tx;
306 mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
313 mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
320 mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
327 mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
332 void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
340 mcs_reg_write(mcs, reg, next_pn);
343 void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
355 mcs_reg_write(mcs, reg, val);
359 mcs_reg_write(mcs, reg, val);
362 void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
369 mcs_reg_write(mcs, reg, val);
372 void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
380 mcs_reg_write(mcs, reg, plcy[reg_id]);
385 mcs_reg_write(mcs, reg, plcy[reg_id]);
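mcs_sa_plcy_write() programs an SA policy as an array of 64-bit words written to consecutive CSRs, once per direction. A sketch of the loop shape; the word count and stride are assumptions:

static void mcs_write_plcy_words(struct mcs *mcs, u64 *plcy, int n_words,
                                 u64 base, int sa_id, u64 stride)
{
        int reg_id;

        /* Assumed layout: one policy word per CSR, 8 bytes apart. */
        for (reg_id = 0; reg_id < n_words; reg_id++)
                mcs_reg_write(mcs, base + sa_id * stride + reg_id * 0x8ull,
                              plcy[reg_id]);
}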
390 void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
399 val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
401 val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
403 mcs_reg_write(mcs, reg, val);
406 void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
408 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
409 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
411 mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
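mcs_rx_sc_cam_write() loads the SCI into word 0 and the owning SecY into word 1 of the RX SC CAM row, then enables the row itself via mcs_ena_dis_sc_cam_entry(). Hypothetical call binding an SCI to SecY 5 in CAM slot 2 (all values illustrative):

mcs_rx_sc_cam_write(mcs, 0x0001020304050001ull /* sci */, 5 /* secy */, 2 /* sc_id */);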
414 void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
423 mcs_reg_write(mcs, reg, plcy);
425 if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
426 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
429 void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
441 mcs_reg_write(mcs, reg, val);
444 void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
460 val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
462 val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
464 mcs_reg_write(mcs, reg, val);
467 void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
475 mcs_reg_write(mcs, reg, data[reg_id]);
479 mcs_reg_write(mcs, reg, mask[reg_id]);
484 mcs_reg_write(mcs, reg, data[reg_id]);
488 mcs_reg_write(mcs, reg, mask[reg_id]);
493 int mcs_install_flowid_bypass_entry(struct mcs *mcs)
500 flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
501 __set_bit(flow_id, mcs->rx.flow_ids.bmap);
502 __set_bit(flow_id, mcs->tx.flow_ids.bmap);
506 mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
510 mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
513 secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
514 __set_bit(secy_id, mcs->rx.secy.bmap);
515 __set_bit(secy_id, mcs->tx.secy.bmap);
519 if (mcs->hw->mcs_blks > 1)
521 mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
525 if (mcs->hw->mcs_blks > 1)
527 mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
533 mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
535 mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
538 mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
539 mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
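mcs_install_flowid_bypass_entry() reserves the last TCAM entry and SecY (offset by MCS_RSRC_RSVD_CNT), writes an all-ones mask so every packet matches, binds the rule to a pass-through SecY policy, and enables it for both RX and TX. A condensed per-direction sketch; the four-word TCAM width and the secy_mem_map field names are assumptions:

static void mcs_install_catchall(struct mcs *mcs, int flow_id, int secy_id,
                                 int dir)
{
        struct secy_mem_map map = { 0 };
        u64 data[4] = { 0 };            /* match value: don't care */
        u64 mask[4];
        int i;

        for (i = 0; i < 4; i++)         /* all-ones mask: every bit ignored */
                mask[i] = GENMASK_ULL(63, 0);

        map.flow_id = flow_id;          /* field names assumed */
        map.secy = secy_id;

        mcs_flowid_entry_write(mcs, data, mask, flow_id, dir);
        mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, dir);
        mcs_ena_dis_flowid_entry(mcs, flow_id, dir, true);
}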
544 void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
550 map = &mcs->rx;
552 map = &mcs->tx;
555 mcs_secy_plcy_write(mcs, 0, secy_id, dir);
561 mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
582 int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
590 map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
597 dis = mcs_reg_read(mcs, reg);
599 mcs_reg_write(mcs, reg, dis);
605 dis = mcs_reg_read(mcs, reg);
607 mcs_reg_write(mcs, reg, dis);
612 int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
627 mcs_reg_write(mcs, reg, req->data0);
637 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
646 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
648 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
651 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
653 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
665 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
667 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
669 mcs_reg_write(mcs, reg, req->data2);
672 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
674 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
676 mcs_reg_write(mcs, reg, req->data2);
687 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
693 enb = mcs_reg_read(mcs, reg);
695 mcs_reg_write(mcs, reg, enb);
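The recurring `req->dataN & GENMASK_ULL(47, 0)` above keeps only the low 48 bits of each datum, the width of an Ethernet MAC address, before it lands in a DA/SA match register. For example, with reg computed as above:

u64 data = 0xffff001122334455ull;       /* upper 16 bits are junk */

mcs_reg_write(mcs, reg, data & GENMASK_ULL(47, 0));     /* writes 0x001122334455 */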
712 int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
718 map = &mcs->rx;
720 map = &mcs->tx;
728 mcs_ena_dis_flowid_entry(mcs, id, dir, false);
737 mcs_clear_secy_plcy(mcs, id, dir);
748 mcs_ena_dis_sc_cam_entry(mcs, id, false);
771 int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
778 map = &mcs->rx;
780 map = &mcs->tx;
810 static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
817 sc_bmap = &mcs->tx.sc;
819 event.mcs_id = mcs->mcs_id;
822 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
823 val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
825 if (mcs->tx_sa_active[sc])
832 event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
833 mcs_add_intr_wq_entry(mcs, &event);
837 static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
844 sc_bmap = &mcs->tx.sc;
846 event.mcs_id = mcs->mcs_id;
854 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
855 val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
863 if (status == mcs->tx_sa_active[sc])
871 event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
872 mcs_add_intr_wq_entry(mcs, &event);
876 static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
883 for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
887 intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
892 event.mcs_id = mcs->mcs_id;
895 event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
896 mcs_add_intr_wq_entry(mcs, &event);
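The RX PN-threshold scan above walks one summary CSR per 64 SAs (sa_entries / 64 reads in total), so the SA index is recovered from the register index and bit position:

/* Assumed bit-to-SA mapping used by the scan above. */
static inline int mcs_pn_intr_sa_id(int reg, int bit)
{
        return reg * 64 + bit;
}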
901 static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
905 event.mcs_id = mcs->mcs_id;
906 event.pcifunc = mcs->pf_map[0];
921 mcs_add_intr_wq_entry(mcs, &event);
924 static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
931 event.mcs_id = mcs->mcs_id;
932 event.pcifunc = mcs->pf_map[0];
936 mcs_add_intr_wq_entry(mcs, &event);
939 void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
954 val = mcs_reg_read(mcs, reg);
957 for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
960 dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
964 void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
972 for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
974 dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
980 struct mcs *mcs = (struct mcs *)mcs_irq;
984 mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
987 intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
992 cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
995 mcs_rx_pn_thresh_reached_handler(mcs);
998 mcs_rx_misc_intr_handler(mcs, cpm_intr);
1001 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
1006 cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
1009 if (mcs->hw->mcs_blks > 1)
1010 cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
1012 cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
1016 mcs_tx_misc_intr_handler(mcs, cpm_intr);
1019 if (mcs->hw->mcs_blks > 1)
1020 cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
1022 cn10kb_mcs_tx_pn_wrapped_handler(mcs);
1025 mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
1030 bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
1031 mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
1034 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
1035 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
1040 bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
1041 mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
1044 mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
1045 mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
1050 pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
1051 mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
1054 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
1055 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
1060 pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
1061 mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
1064 mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
1065 mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
1069 mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
1070 mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
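mcs_ip_intr_handler() follows the usual W1C/W1S discipline: mask the top-level interrupt on entry, read the TOP summary, dispatch and acknowledge each leaf block (CPM RX/TX, BBE RX/TX, PAB RX/TX), then clear and re-arm the top-level bit. The skeleton, with the per-block dispatch elided:

static irqreturn_t mcs_ip_intr_skeleton(int irq, void *data)
{
        struct mcs *mcs = data;

        mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));    /* mask */

        /* ... read MCSX_TOP_SLAVE_INT_SUM, service and ack each leaf ... */

        mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));            /* clear */
        mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));    /* re-arm */

        return IRQ_HANDLED;
}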
1075 static void *alloc_mem(struct mcs *mcs, int n)
1077 return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
1080 static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
1082 struct hwinfo *hw = mcs->hw;
1085 res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
1089 res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
1093 res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
1097 res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
1101 res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
1105 res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
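Each table is a devm-managed u16 array indexed by resource id and holding the owning pcifunc; devm_kcalloc() ties its lifetime to the device, so nothing is freed by hand on teardown. The checks elided between the allocations above presumably follow the usual pattern:

res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
if (!res->flowid2pf_map)
        return -ENOMEM;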
1137 static int mcs_register_interrupts(struct mcs *mcs)
1141 mcs->num_vec = pci_msix_vec_count(mcs->pdev);
1143 ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
1144 mcs->num_vec, PCI_IRQ_MSIX);
1146 dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
1147 mcs->num_vec, ret);
1151 ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
1152 mcs_ip_intr_handler, 0, "MCS_IP", mcs);
1154 dev_err(mcs->dev, "MCS IP irq registration failed\n");
1159 mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
1162 mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
1167 mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
1168 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
1170 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
1171 mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
1173 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
1174 mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
1176 mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
1177 if (!mcs->tx_sa_active) {
1185 free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
1187 pci_free_irq_vectors(mcs->pdev);
1188 mcs->num_vec = 0;
1194 struct mcs *mcs;
1201 list_for_each_entry(mcs, &mcs_list, mcs_list)
1202 if (mcs->mcs_id > idmax)
1203 idmax = mcs->mcs_id;
1211 struct mcs *mcs_get_pdata(int mcs_id)
1213 struct mcs *mcs_dev;
1224 struct mcs *mcs_dev;
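The body of mcs_get_pdata() is elided in this listing; a plausible reconstruction from the mcs_list walk shown a few lines up:

struct mcs *mcs_get_pdata(int mcs_id)
{
        struct mcs *mcs_dev;

        list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
                if (mcs_dev->mcs_id == mcs_id)
                        return mcs_dev;
        }
        return NULL;
}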
1233 void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
1237 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
1242 if (mcs->hw->mcs_blks > 1) {
1246 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
1247 mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
1249 val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
1256 mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
1258 val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
1260 mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
1264 void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
1269 rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
1272 if (mcs->hw->mcs_blks > 1) {
1274 rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
1276 rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
1277 if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
1281 rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
1288 void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
1295 if (mcs->hw->mcs_blks > 1)
1302 val = mcs_reg_read(mcs, reg);
1303 if (mcs->hw->mcs_blks > 1) {
1308 rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
1320 void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
1324 mcs_reg_write(mcs, reg, reset & 0x1);
1328 void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
1334 mcs_reg_write(mcs, reg, (u64)mode);
1336 mcs_reg_write(mcs, reg, (u64)mode);
1339 void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
1348 mcs_reg_write(mcs, reg, pn->threshold);
1351 void cn10kb_mcs_parser_cfg(struct mcs *mcs)
1359 mcs_reg_write(mcs, reg, val);
1363 mcs_reg_write(mcs, reg, val);
1369 mcs_reg_write(mcs, reg, val);
1373 mcs_reg_write(mcs, reg, val);
1376 static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
1382 mcs_reg_write(mcs, reg, 0);
1384 if (mcs->hw->mcs_blks > 1) {
1386 mcs_reg_write(mcs, reg, 0xe000e);
1391 mcs_reg_write(mcs, reg, 0);
1396 struct mcs *mcs;
1400 mcs = mcs_get_pdata(mcs_id);
1401 if (!mcs)
1403 for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
1404 cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
1408 mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
1414 static int mcs_x2p_calibration(struct mcs *mcs)
1421 val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
1423 mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
1426 while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
1432 dev_err(mcs->dev, "MCS X2P calibration failed, ignoring\n");
1437 val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
1438 for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
1442 dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
1445 mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
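mcs_x2p_calibration() raises a calibration-request bit in MCSX_MIL_GLOBAL, polls MCSX_MIL_RX_GBL_STATUS bit 0 until the block reports done, checks one status bit per X2P interface, and finally drops the request bit (BIT_ULL(5)). A sketch of the bounded poll; the retry count and sleep range are assumptions:

int timeout = 1000;                     /* retry budget assumed */

while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
        if (!timeout--) {
                dev_err(mcs->dev, "MCS X2P calibration failed, ignoring\n");
                break;                  /* proceed anyway, as the message says */
        }
        usleep_range(80, 100);
}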
1450 static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
1455 val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
1460 mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
1461 mcs->bypass = bypass;
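mcs_set_external_bypass() is a read-modify-write of MCSX_MIL_GLOBAL that flips the external-bypass bit and caches the new state in mcs->bypass. Assumed shape (the bit position is illustrative):

val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
if (bypass)
        val |= BIT_ULL(6);              /* bypass bit position assumed */
else
        val &= ~BIT_ULL(6);
mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
mcs->bypass = bypass;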
1464 static void mcs_global_cfg(struct mcs *mcs)
1467 mcs_set_external_bypass(mcs, false);
1470 mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
1471 mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
1474 if (mcs->hw->mcs_blks == 1) {
1475 mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
1479 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
1480 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
1483 void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
1485 struct hwinfo *hw = mcs->hw;
1491 hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
1511 struct mcs *mcs;
1513 mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
1514 if (!mcs)
1517 mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
1518 if (!mcs->hw)
1534 mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1535 if (!mcs->reg_base) {
1536 dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
1541 pci_set_drvdata(pdev, mcs);
1542 mcs->pdev = pdev;
1543 mcs->dev = &pdev->dev;
1546 mcs->mcs_ops = &cn10kb_mcs_ops;
1548 mcs->mcs_ops = cnf10kb_get_mac_ops();
1551 mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
1553 mcs_global_cfg(mcs);
1556 err = mcs_x2p_calibration(mcs);
1560 mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
1563 /* Set mcs tx side resources */
1564 err = mcs_alloc_struct_mem(mcs, &mcs->tx);
1568 /* Set mcs rx side resources */
1569 err = mcs_alloc_struct_mem(mcs, &mcs->rx);
1574 for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
1575 mcs_lmac_init(mcs, lmac);
1578 mcs->mcs_ops->mcs_parser_cfg(mcs);
1580 err = mcs_register_interrupts(mcs);
1584 list_add(&mcs->mcs_list, &mcs_list);
1585 mutex_init(&mcs->stats_lock);
1591 mcs_set_external_bypass(mcs, true);
1601 struct mcs *mcs = pci_get_drvdata(pdev);
1604 mcs_set_external_bypass(mcs, true);
1605 free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);