Lines Matching refs:adap

109  *	@adap: the adapter
119 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
124 t4_write_reg(adap, addr_reg, start_idx);
125 *vals++ = t4_read_reg(adap, data_reg);
132 * @adap: the adapter
142 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
147 t4_write_reg(adap, addr_reg, start_idx++);
148 t4_write_reg(adap, data_reg, *vals++);
162 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
164 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
167 if (chip_id(adap) <= CHELSIO_T5)
172 if (is_t4(adap))
175 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
176 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
184 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
191 * @adap: the adapter
197 static void t4_report_fw_error(struct adapter *adap)
211 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
213 adap->flags &= ~FW_OK;
214 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
217 t4_os_dump_devlog(adap);
224 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
228 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
234 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
236 CH_ALERT(adap,
304 * @adap: the adapter
329 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
350 if (adap->flags & CHK_MBOX_ACCESS)
351 ASSERT_SYNCHRONIZED_OP(adap);
356 if (adap->flags & IS_VF) {
357 if (is_t6(adap))
376 ctl = t4_read_reg(adap, ctl_reg);
386 t4_report_fw_error(adap);
399 CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
407 CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
409 t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));
411 if (adap->flags & IS_VF) {
424 t4_read_reg(adap, data_reg);
427 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
428 read_tx_state(adap, &tx_state[0]); /* also flushes the write_reg */
439 if (!(adap->flags & IS_VF)) {
440 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
446 check_tx_state(adap, &tx_state[0]);
459 v = t4_read_reg(adap, ctl_reg);
464 t4_write_reg(adap, ctl_reg,
472 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
473 CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
474 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
478 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
491 CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
493 CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
494 CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);
498 t4_report_fw_error(adap);
501 t4_os_dump_devlog(adap);
504 t4_fatal_err(adap, true);
508 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
511 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
516 static int t4_edc_err_read(struct adapter *adap, int idx)
521 if (is_t4(adap)) {
522 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
526 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
533 CH_WARN(adap,
536 t4_read_reg(adap, edc_ecc_err_addr_reg));
537 CH_WARN(adap,
540 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
541 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
542 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
543 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
544 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
545 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
546 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
547 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
548 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
555 * @adap: the adapter
565 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
571 if (is_t4(adap)) {
587 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
589 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
590 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
591 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
592 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
594 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
601 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
603 *ecc = t4_read_reg64(adap, MC_DATA(16));
610 * @adap: the adapter
620 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
626 if (is_t4(adap)) {
652 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
654 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
655 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
656 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
657 t4_write_reg(adap, edc_bist_cmd_reg,
659 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
666 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
668 *ecc = t4_read_reg64(adap, EDC_DATA(16));
675 * @adap: the adapter
688 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
716 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
718 ret = t4_edc_read(adap, mtype, pos, data, NULL);
740 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
760 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
767 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
772 CH_WARN(adap, "Firmware failed to return "
781 return t4_hw_pci_read_cfg4(adap, reg);
814 * @adap: the adapter
822 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
2650 unsigned int chip_version = chip_id(adap);
2658 if (adap->flags & IS_VF) {
2668 if (adap->flags & IS_VF) {
2678 if (adap->flags & IS_VF) {
2688 CH_ERR(adap,
2708 *bufp++ = t4_read_reg(adap, reg);
3358 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3368 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3546 static int t4_fw_matches_chip(struct adapter *adap,
3553 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3554 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3555 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3558 CH_ERR(adap,
3560 hdr->chip, chip_id(adap));
3566 * @adap: the adapter
3572 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3580 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3596 CH_ERR(adap, "FW image has no data\n");
3600 CH_ERR(adap,
3605 CH_ERR(adap,
3610 CH_ERR(adap, "FW image too large, max is %u bytes\n",
3614 if (!t4_fw_matches_chip(adap, hdr))
3621 CH_ERR(adap,
3627 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3638 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3646 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3651 ret = t4_write_flash(adap,
3656 CH_ERR(adap, "firmware download failed, error %d\n",
3663 * @adap: the adapter
3666 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3674 V_FW_PARAMS_CMD_PFN(adap->pf) |
3682 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3685 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3692 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3694 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3696 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
3706 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
3708 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
3709 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
3716 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3719 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3724 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3726 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3731 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3733 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3734 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3737 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3740 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3747 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3748 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3749 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3751 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3855 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3895 if (is_bt(adap->port[adap->chan_map[port]]))
3901 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
3912 if (adap->params.port_caps32) {
3924 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3929 * @adap: the adapter
3935 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3947 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3984 t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
3990 enable = t4_read_reg(adap, ii->enable_reg);
3992 fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
3996 CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
4005 CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
4010 CH_ALERT(adap, " ? [0x%08x]\n", leftover);
4017 t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
4029 cause = t4_read_reg(adap, ii->cause_reg);
4031 cause &= t4_read_reg(adap, ii->enable_reg);
4033 t4_show_intr_info(adap, ii, cause);
4036 fatal &= t4_read_reg(adap, ii->enable_reg);
4045 rc |= (action->action)(adap, action->arg, verbose);
4049 t4_write_reg(adap, ii->cause_reg, cause);
4050 (void)t4_read_reg(adap, ii->cause_reg);
4058 static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
4177 if (is_t4(adap)) {
4178 fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
4179 fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);
4185 fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);
4193 static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
4210 return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
4216 static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
4347 if (chip_id(adap) <= CHELSIO_T5) {
4354 fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
4355 fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
4356 fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
4357 fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
4358 if (chip_id(adap) >= CHELSIO_T5)
4359 fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
4360 if (chip_id(adap) >= CHELSIO_T6)
4361 fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);
4363 v = t4_read_reg(adap, A_SGE_ERROR_STATS);
4365 CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
4367 CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
4368 t4_write_reg(adap, A_SGE_ERROR_STATS,
4378 static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
4494 fw_err = t4_read_reg(adap, A_PCIE_FW);
4496 t4_report_fw_error(adap);
4505 val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE);
4508 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT);
4512 fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
4513 fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
4514 fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
4522 static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
4555 fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
4556 fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);
4564 static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
4594 fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
4595 fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);
4600 static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
4605 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
4608 CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
4618 static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
4651 return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
4657 static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
4691 return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
4697 static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
4723 return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
4739 static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
4781 if (chip_id(adap) <= CHELSIO_T5) {
4789 return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
4795 static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
4916 fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
4917 fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
4918 fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
4919 fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
4920 fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
4921 fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
4922 fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
4923 if (chip_id(adap) > CHELSIO_T4) {
4924 fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
4928 t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
4929 t4_read_reg(adap, A_MPS_INT_CAUSE); /* flush */
4938 static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
4971 if (is_t4(adap)) {
4989 fatal = t4_handle_intr(adap, &ii, 0, verbose);
4991 v = t4_read_reg(adap, count_reg);
4994 CH_ALERT(adap,
5000 t4_edc_err_read(adap, idx);
5001 CH_WARN_RATELIMIT(adap,
5005 t4_write_reg(adap, count_reg, 0xffffffff);
5011 static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
5015 v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
5016 CH_ALERT(adap,
5019 t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
5028 static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
5064 fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
5065 fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
5066 if (chip_id(adap) > CHELSIO_T4)
5067 fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);
5075 static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
5093 return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
5099 static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
5119 return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
5125 static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
5136 if (is_t4(adap)) {
5155 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5157 if (chip_id(adap) >= CHELSIO_T5) {
5166 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5169 if (chip_id(adap) >= CHELSIO_T6) {
5178 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5184 static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
5201 return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
5206 * @adap: the adapter
5213 int t4_slow_intr_handler(struct adapter *adap, bool verbose)
5295 perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
5297 t4_show_intr_info(adap, &pl_perr_cause, perr);
5299 t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
5301 perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
5303 fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose);
5305 t4_fatal_err(adap, false);
5325 void t4_intr_enable(struct adapter *adap)
5329 if (chip_id(adap) <= CHELSIO_T5)
5339 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
5340 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5341 t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
5342 t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
5347 * @adap: the adapter whose interrupts should be disabled
5353 void t4_intr_disable(struct adapter *adap)
5356 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5357 t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
5362 * @adap: the adapter whose interrupts should be cleared
5367 void t4_intr_clear(struct adapter *adap)
5406 const int nchan = adap->chip_params->nchan;
5409 t4_write_reg(adap, cause_reg[i], 0xffffffff);
5411 if (is_t4(adap)) {
5412 t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
5414 t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
5416 t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff);
5418 t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE),
5422 if (chip_id(adap) >= CHELSIO_T5) {
5423 t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
5424 t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff);
5425 t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff);
5426 t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff);
5427 if (is_t5(adap)) {
5428 t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1),
5432 t4_write_reg(adap, T5_PORT_REG(i,
5434 if (chip_id(adap) > CHELSIO_T5) {
5435 t4_write_reg(adap, T5_PORT_REG(i,
5439 t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE),
5443 if (chip_id(adap) >= CHELSIO_T6) {
5444 t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff);
5447 t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
5448 t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff);
5449 t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff);
5450 (void) t4_read_reg(adap, A_PL_INT_CAUSE); /* flush */
5629 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5631 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
5632 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
5660 * @adap: the adapter
5670 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5689 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5702 * @adap: the adapter
5714 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5735 if (t4_use_ldst(adap))
5736 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5743 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5746 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5753 * @adap: the adapter
5761 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5764 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5770 * @adap: the adapter
5778 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
5781 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5787 * @adap: the adapter
5795 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5798 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5804 * @adap: the adapter
5812 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5815 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5821 * @adap: the adapter
5827 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5829 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5834 * @adap: the adapter
5843 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5847 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5854 if ((chip_id(adap) > CHELSIO_T5) &&
5858 t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5862 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5866 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
6039 * @adap: the adapter
6047 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
6057 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6065 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6079 * @adap: the adapter
6085 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
6088 int nchan = adap->chip_params->nchan;
6090 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
6093 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
6096 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
6099 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
6102 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
6105 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
6108 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
6111 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
6114 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
6120 * @adap: the adapter
6125 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
6128 int nchan = adap->chip_params->nchan;
6130 t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
6135 * @adap: the adapter
6141 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6144 int nchan = adap->chip_params->nchan;
6146 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6148 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6153 * @adap: the adapter
6158 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
6161 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
6167 * @adap: the adapter
6174 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6179 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6182 t4_tp_mib_read(adap, &st->frames_drop, 1,
6185 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6193 * @adap: the adapter
6199 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6204 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6213 * @adap: the adapter
6219 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6225 t4_write_reg(adap, A_TP_MTU_TABLE,
6227 v = t4_read_reg(adap, A_TP_MTU_TABLE);
6236 * @adap: the adapter
6242 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6248 t4_write_reg(adap, A_TP_CCTRL_TABLE,
6250 incr[mtu][w] = (u16)t4_read_reg(adap,
6257 * @adap: the adapter
6264 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6267 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6268 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6269 t4_write_reg(adap, A_TP_PIO_DATA, val);
6321 * @adap: the adapter
6331 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
6348 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
6357 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
6365 * @adap: the adapter
6372 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
6376 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
6390 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
6396 * @adap: the adapter
6402 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
6405 unsigned int clk = adap->params.vpd.cclk * 1000;
6427 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
6429 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6434 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6440 * @adap: the adapter
6446 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
6451 ipg *= core_ticks_per_usec(adap);
6456 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
6457 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6462 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6463 t4_read_reg(adap, A_TP_TM_PIO_DATA);
6477 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6479 u64 v = (u64)bytes256 * adap->params.vpd.cclk;
6486 * @adap: the adapter
6493 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6497 v = t4_read_reg(adap, A_TP_TX_TRATE);
6498 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6499 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6500 if (adap->chip_params->nchan > 2) {
6501 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6502 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6505 v = t4_read_reg(adap, A_TP_TX_ORATE);
6506 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6507 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6508 if (adap->chip_params->nchan > 2) {
6509 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6510 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6516 * @adap: the adapter
6525 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
6531 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
6537 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
6550 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
6570 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
6576 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
6583 t4_write_reg(adap, data_reg, tp->data[i]);
6584 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
6586 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
6589 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
6591 (is_t4(adap) ?
6600 * @adap: the adapter
6607 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6614 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6615 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6617 if (is_t4(adap)) {
6636 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6637 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6643 * @adap: the adapter
6649 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6654 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6655 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6656 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6657 if (is_t4(adap))
6658 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6660 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6670 * @adap: the adapter
6676 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6681 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6682 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6683 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6684 if (is_t4(adap)) {
6685 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6687 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6697 * @adap: the adapter
6704 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
6708 if (adap->params.mps_bg_map)
6709 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
6711 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6714 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6722 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
6724 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6728 if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6772 * @adap: The adapter
6777 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6784 t4_get_port_stats(adap, idx, stats);
6793 * @adap: the adapter
6799 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6801 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6802 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
6805 t4_read_reg64(adap, \
6806 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
6808 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6834 if (chip_id(adap) >= CHELSIO_T5) {
6871 if (chip_id(adap) >= CHELSIO_T5) {
6895 * @adap: the adapter
6901 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6903 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6906 t4_read_reg64(adap, \
6907 (is_t4(adap) ? \
6910 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6943 * @adap: the adapter
6949 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
6954 if (is_t4(adap)) {
6965 t4_write_reg(adap, mag_id_reg_l,
6968 t4_write_reg(adap, mag_id_reg_h,
6971 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6977 * @adap: the adapter
6990 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6996 if (is_t4(adap))
7002 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
7009 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
7012 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
7013 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
7014 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
7021 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
7022 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
7023 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
7024 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
7028 t4_write_reg(adap, EPIO_REG(DATA0), crc);
7029 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
7030 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
7031 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
7036 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
7068 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
7084 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7089 * @adap: the adapter
7098 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7115 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7123 * @adap: the adapter
7132 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7149 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7155 * @adap: the adapter
7311 * @adap: the adapter
7317 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
7331 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7337 * @adap: the adapter
7346 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
7374 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7378 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
7379 t4_report_fw_error(adap);
7428 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
7467 * @adap: the adapter
7472 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7478 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7483 * @adap: the adapter
7489 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7496 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7501 * @adap: the adapter
7515 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7523 if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
7530 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7547 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7548 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7561 * @adap: the adapter
7567 int t4_fw_restart(struct adapter *adap, unsigned int mbox)
7571 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7573 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7584 * @adap: the adapter
7603 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7611 if (!t4_fw_matches_chip(adap, fw_hdr))
7615 ret = t4_fw_halt(adap, mbox, force);
7620 ret = t4_load_fw(adap, fw_data, size);
7624 return t4_fw_restart(adap, mbox);
7629 * @adap: the adapter
7635 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7641 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7646 * @adap: the adapter
7658 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7683 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7690 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7694 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
7699 * @adap: the adapter
7711 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7734 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7739 * @adap: the adapter
7750 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7754 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7760 * @adap: the adapter
7779 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7803 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7808 * @adap: the adapter
7826 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7847 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7868 *vfvld = adap->params.viid_smt_extn_support ?
7873 *vin = adap->params.viid_smt_extn_support ?
7883 * @adap: the adapter
7896 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7900 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7906 * @adap: the adapter
7914 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7928 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7933 * @adap: the adapter
7945 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7974 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7979 * @adap: the adapter
7999 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
8006 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
8042 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8072 * @adap: the adapter
8092 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8098 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
8114 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8120 if (adap->params.viid_smt_extn_support)
8123 if (chip_id(adap) <= CHELSIO_T5)
8135 * @adap: the adapter
8144 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8158 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8163 * @adap: the adapter
8173 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8186 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8191 * @adap: the adapter
8200 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8203 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8208 * @adap: the adapter
8215 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8226 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8231 * @adap: the adapter
8244 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8259 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8264 * @adap: the adapter
8275 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8290 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8295 * @adap: the adapter
8303 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8315 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8320 * @adap: the adapter
8328 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8340 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8345 * @adap: the adapter
8353 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8365 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8628 * @adap: the adapter
8633 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8650 for_each_port(adap, i) {
8651 pi = adap2pinfo(adap, i);
8668 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
9098 * @adap: the adapter
9104 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
9106 struct devlog_params *dparams = &adap->params.devlog;
9117 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
9149 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9164 * t4_init_sge_params - initialize adap->params.sge
9256 static void read_filter_mode_and_ingress_config(struct adapter *adap,
9260 struct tp_params *tpp = &adap->params.tp;
9262 t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
9264 t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
9272 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
9273 tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
9274 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
9275 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
9276 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
9277 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
9278 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
9279 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
9280 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
9281 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
9283 if (chip_id(adap) > CHELSIO_T4) {
9284 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
9285 adap->params.tp.hash_filter_mask = v;
9286 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
9287 adap->params.tp.hash_filter_mask |= (u64)v << 32;
9292 * t4_init_tp_params - initialize adap->params.tp
9293 * @adap: the adapter
9297 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9301 struct tp_params *tpp = &adap->params.tp;
9303 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
9311 read_filter_mode_and_ingress_config(adap, sleep_ok);
9319 if (chip_id(adap) > CHELSIO_T5) {
9320 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
9332 * @adap: the adapter
9339 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9341 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9385 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
9389 struct port_info *p = adap2pinfo(adap, port_id);
9396 } while ((adap->params.portvec & (1 << j)) == 0);
9400 p->mps_bg_map = t4_get_mps_bg_map(adap, j);
9401 p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
9404 if (!(adap->flags & IS_VF) ||
9405 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
9409 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
9420 ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
9433 * @adap: the adapter
9441 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9444 int cim_num_obq = adap->chip_params->cim_num_obq;
9447 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
9449 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9456 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9458 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9467 * @adap: the adapter
9476 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9495 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
9497 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
9501 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
9503 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
9509 * @adap: the adapter
9518 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9522 int cim_num_obq = adap->chip_params->cim_num_obq;
9527 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9529 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9537 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
9539 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
9543 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
9545 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
9559 * @adap: the adapter
9566 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9571 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9575 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
9576 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9579 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
9586 * @adap: the adapter
9593 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9598 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9602 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
9603 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
9604 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9610 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
9613 return t4_cim_write(adap, addr, 1, &val);
9618 * @adap: the adapter
9625 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
9628 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
9633 * @adap: the adapter
9641 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9646 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
9651 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
9656 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
9664 for (i = 0; i < adap->params.cim_la_size; i++) {
9665 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
9669 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
9676 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
9686 if (is_t6(adap))
9692 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
9702 * @adap: the adapter
9710 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9715 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
9717 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
9718 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
9720 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
9730 val |= adap->params.tp.la_mask;
9733 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
9734 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
9743 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
9744 cfg | adap->params.tp.la_mask);
9876 * @adap: the adapter
9881 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9886 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
9887 v = t4_read_reg(adap, A_TP_PACE_TABLE);
9888 pace_vals[i] = dack_ticks_to_usec(adap, v);
9894 * @adap: the adapter
9901 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
9908 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9916 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9922 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9926 *ipg = (10000 * v) / core_ticks_per_usec(adap);
9932 * @adap: the adapter
9938 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9943 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9945 cfg_addr = t4_flash_cfg_addr(adap);
9953 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
9960 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9975 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
9985 CH_ERR(adap, "config file %s failed %d\n",
9992 * @adap: the adapter
9996 int t5_fw_init_extern_mem(struct adapter *adap)
10001 if (!is_t5(adap))
10007 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
10174 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10184 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10190 CH_ERR(adap, "boot image encroaching on firmware region\n");
10201 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10223 CH_ERR(adap, "boot image too small/large\n");
10232 CH_ERR(adap, "Boot image missing signature\n");
10240 CH_ERR(adap, "PCI header missing signature\n");
10248 CH_ERR(adap, "Vendor ID missing signature\n");
10256 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
10281 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
10286 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10291 CH_ERR(adap, "boot image download failed, error %d\n", ret);
10315 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
10320 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10322 cfg_addr = t4_flash_bootcfg_addr(adap);
10330 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
10337 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10353 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
10363 CH_ERR(adap, "boot config data %s failed %d\n",
10370 * @adap: the adapter
10379 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
10391 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
10392 read_filter_mode_and_ingress_config(adap, sleep_ok);
10399 * @adap: the adapter
10404 void t4_clr_port_stats(struct adapter *adap, int idx)
10407 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
10410 if (is_t4(adap))
10417 t4_write_reg(adap, port_base_addr + i, 0);
10420 t4_write_reg(adap, port_base_addr + i, 0);
10423 t4_write_reg(adap,
10425 t4_write_reg(adap,
10432 * @adap: the adapter
10441 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
10468 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10476 * @adap: the adapter
10485 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
10512 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10517 * @adap: the adapter
10525 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10547 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10561 * @adap: the adapter
10569 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
10574 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
10575 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
10578 *data++ = t4_read_reg(adap, i);
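
The hits above are verbatim lines from the driver's common hardware code; the two sketches below are editor-added illustrations, not part of the matched file. First, the indirect register access pattern shared by t4_read_indirect()/t4_write_indirect() (lines 119/142 above) and the PM_TX debug dump (around line 4605): the helper writes an index into a control register and reads each value back through the paired data register. A minimal caller sketch, assuming the driver's own headers supply struct adapter, u32, ARRAY_SIZE, CH_ALERT and the register names; the count of 8 and start index of 0 are illustrative only, not values taken from the source.

/*
 * Illustrative sketch only (not from the matched file): dump a small block
 * of indirectly addressed registers with t4_read_indirect().  The
 * A_PM_TX_DBG_CTRL/A_PM_TX_DBG_DATA pair comes from the pmtx_dump_dbg_stats()
 * hits above; the count of 8 and start index of 0 are assumptions.
 */
static void example_dump_pm_tx_dbg(struct adapter *adap)
{
	u32 vals[8];
	unsigned int i;

	/*
	 * For each of the 8 slots the helper writes the index to the CTRL
	 * register and latches the value through the DATA register.
	 */
	t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &vals[0],
	    ARRAY_SIZE(vals), 0);

	for (i = 0; i < ARRAY_SIZE(vals); i++)
		CH_ALERT(adap, "PM_TX_DBG[%u] = 0x%08x\n", i, vals[i]);
}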
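
Second, the mailbox calling convention that most of the remaining hits funnel into via t4_wr_mbox()/t4_wr_mbox_meat_timeout(). A hedged sketch using t4_query_params() (line 7690 above) to fetch one firmware device parameter; the V_FW_PARAMS_MNEM/V_FW_PARAMS_PARAM_X encoding macros and FW_PARAMS_PARAM_DEV_FWREV are assumed from the firmware interface header's usual naming and are not confirmed by the matched lines.

/*
 * Sketch: query one firmware "device" parameter over the PF's mailbox.
 * The mnemonic/param encoding macros below are assumptions based on the
 * driver's V_*/FW_PARAMS_* naming convention, not values shown above.
 */
static int example_query_fw_rev(struct adapter *adap, u32 *fw_rev)
{
	u32 param, val;
	int rc;

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);

	/* One parameter in, one value out; adap->mbox/adap->pf as in the hits above. */
	rc = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (rc == 0)
		*fw_rev = val;
	return (rc);
}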