Lines matching refs: oct

569 static void *__retrieve_octeon_config_info(struct octeon_device *oct,
572 u32 oct_id = oct->octeon_id;
577 if (oct->chip_id == OCTEON_CN66XX) {
579 } else if ((oct->chip_id == OCTEON_CN68XX) &&
582 } else if ((oct->chip_id == OCTEON_CN68XX) &&
585 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
587 } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
597 static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
599 switch (oct->chip_id) {
602 return lio_validate_cn6xxx_config_info(oct, conf);
613 void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
617 conf = __retrieve_octeon_config_info(oct, card_type);
621 if (__verify_octeon_config_info(oct, conf)) {
622 dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
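
The three fragments above form a retrieve-then-verify flow: __retrieve_octeon_config_info picks a per-chip default configuration by chip_id, __verify_octeon_config_info sanity-checks it, and oct_get_config_info logs and bails out when verification fails. A minimal userspace sketch of the same shape, with hypothetical chip IDs and a toy config struct (none of these names are the driver's):

#include <stdio.h>
#include <stddef.h>

enum chip_id { CHIP_A = 1, CHIP_B = 2 };        /* hypothetical IDs */
struct config { int num_queues; };

static struct config conf_a = { .num_queues = 4 };
static struct config conf_b = { .num_queues = 8 };

static struct config *retrieve_config(enum chip_id id)
{
        switch (id) {
        case CHIP_A: return &conf_a;
        case CHIP_B: return &conf_b;
        default:     return NULL;               /* unknown chip */
        }
}

static int verify_config(const struct config *c)
{
        return (c && c->num_queues > 0) ? 0 : -1;       /* 0 == sane */
}

static struct config *get_config(enum chip_id id)
{
        struct config *c = retrieve_config(id);

        if (!c || verify_config(c)) {
                fprintf(stderr, "Configuration verification failed\n");
                return NULL;
        }
        return c;
}

int main(void)
{
        return get_config(CHIP_A) ? 0 : 1;
}
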
646 void octeon_free_device_mem(struct octeon_device *oct)
650 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
651 if (oct->io_qmask.oq & BIT_ULL(i))
652 vfree(oct->droq[i]);
655 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
656 if (oct->io_qmask.iq & BIT_ULL(i))
657 vfree(oct->instr_queue[i]);
660 i = oct->octeon_id;
661 vfree(oct);
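
octeon_free_device_mem walks every possible queue slot but only vfree()s the ones whose bit is set in io_qmask.oq / io_qmask.iq, so queues that were never allocated are skipped. A standalone model of that bitmask-guarded teardown, with free() standing in for vfree():

#include <stdint.h>
#include <stdlib.h>

#define MAX_QUEUES 64
#define BIT_ULL(n) (1ULL << (n))

struct dev_model {
        uint64_t oq_mask;               /* one bit per allocated queue */
        void *droq[MAX_QUEUES];
};

static void free_queues(struct dev_model *d)
{
        int i;

        for (i = 0; i < MAX_QUEUES; i++) {
                if (d->oq_mask & BIT_ULL(i)) {
                        free(d->droq[i]);       /* stands in for vfree() */
                        d->droq[i] = NULL;
                }
        }
        d->oq_mask = 0;
}

int main(void)
{
        struct dev_model d = { 0 };

        d.droq[3] = malloc(128);
        d.oq_mask |= BIT_ULL(3);
        free_queues(&d);                /* frees only slot 3 */
        return 0;
}
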
671 struct octeon_device *oct;
711 oct = (struct octeon_device *)buf;
712 oct->priv = (void *)(buf + octdevsize);
713 oct->chip = (void *)(buf + octdevsize + priv_size);
714 oct->dispatch.dlist = (struct octeon_dispatch *)
717 return oct;
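
octeon_allocate_device_mem appears to grab one buffer and carve the device struct, the priv area, the chip struct, and the dispatch list out of it at running offsets, so a single vfree(oct) in octeon_free_device_mem releases everything. A sketch of that carving, assuming the caller passes sizes already rounded to a safe alignment (calloc stands in for the zeroed vmalloc):

#include <stdlib.h>

struct octeon_model {
        void *priv;
        void *chip;
        void *dlist;
};

/* Carve one allocation into device + priv + chip + dispatch regions. */
static struct octeon_model *alloc_device_mem(size_t devsize, size_t priv_size,
                                             size_t chip_size, size_t dlist_size)
{
        char *buf = calloc(1, devsize + priv_size + chip_size + dlist_size);
        struct octeon_model *dev;

        if (!buf)
                return NULL;

        dev = (struct octeon_model *)buf;
        dev->priv  = buf + devsize;
        dev->chip  = buf + devsize + priv_size;
        dev->dlist = buf + devsize + priv_size + chip_size;
        return dev;
}

int main(void)
{
        struct octeon_model *dev = alloc_device_mem(sizeof(*dev), 64, 128, 256);
        int ok = (dev != NULL);

        free(dev);                      /* one free tears down all regions */
        return ok ? 0 : 1;
}
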
724 struct octeon_device *oct = NULL;
733 oct = octeon_allocate_device_mem(pci_id, priv_size);
734 if (oct) {
736 octeon_device[oct_idx] = oct;
741 if (!oct)
744 spin_lock_init(&oct->pci_win_lock);
745 spin_lock_init(&oct->mem_access_lock);
747 oct->octeon_id = oct_idx;
748 snprintf(oct->device_name, sizeof(oct->device_name),
749 "LiquidIO%d", (oct->octeon_id));
751 return oct;
763 int octeon_register_device(struct octeon_device *oct,
768 oct->loc.bus = bus;
769 oct->loc.dev = dev;
770 oct->loc.func = func;
772 oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
773 atomic_set(oct->adapter_refcount, 0);
776 oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id];
777 atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED);
780 for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
782 dev_err(&oct->pci_dev->dev,
786 atomic_inc(oct->adapter_refcount);
794 oct->adapter_refcount =
796 oct->adapter_fw_state =
803 atomic_inc(oct->adapter_refcount);
804 refcount = atomic_read(oct->adapter_refcount);
806 dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
807 oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
817 int octeon_deregister_device(struct octeon_device *oct)
821 atomic_dec(oct->adapter_refcount);
822 refcount = atomic_read(oct->adapter_refcount);
824 dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__,
825 oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
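
octeon_register_device and octeon_deregister_device maintain a per-adapter refcount shared by all PCI functions on the same adapter: register increments it and reads the result back for the debug log, deregister does the reverse. A minimal C11-atomics model of that get/put pair (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct adapter_model {
        atomic_int refcount;            /* devices sharing this adapter */
};

/* Increment and return the new count, like register + atomic_read. */
static int adapter_get(struct adapter_model *a)
{
        return atomic_fetch_add(&a->refcount, 1) + 1;
}

/* Decrement and return the new count, like deregister + atomic_read. */
static int adapter_put(struct adapter_model *a)
{
        return atomic_fetch_sub(&a->refcount, 1) - 1;
}

int main(void)
{
        struct adapter_model a = { .refcount = 0 };

        printf("after get: %d\n", adapter_get(&a));     /* 1 */
        printf("after put: %d\n", adapter_put(&a));     /* 0 */
        return 0;
}
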
832 octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
841 oct->ioq_vector = vzalloc(size);
842 if (!oct->ioq_vector)
845 ioq_vector = &oct->ioq_vector[i];
846 ioq_vector->oct_dev = oct;
849 ioq_vector->mbox = oct->mbox[i];
854 if (oct->chip_id == OCTEON_CN23XX_PF_VID)
855 ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
865 octeon_free_ioq_vector(struct octeon_device *oct)
867 vfree(oct->ioq_vector);
872 int octeon_setup_instr_queues(struct octeon_device *oct)
877 int numa_node = dev_to_node(&oct->pci_dev->dev);
879 if (OCTEON_CN6XXX(oct))
881 CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
882 else if (OCTEON_CN23XX_PF(oct))
883 num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
884 else if (OCTEON_CN23XX_VF(oct))
885 num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));
887 oct->num_iqs = 0;
889 oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]),
891 if (!oct->instr_queue[0])
892 oct->instr_queue[0] =
894 if (!oct->instr_queue[0])
896 memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
897 oct->instr_queue[0]->q_index = 0;
898 oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
899 oct->instr_queue[0]->ifidx = 0;
902 txpciq.s.pkind = oct->pfvf_hsword.pkind;
905 if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
907 vfree(oct->instr_queue[0]);
908 oct->instr_queue[0] = NULL;
912 oct->num_iqs++;
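
octeon_setup_instr_queues first tries vzalloc_node() on the device's NUMA node and falls back to plain vzalloc() when node-local memory is unavailable. The sketch below models that try-preferred-then-fallback shape in plain C; alloc_on_node() is a stand-in that always fails so the fallback path is exercised:

#include <stdlib.h>

/* Stand-in for vzalloc_node(); always fails here to show the fallback. */
static void *alloc_on_node(size_t size, int node)
{
        (void)size;
        (void)node;                     /* no NUMA in this model */
        return NULL;
}

static void *alloc_queue(size_t size, int preferred_node)
{
        void *q = alloc_on_node(size, preferred_node);

        if (!q)                         /* node-local failed: take any node */
                q = calloc(1, size);    /* zeroed, like vzalloc() */
        return q;
}

int main(void)
{
        void *q = alloc_queue(4096, 0);
        int ok = (q != NULL);

        free(q);
        return ok ? 0 : 1;
}
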
917 int octeon_setup_output_queues(struct octeon_device *oct)
922 int numa_node = dev_to_node(&oct->pci_dev->dev);
924 if (OCTEON_CN6XXX(oct)) {
926 CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx));
928 CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx));
929 } else if (OCTEON_CN23XX_PF(oct)) {
930 num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
931 desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
932 } else if (OCTEON_CN23XX_VF(oct)) {
933 num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
934 desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
936 oct->num_oqs = 0;
937 oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node);
938 if (!oct->droq[0])
939 oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));
940 if (!oct->droq[0])
943 if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
944 vfree(oct->droq[oq_no]);
945 oct->droq[oq_no] = NULL;
948 oct->num_oqs++;
954 int octeon_set_io_queues_off(struct octeon_device *oct)
958 if (OCTEON_CN6XXX(oct)) {
959 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
960 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
961 } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
968 for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
970 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
976 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
980 dev_err(&oct->pci_dev->dev,
987 octeon_write_csr64(oct,
992 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
994 dev_err(&oct->pci_dev->dev,
1004 void octeon_set_droq_pkt_op(struct octeon_device *oct,
1011 if (OCTEON_CN6XXX(oct)) {
1012 reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
1019 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
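
octeon_set_droq_pkt_op is a classic read-modify-write on the CN6XXX_SLI_PKT_OUT_ENB CSR: read the enable register, set or clear the queue's bit, write the result back. Modeled here over a plain variable instead of a hardware CSR:

#include <stdint.h>
#include <stdio.h>

static uint32_t pkt_out_enb;            /* stand-in for the CSR */

static void set_droq_op(unsigned int q_no, int enable)
{
        uint32_t reg = pkt_out_enb;     /* read */

        if (enable)
                reg |= (1u << q_no);    /* modify: set the queue's bit */
        else
                reg &= ~(1u << q_no);   /* modify: clear the queue's bit */

        pkt_out_enb = reg;              /* write back */
}

int main(void)
{
        set_droq_op(2, 1);
        printf("enb=0x%x\n", pkt_out_enb);      /* prints 0x4 */
        set_droq_op(2, 0);
        return 0;
}
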
1023 int octeon_init_dispatch_list(struct octeon_device *oct)
1027 oct->dispatch.count = 0;
1030 oct->dispatch.dlist[i].opcode = 0;
1031 INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
1035 octeon_register_reqtype_free_fn(oct, i, NULL);
1037 spin_lock_init(&oct->dispatch.lock);
1043 void octeon_delete_dispatch_list(struct octeon_device *oct)
1050 spin_lock_bh(&oct->dispatch.lock);
1055 dispatch = &oct->dispatch.dlist[i].list;
1061 oct->dispatch.dlist[i].opcode = 0;
1064 oct->dispatch.count = 0;
1066 spin_unlock_bh(&oct->dispatch.lock);
1134 octeon_register_dispatch_fn(struct octeon_device *oct,
1145 spin_lock_bh(&oct->dispatch.lock);
1147 if (oct->dispatch.dlist[idx].opcode == 0) {
1148 oct->dispatch.dlist[idx].opcode = combined_opcode;
1149 oct->dispatch.dlist[idx].dispatch_fn = fn;
1150 oct->dispatch.dlist[idx].arg = fn_arg;
1151 oct->dispatch.count++;
1152 spin_unlock_bh(&oct->dispatch.lock);
1156 spin_unlock_bh(&oct->dispatch.lock);
1161 pfn = octeon_get_dispatch(oct, opcode, subcode);
1165 dev_dbg(&oct->pci_dev->dev,
1178 spin_lock_bh(&oct->dispatch.lock);
1179 list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
1180 oct->dispatch.count++;
1181 spin_unlock_bh(&oct->dispatch.lock);
1185 octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg)
1188 dev_err(&oct->pci_dev->dev,
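
octeon_register_dispatch_fn claims a table slot keyed by the combined opcode/subcode under dispatch.lock, bumping dispatch.count on success; per the list_add fragment above, a taken slot is handled by chaining a new entry onto that slot's list instead. The sketch below models only the first-claim path with a pthread mutex; the slot encoding and table size are assumptions (build with -pthread):

#include <pthread.h>

#define DISPATCH_SLOTS 16

typedef int (*dispatch_fn_t)(void *pkt, void *arg);

struct dispatch_table {
        pthread_mutex_t lock;
        unsigned int count;
        struct {
                unsigned int opcode;    /* 0 == slot free */
                dispatch_fn_t fn;
                void *arg;
        } slot[DISPATCH_SLOTS];
};

static int register_dispatch(struct dispatch_table *t, unsigned int opcode,
                             unsigned int subcode, dispatch_fn_t fn, void *arg)
{
        unsigned int combined = (opcode << 8) | subcode;  /* assumed encoding */
        unsigned int idx = combined % DISPATCH_SLOTS;
        int ret = -1;

        pthread_mutex_lock(&t->lock);
        if (t->slot[idx].opcode == 0) {
                t->slot[idx].opcode = combined;
                t->slot[idx].fn = fn;
                t->slot[idx].arg = arg;
                t->count++;
                ret = 0;
        }
        pthread_mutex_unlock(&t->lock);
        return ret;                     /* -1: slot taken, caller must chain */
}

static int handler(void *pkt, void *arg) { (void)pkt; (void)arg; return 0; }

int main(void)
{
        struct dispatch_table t = { .lock = PTHREAD_MUTEX_INITIALIZER };

        return register_dispatch(&t, 0x12, 0x01, handler, NULL);
}
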
1202 struct octeon_device *oct = (struct octeon_device *)buf;
1207 if (OCTEON_CN6XXX(oct))
1209 CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx));
1210 else if (OCTEON_CN23XX_PF(oct))
1212 CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf));
1214 if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
1215 dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
1216 atomic_read(&oct->status));
1224 oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1226 oct->fw_info.max_nic_ports =
1228 oct->fw_info.num_gmx_ports =
1232 if (oct->fw_info.max_nic_ports < num_nic_ports) {
1233 dev_err(&oct->pci_dev->dev,
1235 num_nic_ports, oct->fw_info.max_nic_ports);
1238 oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
1239 oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1240 oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1242 oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;
1244 for (i = 0; i < oct->num_iqs; i++)
1245 oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
1247 atomic_set(&oct->status, OCT_DEV_CORE_OK);
1249 cs = &core_setup[oct->octeon_id];
1252 dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
1260 strscpy(oct->boardinfo.name, cs->boardname,
1261 sizeof(oct->boardinfo.name));
1262 strscpy(oct->boardinfo.serial_number, cs->board_serial_number,
1263 sizeof(oct->boardinfo.serial_number));
1267 oct->boardinfo.major = cs->board_rev_major;
1268 oct->boardinfo.minor = cs->board_rev_minor;
1270 dev_info(&oct->pci_dev->dev,
1282 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
1285 if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
1286 (oct->io_qmask.iq & BIT_ULL(q_no)))
1287 return oct->instr_queue[q_no]->max_count;
1293 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
1295 if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
1296 (oct->io_qmask.oq & BIT_ULL(q_no)))
1297 return oct->droq[q_no]->max_count;
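
Both qsize getters guard the dereference three ways, non-NULL device, queue index in range, and the queue's bit set in the io_qmask, and only then return the queue's max_count. A compact model of that guarded lookup:

#include <stdint.h>

#define MAX_QUEUES 64
#define BIT_ULL(n) (1ULL << (n))

struct queue { uint32_t max_count; };

struct dev_model {
        uint64_t iq_mask;
        struct queue *iq[MAX_QUEUES];
};

/* Returns the queue depth, or -1 if the queue is absent. */
static int get_tx_qsize(struct dev_model *d, uint32_t q_no)
{
        if (d && q_no < MAX_QUEUES && (d->iq_mask & BIT_ULL(q_no)))
                return (int)d->iq[q_no]->max_count;
        return -1;
}

int main(void)
{
        struct queue q = { .max_count = 512 };
        struct dev_model d = { .iq_mask = BIT_ULL(0), .iq = { &q } };

        return get_tx_qsize(&d, 0) == 512 ? 0 : 1;
}
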
1303 struct octeon_config *octeon_get_conf(struct octeon_device *oct)
1311 if (OCTEON_CN6XXX(oct)) {
1313 (struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
1314 } else if (OCTEON_CN23XX_PF(oct)) {
1316 (CHIP_CONF(oct, cn23xx_pf));
1317 } else if (OCTEON_CN23XX_VF(oct)) {
1319 (CHIP_CONF(oct, cn23xx_vf));
1342 u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
1348 spin_lock_irqsave(&oct->pci_win_lock, flags);
1354 if ((oct->chip_id == OCTEON_CN66XX) ||
1355 (oct->chip_id == OCTEON_CN68XX) ||
1356 (oct->chip_id == OCTEON_CN23XX_PF_VID))
1358 writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
1361 readl(oct->reg_list.pci_win_rd_addr_hi);
1363 writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
1364 readl(oct->reg_list.pci_win_rd_addr_lo);
1366 val64 = readq(oct->reg_list.pci_win_rd_data);
1368 spin_unlock_irqrestore(&oct->pci_win_lock, flags);
1374 void lio_pci_writeq(struct octeon_device *oct,
1380 spin_lock_irqsave(&oct->pci_win_lock, flags);
1382 writeq(addr, oct->reg_list.pci_win_wr_addr);
1385 writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
1387 readl(oct->reg_list.pci_win_wr_data_hi);
1389 writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
1391 spin_unlock_irqrestore(&oct->pci_win_lock, flags);
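
lio_pci_readq and lio_pci_writeq drive an indirect register window: under pci_win_lock they write the target address into hi/lo window registers, use readl() read-backs to flush the posted PCI writes, then move the 64-bit data. The model below keeps the locking and sequencing over a fake register file; the (void) reads only mark where the flushing readl() calls would sit (build with -pthread):

#include <pthread.h>
#include <stdint.h>

struct win_regs {                       /* fake indirect-access window */
        uint32_t rd_addr_hi, rd_addr_lo;
        uint64_t rd_data;
};

static struct win_regs regs;
static pthread_mutex_t win_lock = PTHREAD_MUTEX_INITIALIZER;

static uint64_t win_readq(uint64_t addr)
{
        uint64_t val;

        pthread_mutex_lock(&win_lock);  /* the window is shared state */
        regs.rd_addr_hi = (uint32_t)(addr >> 32);
        (void)regs.rd_addr_hi;          /* flushing readl() would go here */
        regs.rd_addr_lo = (uint32_t)(addr & 0xffffffff);
        (void)regs.rd_addr_lo;          /* and here */
        val = regs.rd_data;             /* hardware latches the data word */
        pthread_mutex_unlock(&win_lock);
        return val;
}

int main(void)
{
        regs.rd_data = 0xdeadbeefULL;
        return win_readq(0x1000) == 0xdeadbeefULL ? 0 : 1;
}
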
1395 int octeon_mem_access_ok(struct octeon_device *oct)
1401 if (OCTEON_CN23XX_PF(oct)) {
1402 lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
1406 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
1415 int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
1425 ret = octeon_mem_access_ok(oct);
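
octeon_wait_for_ddr_init repeatedly calls octeon_mem_access_ok (which checks the LMC0 reset-control bit via lio_pci_readq) until DDR comes up or the caller's timeout lapses. A generic poll-with-timeout sketch; the settle-after-three-polls behavior is invented for the demo:

#include <unistd.h>

/* Stand-in for octeon_mem_access_ok(): 0 == DDR out of reset. */
static int mem_access_ok(void)
{
        static int polls;

        return (++polls >= 3) ? 0 : -1; /* pretend it settles on poll 3 */
}

/* Poll until success or until *timeout_ms runs out; 0 on success. */
static int wait_for_ddr(unsigned int *timeout_ms)
{
        while (*timeout_ms) {
                if (mem_access_ok() == 0)
                        return 0;
                usleep(100 * 1000);     /* 100 ms between polls */
                *timeout_ms = (*timeout_ms > 100) ? *timeout_ms - 100 : 0;
        }
        return -1;                      /* timed out */
}

int main(void)
{
        unsigned int timeout = 1000;

        return wait_for_ddr(&timeout);
}
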
1456 struct octeon_device *oct = NULL;
1463 oct = droq->oct_dev;
1472 oct = iq->oct_dev;
1477 if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {