Lines matching refs:ioc — every line in the CT/CT2 IOC hardware-interface code that references the ioc handle (struct bfa_ioc_s *). Source line numbers are preserved, so gaps in the numbering mark elided non-matching lines.

30 static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
31 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
32 static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
33 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
34 static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
35 static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
36 static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
37 static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
38 static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
39 static void bfa_ioc_ct_set_cur_ioc_fwstate(
40 struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
41 static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
42 static void bfa_ioc_ct_set_alt_ioc_fwstate(
43 struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
44 static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
53 bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
59 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
60 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
66 writel(1, ioc->ioc_regs.ioc_usage_reg);
67 readl(ioc->ioc_regs.ioc_usage_sem_reg);
68 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
69 writel(0, ioc->ioc_regs.ioc_fail_sync);
70 bfa_trc(ioc, usecnt);
74 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
75 bfa_trc(ioc, ioc_fwstate);
85 bfa_ioc_fwver_get(ioc, &fwhdr);
86 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
87 readl(ioc->ioc_regs.ioc_usage_sem_reg);
88 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
89 bfa_trc(ioc, usecnt);
97 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
98 readl(ioc->ioc_regs.ioc_usage_sem_reg);
99 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
100 bfa_trc(ioc, usecnt);
105 bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
112 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
113 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
117 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
118 bfa_trc(ioc, usecnt);
120 readl(ioc->ioc_regs.ioc_usage_sem_reg);
121 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
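Only the ioc-referencing lines of the lock/unlock pair appear above, so the branch structure is elided. Below is a minimal user-space sketch of the pattern those lines imply; plain variables stand in for the MMIO registers, and the branch conditions and return values are assumptions, not the driver's exact code.

    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t usage_sem;  /* hardware semaphore guarding the use count */
    static uint32_t usage_cnt;  /* how many PCI functions share the firmware */
    static uint32_t fail_sync;  /* failure-sync scratch, reset by first user */

    /* The real semaphore is atomic in hardware; this spin is only a model. */
    static void sem_acquire(void) { while (usage_sem) { } usage_sem = 1; }
    static void sem_release(void) { usage_sem = 0; }

    /* True if this function may run with the firmware currently loaded. */
    static bool firmware_lock_model(bool fw_version_matches)
    {
        bool granted;

        sem_acquire();
        if (usage_cnt == 0) {             /* first user: claim, reset sync */
            usage_cnt = 1;
            fail_sync = 0;
            granted = true;
        } else if (!fw_version_matches) { /* other driver, other firmware */
            granted = false;
        } else {
            usage_cnt++;                  /* same firmware: take a reference */
            granted = true;
        }
        sem_release();
        return granted;
    }

    static void firmware_unlock_model(void)
    {
        sem_acquire();
        if (usage_cnt > 0)                /* zero here is a refcounting bug */
            usage_cnt--;
        sem_release();
    }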
128 bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
130 if (bfa_ioc_is_cna(ioc)) {
131 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
132 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
134 readl(ioc->ioc_regs.ll_halt);
135 readl(ioc->ioc_regs.alt_ll_halt);
137 writel(~0U, ioc->ioc_regs.err_set);
138 readl(ioc->ioc_regs.err_set);
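The readl() calls at 134-135 and 138 discard their results on purpose: reading the device back flushes the preceding writel() out of the PCI posted-write buffers before the CPU proceeds. A self-contained model of the idiom; the pointer parameters are placeholders for the halt registers.

    #include <stdint.h>

    /* Halt both ports' firmware, then read back so the writes are not
     * left sitting in a posted-write buffer, mirroring the
     * writel()/readl() pairing in the lines above. */
    static void halt_both_ports(volatile uint32_t *ll_halt,
                                volatile uint32_t *alt_ll_halt,
                                uint32_t halt_cmd)
    {
        *ll_halt = halt_cmd;
        *alt_ll_halt = halt_cmd;
        (void)*ll_halt;       /* readl(): force the writes to the device */
        (void)*alt_ll_halt;
    }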
183 bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
186 int pcifn = bfa_ioc_pcifn(ioc);
188 rb = bfa_ioc_bar0(ioc);
190 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
191 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
192 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
194 if (ioc->port_id == 0) {
195 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
196 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
197 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
198 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
199 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
200 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
201 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
202 } else {
203 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
204 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
205 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
206 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
207 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
208 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
209 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
215 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
216 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
217 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
218 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
223 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
224 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
225 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
226 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
227 ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
232 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
233 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
238 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
242 bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
245 int port = bfa_ioc_portid(ioc);
247 rb = bfa_ioc_bar0(ioc);
249 ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
250 ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
251 ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
252 ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
253 ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
254 ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
257 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
258 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
259 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
260 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
261 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
262 } else {
263 ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
264 ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
265 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
266 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
267 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
273 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
274 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
275 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
276 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
281 ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
282 ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
283 ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
284 ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
285 ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
290 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
291 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
296 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
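Both reg-init routines follow one scheme: resolve every register to an absolute address exactly once, as the BAR0 base (rb) plus a fixed offset, taking the per-function mailbox offsets from a lookup table (ct_fnreg[] indexed by PCI function on CT, ct2_reg[] indexed by port on CT2) and choosing the port-dependent heartbeat/fwstate/halt registers in the if/else on the port id. A compact sketch of the table-driven part; the offsets and table contents are placeholders, not the real register map.

    #include <stdint.h>

    /* Modeled on ct_fnreg[]/ct2_reg[]; placeholder offsets only. */
    struct fn_offsets {
        uint32_t hfn_mbox, lpu_mbox, hfn_pgn;
    };

    static const struct fn_offsets fnreg[4] = {
        { 0x0000, 0x0100, 0x0200 },   /* PCI function 0 (placeholder) */
        { 0x0010, 0x0110, 0x0210 },   /* PCI function 1 */
        { 0x0020, 0x0120, 0x0220 },   /* PCI function 2 */
        { 0x0030, 0x0130, 0x0230 },   /* PCI function 3 */
    };

    struct ioc_regs_model {
        volatile uint32_t *hfn_mbox, *lpu_mbox, *host_page_num_fn;
    };

    /* Resolve each register to an absolute pointer once, so hot paths
     * use it directly instead of recomputing base + offset each time. */
    static void reg_init_model(struct ioc_regs_model *regs,
                               volatile uint8_t *bar0, unsigned int pcifn)
    {
        regs->hfn_mbox =
            (volatile uint32_t *)(bar0 + fnreg[pcifn].hfn_mbox);
        regs->lpu_mbox =
            (volatile uint32_t *)(bar0 + fnreg[pcifn].lpu_mbox);
        regs->host_page_num_fn =
            (volatile uint32_t *)(bar0 + fnreg[pcifn].hfn_pgn);
    }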
305 bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
307 void __iomem *rb = ioc->pcidev.pci_bar_kva;
314 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
315 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
317 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
318 bfa_trc(ioc, ioc->port_id);
322 bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
324 void __iomem *rb = ioc->pcidev.pci_bar_kva;
328 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
330 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
331 bfa_trc(ioc, ioc->port_id);
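Both map_port routines are pure shift-and-mask extraction from a packed personality register; on CT the register packs one slice per PCI function, so the function's slice is shifted down before the port-map field is masked out. A sketch with placeholder field positions and widths.

    #include <stdint.h>

    #define BITS_PER_FN     8u                        /* placeholder width */
    #define PORT_MAP_SHIFT  4u                        /* placeholder pos */
    #define PORT_MAP_MASK   (0x3u << PORT_MAP_SHIFT)  /* placeholder mask */

    /* Shift down to this PCI function's slice of the register, then
     * extract the port-map field from that slice. */
    static uint32_t port_from_fnc_pers(uint32_t r32, unsigned int pcifn)
    {
        r32 >>= pcifn * BITS_PER_FN;
        return (r32 & PORT_MAP_MASK) >> PORT_MAP_SHIFT;
    }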
338 bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
340 void __iomem *rb = ioc->pcidev.pci_bar_kva;
344 bfa_trc(ioc, r32);
346 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
347 __F0_INTX_STATUS;
360 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
361 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
362 bfa_trc(ioc, r32);
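Lines 360-361 are a classic read-modify-write of a field shared between PCI functions: clear only this function's interrupt-mode bits, OR in the new mode, and (in the elided tail) write the result back to FNC_PERS. The same arithmetic in isolation, with a placeholder field layout.

    #include <stdint.h>

    #define FN_SLICE_BITS    8u     /* placeholder per-function slice width */
    #define INTX_STATUS_MASK 0x3u   /* placeholder interrupt-mode mask */

    /* Clear this function's mode bits, then install the new mode; the
     * other functions' slices of the register are left untouched. */
    static uint32_t set_isr_mode(uint32_t r32, unsigned int pcifn,
                                 uint32_t mode)
    {
        uint32_t shift = pcifn * FN_SLICE_BITS;

        r32 &= ~(INTX_STATUS_MASK << shift);
        r32 |= (mode & INTX_STATUS_MASK) << shift;
        return r32;
    }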
368 bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
372 r32 = readl(ioc->ioc_regs.lpu_read_stat);
374 writel(1, ioc->ioc_regs.lpu_read_stat);
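Lines 372-374 poll a status register and, when it reads nonzero, write 1 back to it: a write-1-to-clear acknowledge. A self-contained model:

    #include <stdint.h>
    #include <stdbool.h>

    /* Write-1-to-clear status poll: a nonzero read means the LPU has
     * consumed the mailbox; writing 1 acknowledges and clears the event. */
    static bool lpu_read_stat_model(volatile uint32_t *stat)
    {
        uint32_t r32 = *stat;      /* readl(ioc->ioc_regs.lpu_read_stat) */

        if (r32) {
            *stat = 1;             /* writel(1, ...): ack and clear */
            return true;
        }
        return false;
    }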
385 bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
388 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
389 writel(0, ioc->ioc_regs.ioc_usage_reg);
390 readl(ioc->ioc_regs.ioc_usage_sem_reg);
391 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
393 writel(0, ioc->ioc_regs.ioc_fail_sync);
399 readl(ioc->ioc_regs.ioc_sem_reg);
400 writel(1, ioc->ioc_regs.ioc_sem_reg);
404 bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
406 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
416 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
417 writel(0, ioc->ioc_regs.ioc_fail_sync);
418 writel(1, ioc->ioc_regs.ioc_usage_reg);
419 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
420 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
424 return bfa_ioc_ct_sync_complete(ioc);
431 bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
433 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
434 uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
436 writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
440 bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
442 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
443 uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
444 bfa_ioc_ct_sync_pos(ioc);
446 writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
450 bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
452 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
454 writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
455 ioc->ioc_regs.ioc_fail_sync);
459 bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
461 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
476 if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
477 !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
478 sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
481 writel(bfa_ioc_ct_clear_sync_ackd(r32),
482 ioc->ioc_regs.ioc_fail_sync);
483 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
484 writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
494 writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
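All four sync routines read-modify-write the single 32-bit ioc_fail_sync scratch register. The lines above suggest, and the following layout is an assumption, that each PCI function owns one bit, with the low half of the word serving as a "sync acknowledged" mask and the high half as a "sync required" mask: join sets the function's required bit, leave clears both of its bits, ack sets its acknowledged bit, and complete checks that every requester has acknowledged. A sketch:

    #include <stdint.h>
    #include <stdbool.h>

    #define SYNC_REQD_SHIFT 16u  /* assumed split: ackd low, reqd high */

    static uint32_t sync_pos(unsigned int pcifn)  { return 1u << pcifn; }
    static uint32_t sync_reqd_pos(unsigned int pcifn)
    {
        return sync_pos(pcifn) << SYNC_REQD_SHIFT;
    }

    /* join: announce that this function takes part in failure sync */
    static uint32_t sync_join(uint32_t r32, unsigned int pcifn)
    {
        return r32 | sync_reqd_pos(pcifn);
    }

    /* leave: drop out, clearing both the request and any stale ack */
    static uint32_t sync_leave(uint32_t r32, unsigned int pcifn)
    {
        return r32 & ~(sync_reqd_pos(pcifn) | sync_pos(pcifn));
    }

    /* ack: this function finished its share of failure processing */
    static uint32_t sync_ack(uint32_t r32, unsigned int pcifn)
    {
        return r32 | sync_pos(pcifn);
    }

    /* complete: done once everyone who requested sync has acknowledged */
    static bool sync_complete(uint32_t r32)
    {
        uint32_t reqd = r32 >> SYNC_REQD_SHIFT;
        uint32_t ackd = r32 & 0xffffu;

        return (reqd & ~ackd) == 0;
    }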
503 bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
524 bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
526 bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
532 ioc->ioc_hwif = &hwif_ct;
539 bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
541 bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
548 ioc->ioc_hwif = &hwif_ct2;
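bfa_ioc_set_ctx_hwif() installs the callbacks shared by CT and CT2, and the two setters then publish the matching table through ioc->ioc_hwif, so the common IOC state machine dispatches to per-ASIC code without knowing which chip it drives. A minimal sketch of that dispatch shape; the member names are illustrative, not the driver's exact struct.

    #include <stdbool.h>

    struct ioc;                        /* stand-in for struct bfa_ioc_s */

    /* Illustrative hwif table: common code calls only through these
     * pointers; each ASIC family installs its own implementations. */
    struct ioc_hwif {
        bool (*firmware_lock)(struct ioc *ioc);
        void (*firmware_unlock)(struct ioc *ioc);
        void (*reg_init)(struct ioc *ioc);
        void (*map_port)(struct ioc *ioc);
        void (*notify_fail)(struct ioc *ioc);
    };

    static const struct ioc_hwif hwif_ct_model;    /* filled in elsewhere */
    static const struct ioc_hwif hwif_ct2_model;

    struct ioc_model { const struct ioc_hwif *hwif; };

    static void set_ct_hwif_model(struct ioc_model *m)
    {
        m->hwif = &hwif_ct_model;
    }

    static void set_ct2_hwif_model(struct ioc_model *m)
    {
        m->hwif = &hwif_ct2_model;
    }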
562 bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
564 void __iomem *rb = ioc->pcidev.pci_bar_kva;
575 HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
577 writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
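Both matched lines in bfa_ioc_ct2_poweron() compute HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc): each PCI function gets a contiguous block of MSI-X vectors starting at that base. The arithmetic in isolation, with a placeholder block size:

    #include <stdint.h>

    #define VECTORS_PER_FN 4u   /* placeholder for HOSTFN_MSIX_DEFAULT */

    /* Per-function MSI-X vector base: function n owns vectors
     * [n * VECTORS_PER_FN, (n + 1) * VECTORS_PER_FN). */
    static uint32_t msix_vec_base(uint32_t pcifn)
    {
        return VECTORS_PER_FN * pcifn;
    }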
967 bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
970 writel(fwstate, ioc->ioc_regs.ioc_fwstate);
974 bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
976 return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
980 bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
983 writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
987 bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
989 return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
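The four fwstate accessors are thin typed wrappers that cast between enum bfi_ioc_state and the raw 32-bit register, so the rest of the IOC code never touches ioc_fwstate/alt_ioc_fwstate directly. The same shape in miniature; the enum values here are illustrative, not the real bfi_ioc_state encoding.

    #include <stdint.h>

    enum fw_state { FW_UNINIT = 0, FW_FAIL = 1 };  /* illustrative values */

    /* Typed get/set over a raw 32-bit state register. */
    static void set_fwstate(volatile uint32_t *reg, enum fw_state st)
    {
        *reg = (uint32_t)st;
    }

    static enum fw_state get_fwstate(volatile uint32_t *reg)
    {
        return (enum fw_state)*reg;
    }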