Lines Matching refs:md

82 struct t7xx_modem *md = t7xx_dev->md;
88 ctl = md->fsm_ctl;
95 spin_lock_bh(&md->exp_lock);
97 md->exp_id |= int_sta;
98 if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
103 md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
106 } else if (md->exp_id & D2H_INT_PORT_ENUM) {
107 md->exp_id &= ~D2H_INT_PORT_ENUM;
114 if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
115 md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
116 queue_work(md->handshake_wq, &md->handshake_work);
119 spin_unlock_bh(&md->exp_lock);
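The matches above (source lines 82-119) come from the MHCCIF interrupt path: status bits are latched into md->exp_id under md->exp_lock, the exception bit takes priority, and the async-handshake bit is only acted on when it is not masked, by deferring to md->handshake_wq. A minimal sketch of that latch-and-dispatch pattern, with hypothetical demo_* names standing in for the driver's own:

#include <linux/bits.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DEMO_INT_EXCEPTION_INIT	BIT(0)	/* stand-ins for the D2H_INT_* bits */
#define DEMO_INT_ASYNC_HK	BIT(1)

struct demo_dev {
	spinlock_t exp_lock;			/* protects exp_id */
	u32 exp_id;				/* latched D2H status bits */
	struct workqueue_struct *hk_wq;
	struct work_struct hk_work;
};

static void demo_handle_mhccif(struct demo_dev *d, u32 int_sta, u32 mask)
{
	spin_lock_bh(&d->exp_lock);
	d->exp_id |= int_sta;			/* accumulate, never overwrite */

	if (d->exp_id & DEMO_INT_EXCEPTION_INIT) {
		/* exception wins; handshake bits are dealt with elsewhere */
		d->exp_id &= ~DEMO_INT_EXCEPTION_INIT;
	} else if ((d->exp_id & DEMO_INT_ASYNC_HK) && !(mask & DEMO_INT_ASYNC_HK)) {
		d->exp_id &= ~DEMO_INT_ASYNC_HK;
		queue_work(d->hk_wq, &d->hk_work);	/* defer to process context */
	}

	spin_unlock_bh(&d->exp_lock);
}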
215 modem = t7xx_dev->md;
297 static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
299 struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;
304 t7xx_port_proxy_reset(md->port_prox);
307 t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
308 t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
316 static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
321 if (md->exp_id & event_id)
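Source line 321 is the poll inside t7xx_wait_hif_ex_hk_event(): the exception handshake at lines 626-641 waits until the modem latches the expected D2H acknowledge bit in md->exp_id. The matched lines do not show the driver's actual wait loop, so the following is only one plausible way to express such a bounded wait, reusing the demo_dev shape from the sketch above and assuming a 10 ms poll interval and 3 s budget that are not taken from the driver:

#include <linux/compiler.h>
#include <linux/iopoll.h>
#include <linux/time64.h>

static u32 demo_read_exp_id(struct demo_dev *d)
{
	return READ_ONCE(d->exp_id);	/* written under exp_lock in the ISR path */
}

static int demo_wait_event_bit(struct demo_dev *d, u32 event_bit)
{
	u32 latched;

	/* poll every 10 ms, give up after 3 s; returns 0 or -ETIMEDOUT */
	return read_poll_timeout(demo_read_exp_id, latched, latched & event_bit,
				 10 * USEC_PER_MSEC, 3 * USEC_PER_SEC, false, d);
}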
438 t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
444 static int t7xx_core_reset(struct t7xx_modem *md)
446 struct device *dev = &md->t7xx_dev->pdev->dev;
447 struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
449 md->core_md.ready = false;
456 if (md->core_md.handshake_ongoing) {
463 md->core_md.handshake_ongoing = false;
467 static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
473 struct device *dev = &md->t7xx_dev->pdev->dev;
533 struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
534 struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
538 t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
539 t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
541 md->core_md.handshake_ongoing = true;
542 t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
547 struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
548 struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
552 t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
553 t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
554 t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
555 md->core_ap.handshake_ongoing = true;
556 t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
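Source lines 533-556 are the two handshake workers: each recovers its t7xx_modem from the embedded work_struct via container_of(), touches the corresponding CLDMA instance (reconfigure and start for CLDMA_ID_MD at 538-539; stop, reconfigure and start for CLDMA_ID_AP at 552-554), and then runs the shared t7xx_core_hk_handler() with its own event pair. A bare-bones sketch of that embed / INIT_WORK / container_of round trip, with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_modem {
	struct work_struct md_hk_work;	/* modem-core handshake */
	struct work_struct ap_hk_work;	/* AP-core handshake */
};

static void demo_md_hk_fn(struct work_struct *work)
{
	struct demo_modem *m = container_of(work, struct demo_modem, md_hk_work);

	/* switch queue config, start CLDMA, run the handshake for this core */
	(void)m;
}

static void demo_ap_hk_fn(struct work_struct *work)
{
	struct demo_modem *m = container_of(work, struct demo_modem, ap_hk_work);

	(void)m;
}

static void demo_wire_up(struct demo_modem *m)
{
	INIT_WORK(&m->md_hk_work, demo_md_hk_fn);	/* queued from the ISR/FSM paths */
	INIT_WORK(&m->ap_hk_work, demo_ap_hk_fn);
}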
559 void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
561 struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
567 t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
572 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);
574 spin_lock_irqsave(&md->exp_lock, flags);
575 int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
576 md->exp_id |= int_sta;
577 if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
579 md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
580 md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
581 md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
583 md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
584 md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
586 void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
588 if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
589 queue_work(md->handshake_wq, &md->handshake_work);
590 md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
592 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
595 if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
596 queue_work(md->handshake_wq, &md->ap_handshake_work);
597 md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
599 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
602 spin_unlock_irqrestore(&md->exp_lock, flags);
604 t7xx_mhccif_mask_clr(md->t7xx_dev,
612 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
613 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
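Source lines 559-613 are t7xx_md_event_notify(): per FSM event it re-arms or masks MHCCIF sources, and in the start path it re-checks md->exp_id under exp_lock with spin_lock_irqsave (line 574) rather than the _bh flavour used at line 95. The block around lines 586-599 suggests a consume/ack/mask discipline for each async-handshake bit; mhccif_base (line 586) is presumably used to acknowledge the source in lines the search did not match, so treat step 3 below as an assumption. A hedged restatement, where DEMO_SW_INT_ACK is a made-up register offset and demo_dev is the struct from the earlier sketch:

#include <linux/io.h>
#include <linux/workqueue.h>

#define DEMO_SW_INT_ACK	0x10		/* hypothetical ack-register offset */

static void demo_mask_irq(struct demo_dev *d, u32 bit)
{
	/* stand-in for t7xx_mhccif_mask_set(); body elided */
}

/* Hand one latched handshake source over to its worker. Caller holds exp_lock;
 * mhccif_base is the ioremapped RC-side MHCCIF window (cf. line 586). */
static void demo_dispatch_async_hk(struct demo_dev *d, void __iomem *mhccif_base,
				   u32 hk_bit)
{
	if (!(d->exp_id & hk_bit))
		return;

	queue_work(d->hk_wq, &d->hk_work);	/* 1. defer the handshake      */
	d->exp_id &= ~hk_bit;			/* 2. consume the latched bit  */
	iowrite32(hk_bit, mhccif_base + DEMO_SW_INT_ACK);
						/* 3. ack the source (assumed) */
	demo_mask_irq(d, hk_bit);		/* 4. keep it masked until the
						 *    FSM re-arms it (line 567) */
}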
621 void t7xx_md_exception_handshake(struct t7xx_modem *md)
623 struct device *dev = &md->t7xx_dev->pdev->dev;
626 t7xx_md_exception(md, HIF_EX_INIT);
627 ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
631 t7xx_md_exception(md, HIF_EX_INIT_DONE);
632 ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
636 t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
637 ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
641 t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
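Source lines 621-641 show the exception handshake as a strict stage/acknowledge ladder: drive a HIF exception stage into both CLDMA instances, wait for the matching D2H acknowledge bit, and move to the next stage (note the void return at line 621: a timeout cannot be propagated to the caller). An equivalent table-driven restatement, offered purely as an illustration of the ladder and not as the driver's code; it assumes the driver's own headers for the types and constants used:

#include <linux/kernel.h>

/* Illustrative only: restates the ladder at lines 626-641. The real function
 * drives each stage explicitly and handles timeouts itself. */
struct demo_ex_step {
	enum hif_ex_stage stage;	/* stage driven into both CLDMA HIFs   */
	int done_bit;			/* D2H bit that acknowledges the stage */
};

static void demo_exception_handshake(struct t7xx_modem *md)
{
	static const struct demo_ex_step steps[] = {
		{ HIF_EX_INIT,        D2H_INT_EXCEPTION_INIT_DONE },
		{ HIF_EX_INIT_DONE,   D2H_INT_EXCEPTION_CLEARQ_DONE },
		{ HIF_EX_CLEARQ_DONE, D2H_INT_EXCEPTION_ALLQ_RESET },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(steps); i++) {
		t7xx_md_exception(md, steps[i].stage);
		if (t7xx_wait_hif_ex_hk_event(md, steps[i].done_bit))
			break;	/* how a timeout is handled is not visible in
				 * the matched lines; bailing out is a guess */
	}

	t7xx_md_exception(md, HIF_EX_ALLQ_RESET);	/* final stage, nothing to wait for */
}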
647 struct t7xx_modem *md;
649 md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
650 if (!md)
653 md->t7xx_dev = t7xx_dev;
654 t7xx_dev->md = md;
655 spin_lock_init(&md->exp_lock);
656 md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
658 if (!md->handshake_wq)
661 INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
662 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
663 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
666 INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
667 md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
668 md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
671 return md;
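Source lines 647-671 are t7xx_md_alloc(): a device-managed allocation, a dedicated high-priority handshake workqueue, the two work items shown earlier, and seeding of the port-enum entries in each core's feature_set. The lifetime split is worth noting: devm_kzalloc() ties the struct to the device, while the workqueue still needs an explicit destroy_workqueue() on the error and exit paths (lines 760 and 782). A sketch of that wiring, with a hypothetical workqueue name and demo_* types:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_modem {			/* earlier sketch plus lock and workqueue */
	spinlock_t exp_lock;
	struct workqueue_struct *hk_wq;
	struct work_struct md_hk_work;
	struct work_struct ap_hk_work;
};

static void demo_md_hk_fn(struct work_struct *work) { /* see the worker sketch above */ }
static void demo_ap_hk_fn(struct work_struct *work) { /* see the worker sketch above */ }

static struct demo_modem *demo_md_alloc(struct device *dev)
{
	struct demo_modem *m;

	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);	/* freed with the device */
	if (!m)
		return NULL;

	spin_lock_init(&m->exp_lock);

	/* handshake work must make progress under memory pressure and should not
	 * be held up by per-CPU pools: unbound + reclaim + high priority */
	m->hk_wq = alloc_workqueue("demo_hk_wq",
				   WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!m->hk_wq)
		return NULL;		/* devm reclaims the struct itself */

	INIT_WORK(&m->md_hk_work, demo_md_hk_fn);
	INIT_WORK(&m->ap_hk_work, demo_ap_hk_fn);
	return m;
}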
676 struct t7xx_modem *md = t7xx_dev->md;
678 md->md_init_finish = false;
679 md->exp_id = 0;
680 t7xx_fsm_reset(md);
681 t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
682 t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
683 t7xx_port_proxy_reset(md->port_prox);
684 md->md_init_finish = true;
685 return t7xx_core_reset(md);
701 struct t7xx_modem *md;
704 md = t7xx_md_alloc(t7xx_dev);
705 if (!md)
716 ret = t7xx_fsm_init(md);
724 ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
728 ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
732 ret = t7xx_port_proxy_init(md);
736 ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
741 md->md_init_finish = true;
745 t7xx_port_proxy_uninit(md->port_prox);
748 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
751 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
757 t7xx_fsm_uninit(md);
760 destroy_workqueue(md->handshake_wq);
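Source lines 701-760 are t7xx_md_init(): FSM, both CLDMA instances, the port proxy and the initial FSM_CMD_START come up in order, and the error labels at 745-760 unwind exactly the pieces already initialised, in reverse. The matched lines only show the calls, so the goto ladder below is a generic restatement of that shape with stubbed demo_* steps, not the driver's exact labels:

static int demo_step_fsm(void)    { return 0; }	/* e.g. t7xx_fsm_init()        */
static int demo_step_cldma(void)  { return 0; }	/* e.g. t7xx_cldma_init()      */
static int demo_step_ports(void)  { return 0; }	/* e.g. t7xx_port_proxy_init() */
static void demo_undo_cldma(void) { }		/* e.g. t7xx_cldma_exit()      */
static void demo_undo_fsm(void)   { }		/* e.g. t7xx_fsm_uninit()      */

static int demo_md_init(void)
{
	int ret;

	ret = demo_step_fsm();
	if (ret)
		return ret;

	ret = demo_step_cldma();
	if (ret)
		goto err_uninit_fsm;

	ret = demo_step_ports();
	if (ret)
		goto err_exit_cldma;

	return 0;

err_exit_cldma:
	demo_undo_cldma();
err_uninit_fsm:
	demo_undo_fsm();
	return ret;
}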
768 struct t7xx_modem *md = t7xx_dev->md;
772 if (!md->md_init_finish)
776 t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
777 t7xx_port_proxy_uninit(md->port_prox);
778 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
779 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
781 t7xx_fsm_uninit(md);
782 destroy_workqueue(md->handshake_wq);
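Source lines 768-782 are the exit path: nothing is torn down unless md_init_finish was set, the FSM is stopped synchronously (FSM_CMD_FLAG_WAIT_FOR_COMPLETION at line 776), and only then are the port proxy, both CLDMA instances, the FSM and the handshake workqueue released. A compact restatement of that guard, quiesce, then free ordering, with stubbed demo_* helpers:

#include <linux/types.h>
#include <linux/workqueue.h>

static void demo_quiesce(void)  { }	/* e.g. the synchronous FSM_CMD_PRE_STOP       */
static void demo_teardown(void) { }	/* port proxy, CLDMA, FSM, in that order       */

struct demo_exit_ctx {
	bool init_done;
	struct workqueue_struct *hk_wq;
};

static void demo_md_exit(struct demo_exit_ctx *c)
{
	if (!c->init_done)		/* init never completed: nothing to undo */
		return;

	demo_quiesce();			/* stop new work before freeing its targets */
	demo_teardown();
	destroy_workqueue(c->hk_wq);	/* last: drains any remaining handshake work */
}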