Lines matching refs: mhi_cntrl (MHI host core)

20 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
23 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
26 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
33 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
42 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
51 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
64 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
67 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
70 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
77 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
83 mhi_write_reg(mhi_cntrl, base, offset, tmp);
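The field helpers above (mhi_read_reg_field at 26, mhi_write_reg_field at 70) wrap the plain 32-bit accessors in a mask-and-shift step: read the register (line 77), clear the masked bits, insert the new value, write the word back (line 83). The userspace model below is only a sketch of that read-modify-write pattern; field_shift() and write_reg_field() are hypothetical names, not kernel helpers.

#include <stdint.h>
#include <stdio.h>

/* Position of the field's least significant bit within the mask. */
static unsigned int field_shift(uint32_t mask)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return shift;
}

/* Read-modify-write of one masked field inside a 32-bit register image. */
static uint32_t write_reg_field(uint32_t reg, uint32_t mask, uint32_t val)
{
        reg &= ~mask;                               /* clear the old field */
        reg |= (val << field_shift(mask)) & mask;   /* insert the new one  */
        return reg;
}

int main(void)
{
        /* Update the 4-bit field at bits [7:4] of a register image. */
        printf("0x%08x\n", write_reg_field(0x0000123f, 0x000000f0, 0xa));
        return 0;
}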
88 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
91 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
92 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
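mhi_write_db() at 88 splits one 64-bit doorbell value across two 32-bit MMIO writes: the upper half goes to db_addr + 4 first (line 91), then the lower half to db_addr + 0 (line 92), presumably so the device never samples a torn 64-bit pointer. A minimal userspace model of that ordering, assuming a two-word register pair:

#include <stdint.h>
#include <stdio.h>

/* Two-step doorbell write: high word first, low word second, matching the
 * order at lines 91-92. */
static void write_db(volatile uint32_t db[2], uint64_t db_val)
{
        db[1] = (uint32_t)(db_val >> 32);   /* upper_32_bits() -> offset 4 */
        db[0] = (uint32_t)db_val;           /* lower_32_bits() -> offset 0 */
}

int main(void)
{
        uint32_t regs[2] = { 0, 0 };

        write_db(regs, 0x0000000123456789ULL);
        printf("low=0x%08x high=0x%08x\n", regs[0], regs[1]);
        return 0;
}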
95 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
102 mhi_write_db(mhi_cntrl, db_addr, db_val);
107 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
113 mhi_write_db(mhi_cntrl, db_addr, db_val);
120 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
124 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
131 mhi_write_db(mhi_cntrl, ring->db_addr, db);
134 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
149 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
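Lines 95-149 show that doorbell ringing goes through a per-ring function pointer, db_cfg.process_db, which is either the burst-mode variant (rings only while db_mode is armed) or the plain variant (always rings). The sketch below models that dispatch in userspace; the struct layout and the point where db_mode is re-armed are simplified assumptions, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct db_cfg {
        bool db_mode;                 /* armed: next update may ring */
        void (*process_db)(struct db_cfg *cfg, uint64_t db_val);
};

/* Burst mode: ring only while armed, then disarm until the core re-arms. */
static void db_brstmode(struct db_cfg *cfg, uint64_t db_val)
{
        if (cfg->db_mode) {
                printf("ring 0x%llx\n", (unsigned long long)db_val);
                cfg->db_mode = false;
        }
}

/* Burst mode disabled: every update rings the doorbell. */
static void db_brstmode_disable(struct db_cfg *cfg, uint64_t db_val)
{
        printf("ring 0x%llx\n", (unsigned long long)db_val);
}

int main(void)
{
        struct db_cfg cfg = { .db_mode = true, .process_db = db_brstmode };

        cfg.process_db(&cfg, 0x1000);   /* rings once */
        cfg.process_db(&cfg, 0x1010);   /* suppressed until re-armed */

        cfg.process_db = db_brstmode_disable;
        cfg.process_db(&cfg, 0x1020);   /* rings unconditionally */
        return 0;
}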
153 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
156 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
162 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
165 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
171 void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
173 if (mhi_cntrl->reset) {
174 mhi_cntrl->reset(mhi_cntrl);
179 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
184 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
187 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
190 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
196 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
199 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
213 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
216 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
220 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
226 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
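The two mapping paths at 184-226 differ in where the device-visible buffer lives: mhi_map_single_no_bb() hands the caller's buffer straight to dma_map_single(), while mhi_map_single_use_bb() allocates a coherent bounce buffer with dma_alloc_coherent() and stages outbound data into it. A kernel-style sketch of the bounce path, with the struct trimmed to the fields used here and error handling abridged; the GFP_ATOMIC flag and the DMA_TO_DEVICE copy are assumptions about the calling context, not taken from the listing:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

struct bb_info {
        void *v_addr;        /* caller's buffer */
        void *bb_addr;       /* coherent bounce buffer */
        dma_addr_t p_addr;   /* device-visible address */
        size_t len;
        enum dma_data_direction dir;
};

static int map_single_use_bb(struct device *dev, struct bb_info *info)
{
        info->bb_addr = dma_alloc_coherent(dev, info->len, &info->p_addr,
                                           GFP_ATOMIC);
        if (!info->bb_addr)
                return -ENOMEM;

        /* Outbound payloads must be staged into the bounce buffer first. */
        if (info->dir == DMA_TO_DEVICE)
                memcpy(info->bb_addr, info->v_addr, info->len);

        return 0;
}

static void unmap_single_use_bb(struct device *dev, struct bb_info *info)
{
        /* The real driver copies inbound data back to the caller before
         * freeing; that step is omitted here. */
        dma_free_coherent(dev, info->len, info->bb_addr, info->p_addr);
}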
230 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
251 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
261 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
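get_nr_avail_ring_elements(), mhi_add_ring_element() and mhi_del_ring_element() (230-261) implement the usual producer/consumer arithmetic on a fixed-size ring: free space is the distance from the write pointer back to the read pointer, with one slot kept as a sentinel so a full ring is distinguishable from an empty one, and add/del simply advance wp or rp with wraparound. A userspace model, using element indices instead of the kernel's pointer-based representation:

#include <stdio.h>

struct ring {
        unsigned int rp;    /* read index  (consumer) */
        unsigned int wp;    /* write index (producer) */
        unsigned int n;     /* number of element slots */
};

/* Free slots between wp and rp, keeping one slot unused as a sentinel. */
static unsigned int nr_avail(const struct ring *r)
{
        return (r->rp + r->n - r->wp - 1) % r->n;
}

static void add_element(struct ring *r) { r->wp = (r->wp + 1) % r->n; }
static void del_element(struct ring *r) { r->rp = (r->rp + 1) % r->n; }

int main(void)
{
        struct ring r = { .rp = 0, .wp = 0, .n = 8 };

        printf("avail=%u\n", nr_avail(&r));   /* 7: empty ring, one sentinel */
        add_element(&r);
        add_element(&r);
        printf("avail=%u\n", nr_avail(&r));   /* 5 */
        del_element(&r);
        printf("avail=%u\n", nr_avail(&r));   /* 6 */
        return 0;
}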
281 struct mhi_controller *mhi_cntrl;
288 mhi_cntrl = mhi_dev->mhi_cntrl;
326 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
339 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
344 return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
363 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
367 struct device *dev = &mhi_cntrl->mhi_dev->dev;
370 mhi_chan = mhi_cntrl->mhi_chan;
371 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
373 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
375 mhi_dev = mhi_alloc_device(mhi_cntrl);
400 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
419 dev_name(&mhi_cntrl->mhi_dev->dev),
435 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
446 if (!mhi_cntrl->mhi_ctxt) {
447 dev_dbg(&mhi_cntrl->mhi_dev->dev,
452 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
456 dev_err(&mhi_cntrl->mhi_dev->dev,
483 struct mhi_controller *mhi_cntrl = priv;
484 struct device *dev = &mhi_cntrl->mhi_dev->dev;
489 write_lock_irq(&mhi_cntrl->pm_lock);
490 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
491 write_unlock_irq(&mhi_cntrl->pm_lock);
495 state = mhi_get_mhi_state(mhi_cntrl);
496 ee = mhi_get_exec_env(mhi_cntrl);
498 trace_mhi_intvec_states(mhi_cntrl, ee, state);
501 pm_state = mhi_tryset_pm_state(mhi_cntrl,
504 write_unlock_irq(&mhi_cntrl->pm_lock);
512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
514 mhi_cntrl->ee = ee;
515 wake_up_all(&mhi_cntrl->state_event);
521 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
522 mhi_cntrl->ee = ee;
523 wake_up_all(&mhi_cntrl->state_event);
524 mhi_pm_sys_err_handler(mhi_cntrl);
527 wake_up_all(&mhi_cntrl->state_event);
528 mhi_pm_sys_err_handler(mhi_cntrl);
539 struct mhi_controller *mhi_cntrl = dev;
542 wake_up_all(&mhi_cntrl->state_event);
547 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
567 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
572 struct device *dev = &mhi_cntrl->mhi_dev->dev;
610 dev_err(&mhi_cntrl->mhi_dev->dev,
634 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
641 mhi_del_ring_element(mhi_cntrl, buf_ring);
642 mhi_del_ring_element(mhi_cntrl, tre_ring);
651 atomic_dec(&mhi_cntrl->pending_pkts);
653 mhi_cntrl->runtime_put(mhi_cntrl);
683 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
685 MHI_DB_ACCESS_VALID(mhi_cntrl)) {
686 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
688 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
706 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
761 mhi_del_ring_element(mhi_cntrl, tre_ring);
770 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
774 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
781 dev_err(&mhi_cntrl->mhi_dev->dev,
790 if (chan < mhi_cntrl->max_chan &&
791 mhi_cntrl->mhi_chan[chan].configured) {
792 mhi_chan = &mhi_cntrl->mhi_chan[chan];
798 dev_err(&mhi_cntrl->mhi_dev->dev,
802 mhi_del_ring_element(mhi_cntrl, mhi_ring);
805 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
812 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
814 struct device *dev = &mhi_cntrl->mhi_dev->dev;
824 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
828 dev_err(&mhi_cntrl->mhi_dev->dev,
839 trace_mhi_ctrl_event(mhi_cntrl, local_rp);
846 link_info = &mhi_cntrl->mhi_link_info;
847 write_lock_irq(&mhi_cntrl->pm_lock);
852 write_unlock_irq(&mhi_cntrl->pm_lock);
854 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
868 mhi_pm_m0_transition(mhi_cntrl);
871 mhi_pm_m1_transition(mhi_cntrl);
874 mhi_pm_m3_transition(mhi_cntrl);
881 write_lock_irq(&mhi_cntrl->pm_lock);
882 pm_state = mhi_tryset_pm_state(mhi_cntrl,
884 write_unlock_irq(&mhi_cntrl->pm_lock);
886 mhi_pm_sys_err_handler(mhi_cntrl);
897 mhi_process_cmd_completion(mhi_cntrl, local_rp);
918 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
919 write_lock_irq(&mhi_cntrl->pm_lock);
920 mhi_cntrl->ee = event;
921 write_unlock_irq(&mhi_cntrl->pm_lock);
922 wake_up_all(&mhi_cntrl->state_event);
929 mhi_queue_state_transition(mhi_cntrl, st);
936 WARN_ON(chan >= mhi_cntrl->max_chan);
942 if (chan < mhi_cntrl->max_chan) {
943 mhi_chan = &mhi_cntrl->mhi_chan[chan];
946 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
954 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
959 dev_err(&mhi_cntrl->mhi_dev->dev,
968 read_lock_bh(&mhi_cntrl->pm_lock);
971 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
973 read_unlock_bh(&mhi_cntrl->pm_lock);
978 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
985 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
991 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
995 dev_err(&mhi_cntrl->mhi_dev->dev,
1006 trace_mhi_data_event(mhi_cntrl, local_rp);
1010 WARN_ON(chan >= mhi_cntrl->max_chan);
1016 if (chan < mhi_cntrl->max_chan &&
1017 mhi_cntrl->mhi_chan[chan].configured) {
1018 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1021 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1024 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1029 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
1034 dev_err(&mhi_cntrl->mhi_dev->dev,
1042 read_lock_bh(&mhi_cntrl->pm_lock);
1045 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
1047 read_unlock_bh(&mhi_cntrl->pm_lock);
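Both mhi_process_ctrl_ev_ring() (805) and mhi_process_data_event_ring() (978) follow the same shape: walk the local read pointer toward the device-updated read pointer, dispatch each event by type, recycle the ring element, and only afterwards, under pm_lock and only if doorbell access is valid, ring the event ring doorbell once for the whole batch (lines 968-973 and 1042-1047). The userspace sketch below models that batch-then-ring loop; event contents, locking and the quota argument are left out.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct ev_ring {
        unsigned int local_rp;   /* host cursor                        */
        unsigned int dev_rp;     /* last element written by the device */
};

static void process_event(unsigned int idx)
{
        printf("event at slot %u\n", idx);
}

/* Consume everything the device has produced, then ring the event ring
 * doorbell once if at least one event was handled. */
static int process_event_ring(struct ev_ring *ring, bool db_access_valid)
{
        int count = 0;

        while (ring->local_rp != ring->dev_rp) {
                process_event(ring->local_rp);
                ring->local_rp = (ring->local_rp + 1) % RING_SIZE;  /* consume + recycle slot */
                count++;
        }

        if (db_access_valid && count)
                printf("ring event doorbell (count=%d)\n", count);

        return count;
}

int main(void)
{
        struct ev_ring ring = { .local_rp = 0, .dev_rp = 3 };

        process_event_ring(&ring, true);
        return 0;
}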
1055 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1059 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1066 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1067 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1077 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
1083 mhi_trigger_resume(mhi_cntrl);
1089 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1096 write_lock_irq(&mhi_cntrl->pm_lock);
1097 state = mhi_get_mhi_state(mhi_cntrl);
1100 pm_state = mhi_tryset_pm_state(mhi_cntrl,
1103 write_unlock_irq(&mhi_cntrl->pm_lock);
1105 mhi_pm_sys_err_handler(mhi_cntrl);
1109 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1123 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1130 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1133 ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1137 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1141 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1147 mhi_cntrl->runtime_get(mhi_cntrl);
1150 mhi_cntrl->wake_toggle(mhi_cntrl);
1153 atomic_inc(&mhi_cntrl->pending_pkts);
1155 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1156 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1159 mhi_cntrl->runtime_put(mhi_cntrl);
1161 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1203 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1231 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1248 trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);
1250 mhi_add_ring_element(mhi_cntrl, tre_ring);
1251 mhi_add_ring_element(mhi_cntrl, buf_ring);
1273 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1278 return mhi_is_ring_full(mhi_cntrl, tre_ring);
1282 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1287 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1289 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1296 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1325 mhi_add_ring_element(mhi_cntrl, ring);
1326 read_lock_bh(&mhi_cntrl->pm_lock);
1327 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1328 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1329 read_unlock_bh(&mhi_cntrl->pm_lock);
1335 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
1343 trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
1378 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1381 mhi_cntrl->runtime_get(mhi_cntrl);
1384 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1392 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1410 trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));
1412 mhi_cntrl->runtime_put(mhi_cntrl);
1413 mhi_device_put(mhi_cntrl->mhi_dev);
1418 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1426 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1428 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1433 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1445 mhi_reset_chan(mhi_cntrl, mhi_chan);
1446 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1453 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1459 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1461 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1469 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1474 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1484 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1486 size_t len = mhi_cntrl->buffer_len;
1502 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1509 read_lock_bh(&mhi_cntrl->pm_lock);
1510 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1512 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1515 read_unlock_bh(&mhi_cntrl->pm_lock);
1524 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1533 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1538 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1546 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1559 dev_err(&mhi_cntrl->mhi_dev->dev,
1581 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1596 atomic_dec(&mhi_cntrl->pending_pkts);
1598 mhi_cntrl->runtime_put(mhi_cntrl);
1602 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1604 mhi_del_ring_element(mhi_cntrl, buf_ring);
1605 mhi_del_ring_element(mhi_cntrl, tre_ring);
1616 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1626 read_lock_bh(&mhi_cntrl->pm_lock);
1627 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1628 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1630 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1632 mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1634 read_unlock_bh(&mhi_cntrl->pm_lock);
1640 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1648 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
1661 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1681 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1690 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
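The last two groups (1640-1690) back the client-facing pair mhi_prepare_for_transfer() and mhi_unprepare_from_transfer(), which bring a device's channels up and down around data transfer. Below is a hedged sketch of how an MHI client driver typically uses that API together with mhi_queue_buf(); the probe wiring, buffer ownership and the xfer_cb completion path are omitted, and the signatures should be checked against include/linux/mhi.h for the kernel in use.

#include <linux/dma-direction.h>
#include <linux/mhi.h>

static int example_client_send(struct mhi_device *mhi_dev,
                               void *buf, size_t len)
{
        int ret;

        /* Moves the device's channels to the running state (START command
         * on the primary command ring, see mhi_prepare_channel above). */
        ret = mhi_prepare_for_transfer(mhi_dev);
        if (ret)
                return ret;

        /* Queue one outbound buffer; completion is reported through the
         * client's ul_xfer_cb() once the transfer event comes back. */
        ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
        if (ret)
                mhi_unprepare_from_transfer(mhi_dev);

        return ret;
}

Clients that stream data usually check mhi_queue_is_full() (apparently the function at 1273-1278 above) before queuing further buffers, and call mhi_unprepare_from_transfer() on remove.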