Lines Matching refs:gsi

18 #include "gsi.h"
171 return channel - &channel->gsi->channel[0];
177 return !!channel->gsi;
197 static void gsi_irq_type_update(struct gsi *gsi, u32 val)
199 const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK);
201 gsi->type_enabled_bitmap = val;
202 iowrite32(val, gsi->virt + reg_offset(reg));
205 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
207 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
210 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
212 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
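The three helpers above (source lines 197-212) implement a cached-mask pattern: the driver keeps its own copy of the interrupt-type mask in gsi->type_enabled_bitmap and mirrors every change to the CNTXT_TYPE_IRQ_MSK register, so enable/disable become a read-modify-write on the software copy rather than on hardware state. A minimal standalone model of the idea (names here are illustrative, not driver API):

	#include <stdint.h>
	#include <stdio.h>

	struct irq_state {
		uint32_t type_enabled_bitmap;	/* software copy of the HW mask */
	};

	static void mask_reg_write(uint32_t val)
	{
		/* Stand-in for the iowrite32() to CNTXT_TYPE_IRQ_MSK */
		printf("CNTXT_TYPE_IRQ_MSK <- 0x%08x\n", (unsigned int)val);
	}

	static void irq_type_update(struct irq_state *s, uint32_t val)
	{
		s->type_enabled_bitmap = val;	/* cache first... */
		mask_reg_write(val);		/* ...then mirror to hardware */
	}

	static void irq_type_enable(struct irq_state *s, uint32_t type_bit)
	{
		irq_type_update(s, s->type_enabled_bitmap | type_bit);
	}

	static void irq_type_disable(struct irq_state *s, uint32_t type_bit)
	{
		irq_type_update(s, s->type_enabled_bitmap & ~type_bit);
	}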
220 static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
229 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
230 iowrite32(~0, gsi->virt + reg_offset(reg));
232 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
233 iowrite32(val, gsi->virt + reg_offset(reg));
234 gsi_irq_type_enable(gsi, GSI_EV_CTRL);
238 static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
242 gsi_irq_type_disable(gsi, GSI_EV_CTRL);
244 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
245 iowrite32(0, gsi->virt + reg_offset(reg));
253 static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
262 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
263 iowrite32(~0, gsi->virt + reg_offset(reg));
265 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
266 iowrite32(val, gsi->virt + reg_offset(reg));
268 gsi_irq_type_enable(gsi, GSI_CH_CTRL);
272 static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
276 gsi_irq_type_disable(gsi, GSI_CH_CTRL);
278 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
279 iowrite32(0, gsi->virt + reg_offset(reg));
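gsi_irq_ev_ctrl_enable() and gsi_irq_ch_ctrl_enable() share one ordering: write ~0 to the matching CLR register to drop stale pending bits, then write a single-bit mask to the MSK register, then enable the summary type; the disable helpers reverse it. A condensed model with placeholder register indices (not the driver's layout):

	#include <stdint.h>

	#define REG_CLR	0	/* placeholder register indices */
	#define REG_MSK	1

	/* Clear any stale pending state before unmasking exactly one
	 * source, so no interrupt fires for an event that predates the
	 * enable. */
	static void ctrl_irq_enable_one(volatile uint32_t *regs, unsigned int id)
	{
		regs[REG_CLR] = ~0u;		/* drop stale pending bits */
		regs[REG_MSK] = 1u << id;	/* unmask this source only */
	}

	static void ctrl_irq_disable_all(volatile uint32_t *regs)
	{
		regs[REG_MSK] = 0;		/* mask every source again */
	}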
282 static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
284 bool enable_ieob = !gsi->ieob_enabled_bitmap;
288 gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
290 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
291 val = gsi->ieob_enabled_bitmap;
292 iowrite32(val, gsi->virt + reg_offset(reg));
296 gsi_irq_type_enable(gsi, GSI_IEOB);
299 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
304 gsi->ieob_enabled_bitmap &= ~event_mask;
307 if (!gsi->ieob_enabled_bitmap)
308 gsi_irq_type_disable(gsi, GSI_IEOB);
310 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
311 val = gsi->ieob_enabled_bitmap;
312 iowrite32(val, gsi->virt + reg_offset(reg));
315 static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
317 gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
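The IEOB helpers keep one bit per event ring in gsi->ieob_enabled_bitmap and toggle the summary GSI_IEOB type only on the first enable (line 284's !bitmap test) and the last disable (line 307's empty-bitmap test). A standalone model of that first/last bookkeeping:

	#include <stdint.h>
	#include <stdbool.h>

	struct ieob_state {
		uint32_t ieob_enabled_bitmap;	/* one bit per event ring */
		bool summary_enabled;		/* stands in for the GSI_IEOB type */
	};

	static void ieob_enable_one(struct ieob_state *s, unsigned int evt_ring_id)
	{
		bool first = !s->ieob_enabled_bitmap;	/* enabling the first ring? */

		s->ieob_enabled_bitmap |= 1u << evt_ring_id;
		/* ...mirror the bitmap to the IEOB mask register here... */
		if (first)
			s->summary_enabled = true;
	}

	static void ieob_disable(struct ieob_state *s, uint32_t event_mask)
	{
		s->ieob_enabled_bitmap &= ~event_mask;
		if (!s->ieob_enabled_bitmap)
			s->summary_enabled = false;	/* last ring gone */
		/* ...mirror the bitmap to the IEOB mask register here... */
	}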
321 static void gsi_irq_enable(struct gsi *gsi)
329 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
330 iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
332 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);
339 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
343 iowrite32(val, gsi->virt + reg_offset(reg));
345 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
349 static void gsi_irq_disable(struct gsi *gsi)
353 gsi_irq_type_update(gsi, 0);
356 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
357 iowrite32(0, gsi->virt + reg_offset(reg));
359 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
360 iowrite32(0, gsi->virt + reg_offset(reg));
386 static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
389 struct completion *completion = &gsi->completion;
393 iowrite32(val, gsi->virt + reg);
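The unmatched lines of gsi_command() follow the kernel's standard completion pattern: the command value is written to the register, then the caller sleeps on gsi->completion until one of the gsi_isr_*() handlers calls complete() (see lines 1151, 1172, 1291), or a timeout expires. A sketch, assuming the <linux/completion.h> helpers; GSI_CMD_TIMEOUT_MS is a placeholder name, not the driver's constant:

	#include <linux/completion.h>
	#include <linux/io.h>
	#include <linux/jiffies.h>
	#include "gsi.h"

	static bool gsi_command_sketch(struct gsi *gsi, u32 reg, u32 val)
	{
		struct completion *completion = &gsi->completion;

		reinit_completion(completion);

		iowrite32(val, gsi->virt + reg);	/* line 393 above */

		/* Nonzero return means the ISR completed us in time */
		return !!wait_for_completion_timeout(completion,
						     msecs_to_jiffies(GSI_CMD_TIMEOUT_MS));
	}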
400 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
402 const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
405 val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id));
411 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
414 struct device *dev = gsi->dev;
420 gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
422 reg = gsi_reg(gsi, EV_CH_CMD);
426 timeout = !gsi_command(gsi, reg_offset(reg), val);
428 gsi_irq_ev_ctrl_disable(gsi);
434 opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
438 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
443 state = gsi_evt_ring_state(gsi, evt_ring_id);
445 dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
450 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
453 state = gsi_evt_ring_state(gsi, evt_ring_id);
457 dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
464 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
468 state = gsi_evt_ring_state(gsi, evt_ring_id);
471 dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
476 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
479 state = gsi_evt_ring_state(gsi, evt_ring_id);
483 dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
488 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
492 state = gsi_evt_ring_state(gsi, evt_ring_id);
494 dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
499 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
502 state = gsi_evt_ring_state(gsi, evt_ring_id);
506 dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
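The three event-ring command wrappers above (alloc, reset, de-alloc) share one shape: verify the ring is in the state the opcode expects, issue the command, then re-read the state to confirm the transition took. A condensed sketch; the state/opcode types are abbreviated to u32 here, and 'expected'/'next' are illustrative parameters, not driver API:

	static void evt_ring_command_checked(struct gsi *gsi, u32 evt_ring_id,
					     u32 opcode, u32 expected, u32 next)
	{
		u32 state = gsi_evt_ring_state(gsi, evt_ring_id);

		if (state != expected) {
			dev_err(gsi->dev, "event ring %u bad state %u before cmd %u\n",
				evt_ring_id, state, opcode);
			return;
		}

		gsi_evt_ring_command(gsi, evt_ring_id, opcode);

		state = gsi_evt_ring_state(gsi, evt_ring_id);
		if (state != next)
			dev_err(gsi->dev, "event ring %u bad state %u after cmd %u\n",
				evt_ring_id, state, opcode);
	}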
513 const struct reg *reg = gsi_reg(channel->gsi, CH_C_CNTXT_0);
515 struct gsi *gsi = channel->gsi;
516 void __iomem *virt = gsi->virt;
519 reg = gsi_reg(gsi, CH_C_CNTXT_0);
530 struct gsi *gsi = channel->gsi;
531 struct device *dev = gsi->dev;
537 gsi_irq_ch_ctrl_enable(gsi, channel_id);
539 reg = gsi_reg(gsi, CH_CMD);
543 timeout = !gsi_command(gsi, reg_offset(reg), val);
545 gsi_irq_ch_ctrl_disable(gsi);
555 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
557 struct gsi_channel *channel = &gsi->channel[channel_id];
558 struct device *dev = gsi->dev;
585 struct device *dev = channel->gsi->dev;
612 struct device *dev = channel->gsi->dev;
650 struct device *dev = channel->gsi->dev;
676 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
678 struct gsi_channel *channel = &gsi->channel[channel_id];
679 struct device *dev = gsi->dev;
704 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
706 const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);
707 struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
714 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
718 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
720 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
725 reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
731 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
733 reg = gsi_reg(gsi, EV_CH_E_CNTXT_1);
735 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
741 reg = gsi_reg(gsi, EV_CH_E_CNTXT_2);
743 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
745 reg = gsi_reg(gsi, EV_CH_E_CNTXT_3);
747 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
750 reg = gsi_reg(gsi, EV_CH_E_CNTXT_8);
754 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
757 reg = gsi_reg(gsi, EV_CH_E_CNTXT_9);
758 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
760 reg = gsi_reg(gsi, EV_CH_E_CNTXT_10);
761 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
763 reg = gsi_reg(gsi, EV_CH_E_CNTXT_11);
764 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
767 reg = gsi_reg(gsi, EV_CH_E_CNTXT_12);
768 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
770 reg = gsi_reg(gsi, EV_CH_E_CNTXT_13);
771 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
774 gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
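gsi_evt_ring_doorbell() records a free-running ring index and writes a ring-element bus address to the doorbell register; the index is reduced modulo the element count only when it is converted to an address. A standalone model of that conversion (the 16-byte element size is an assumption here, not taken from the matched lines):

	#include <stdint.h>

	struct ring_model {
		uint64_t base;		/* bus address of element 0 */
		uint32_t count;		/* number of elements in the ring */
		uint32_t index;		/* free-running; never wrapped in place */
	};

	/* Convert a free-running index into the address the doorbell
	 * expects: reduce modulo the ring size, then scale by element size. */
	static uint64_t ring_slot_addr(const struct ring_model *r, uint32_t index)
	{
		return r->base + (uint64_t)(index % r->count) * 16;
	}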
831 struct gsi *gsi = channel->gsi;
837 reg = gsi_reg(gsi, CH_C_CNTXT_0);
840 val = ch_c_cntxt_0_type_encode(gsi->version, reg, GSI_CHANNEL_TYPE_GPI);
843 if (gsi->version < IPA_VERSION_5_0)
846 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
848 reg = gsi_reg(gsi, CH_C_CNTXT_1);
850 if (gsi->version >= IPA_VERSION_5_0)
852 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
858 reg = gsi_reg(gsi, CH_C_CNTXT_2);
860 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
862 reg = gsi_reg(gsi, CH_C_CNTXT_3);
864 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
866 reg = gsi_reg(gsi, CH_C_QOS);
876 if (gsi->version < IPA_VERSION_4_0 && doorbell)
882 if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
884 if (gsi->version < IPA_VERSION_4_5)
890 if (gsi->version >= IPA_VERSION_4_9)
893 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
901 reg = gsi_reg(gsi, CH_C_SCRATCH_0);
903 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
905 reg = gsi_reg(gsi, CH_C_SCRATCH_1);
907 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
909 reg = gsi_reg(gsi, CH_C_SCRATCH_2);
911 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
917 reg = gsi_reg(gsi, CH_C_SCRATCH_3);
919 val = ioread32(gsi->virt + offset);
921 iowrite32(val, gsi->virt + offset);
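The ioread32()/iowrite32() pair at source lines 919/921 is a read-modify-write of CH_C_SCRATCH_3 that preserves the bits outside the field being programmed. A sketch using u32_replace_bits() from <linux/bitfield.h>; the mask and helper names are placeholders:

	#include <linux/bitfield.h>
	#include <linux/io.h>
	#include "gsi.h"

	static void scratch_field_update(struct gsi *gsi, u32 offset, u32 field)
	{
		u32 val;

		val = ioread32(gsi->virt + offset);	/* read current value */
		val = u32_replace_bits(val, field, SCRATCH_FIELD_FMASK);
		iowrite32(val, gsi->virt + offset);	/* write it back */
	}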
928 struct gsi *gsi = channel->gsi;
932 if (resume && gsi->version < IPA_VERSION_4_0)
935 mutex_lock(&gsi->mutex);
939 mutex_unlock(&gsi->mutex);
945 int gsi_channel_start(struct gsi *gsi, u32 channel_id)
947 struct gsi_channel *channel = &gsi->channel[channel_id];
952 gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
956 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
980 struct gsi *gsi = channel->gsi;
987 if (suspend && gsi->version < IPA_VERSION_4_0)
990 mutex_lock(&gsi->mutex);
994 mutex_unlock(&gsi->mutex);
1000 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
1002 struct gsi_channel *channel = &gsi->channel[channel_id];
1010 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
1017 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
1019 struct gsi_channel *channel = &gsi->channel[channel_id];
1021 mutex_lock(&gsi->mutex);
1025 if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
1033 mutex_unlock(&gsi->mutex);
1037 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
1039 struct gsi_channel *channel = &gsi->channel[channel_id];
1053 int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
1055 struct gsi_channel *channel = &gsi->channel[channel_id];
1061 void gsi_suspend(struct gsi *gsi)
1063 disable_irq(gsi->irq);
1067 void gsi_resume(struct gsi *gsi)
1069 enable_irq(gsi->irq);
1074 struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
1086 struct gsi *gsi = trans->gsi;
1091 channel = &gsi->channel[channel_id];
1098 ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
1119 struct gsi *gsi = trans->gsi;
1124 channel = &gsi->channel[channel_id];
1131 ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
1135 static void gsi_isr_chan_ctrl(struct gsi *gsi)
1140 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ);
1141 channel_mask = ioread32(gsi->virt + reg_offset(reg));
1143 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
1144 iowrite32(channel_mask, gsi->virt + reg_offset(reg));
1151 complete(&gsi->completion);
1156 static void gsi_isr_evt_ctrl(struct gsi *gsi)
1161 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ);
1162 event_mask = ioread32(gsi->virt + reg_offset(reg));
1164 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
1165 iowrite32(event_mask, gsi->virt + reg_offset(reg));
1172 complete(&gsi->completion);
1178 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
1181 dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
1182 complete(&gsi->completion);
1187 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
1193 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
1196 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1199 complete(&gsi->completion);
1200 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
1206 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
1211 static void gsi_isr_glob_err(struct gsi *gsi)
1223 log_reg = gsi_reg(gsi, ERROR_LOG);
1225 val = ioread32(gsi->virt + offset);
1226 iowrite32(0, gsi->virt + offset);
1228 clr_reg = gsi_reg(gsi, ERROR_LOG_CLR);
1229 iowrite32(~0, gsi->virt + reg_offset(clr_reg));
1238 gsi_isr_glob_chan_err(gsi, ee, which, code);
1240 gsi_isr_glob_evt_err(gsi, ee, which, code);
1242 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
1246 static void gsi_isr_gp_int1(struct gsi *gsi)
1271 reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
1272 val = ioread32(gsi->virt + reg_offset(reg));
1278 gsi->result = 0;
1282 gsi->result = -EAGAIN;
1286 dev_err(gsi->dev, "global INT1 generic result %u\n", result);
1287 gsi->result = -EIO;
1291 complete(&gsi->completion);
1295 static void gsi_isr_glob_ee(struct gsi *gsi)
1300 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS);
1301 val = ioread32(gsi->virt + reg_offset(reg));
1304 gsi_isr_glob_err(gsi);
1306 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR);
1307 iowrite32(val, gsi->virt + reg_offset(reg));
1313 gsi_isr_gp_int1(gsi);
1317 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
1321 static void gsi_isr_ieob(struct gsi *gsi)
1326 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ);
1327 event_mask = ioread32(gsi->virt + reg_offset(reg));
1329 gsi_irq_ieob_disable(gsi, event_mask);
1331 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR);
1332 iowrite32(event_mask, gsi->virt + reg_offset(reg));
1339 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
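After latching the pending IEOB bits, masking them (line 1329), and acking them (line 1332), gsi_isr_ieob() walks the set bits of event_mask and kicks each ring's NAPI instance, as at line 1339. The driver peels bits off with __ffs(); an equivalent walk with for_each_set_bit() from <linux/bitops.h>, wrapped in an illustrative helper:

	#include <linux/bitops.h>
	#include "gsi.h"

	static void ieob_dispatch(struct gsi *gsi, u32 event_mask)
	{
		unsigned long pending = event_mask;	/* widen for the bit helpers */
		unsigned int evt_ring_id;

		for_each_set_bit(evt_ring_id, &pending, 32)
			napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}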
1344 static void gsi_isr_general(struct gsi *gsi)
1346 struct device *dev = gsi->dev;
1350 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS);
1351 val = ioread32(gsi->virt + reg_offset(reg));
1353 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR);
1354 iowrite32(val, gsi->virt + reg_offset(reg));
1369 struct gsi *gsi = dev_id;
1375 reg = gsi_reg(gsi, CNTXT_TYPE_IRQ);
1379 while ((intr_mask = ioread32(gsi->virt + offset))) {
1391 gsi_isr_chan_ctrl(gsi);
1394 gsi_isr_evt_ctrl(gsi);
1397 gsi_isr_glob_ee(gsi);
1400 gsi_isr_ieob(gsi);
1403 gsi_isr_general(gsi);
1406 dev_err(gsi->dev,
1414 dev_err(gsi->dev, "interrupt flood\n");
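The top-level gsi_isr() re-reads CNTXT_TYPE_IRQ until it reads zero (line 1379), peeling the lowest pending type bit each pass with "u32 gsi_intr = BIT(__ffs(intr_mask)); intr_mask ^= gsi_intr;" and routing it to the per-type handlers matched above; a loop-count guard reports the "interrupt flood" at line 1414. A sketch of the per-bit dispatch (the helper name is illustrative; the type IDs and handlers are the driver's own):

	static void isr_dispatch_one(struct gsi *gsi, u32 gsi_intr)
	{
		switch (gsi_intr) {
		case GSI_CH_CTRL:
			gsi_isr_chan_ctrl(gsi);
			break;
		case GSI_EV_CTRL:
			gsi_isr_evt_ctrl(gsi);
			break;
		case GSI_GLOB_EE:
			gsi_isr_glob_ee(gsi);
			break;
		case GSI_IEOB:
			gsi_isr_ieob(gsi);
			break;
		case GSI_GENERAL:
			gsi_isr_general(gsi);
			break;
		default:
			dev_err(gsi->dev, "unexpected interrupt type 0x%08x\n",
				gsi_intr);
			break;
		}
	}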
1423 static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
1427 ret = platform_get_irq_byname(pdev, "gsi");
1431 gsi->irq = ret;
1438 gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
1446 channel = &gsi->channel[channel_id];
1447 if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
1464 * @gsi: GSI pointer
1486 static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
1488 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1513 trans = gsi_event_trans(gsi, event);
1532 gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
1536 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1539 struct device *dev = gsi->dev;
1558 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
1562 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
1566 static int gsi_evt_ring_id_alloc(struct gsi *gsi)
1570 if (gsi->event_bitmap == ~0U) {
1571 dev_err(gsi->dev, "event rings exhausted\n");
1575 evt_ring_id = ffz(gsi->event_bitmap);
1576 gsi->event_bitmap |= BIT(evt_ring_id);
1582 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
1584 gsi->event_bitmap &= ~BIT(evt_ring_id);
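gsi_evt_ring_id_alloc() treats gsi->event_bitmap as a simple ID allocator: ffz() finds the first zero bit, that position becomes the ring ID, and freeing clears the bit again. A standalone userspace model (__builtin_ctz(~x) plays the role of the kernel's ffz(x)):

	#include <stdint.h>

	static int bitmap_id_alloc(uint32_t *bitmap)
	{
		if (*bitmap == ~0u)
			return -1;			/* all 32 ids exhausted */

		int id = __builtin_ctz(~*bitmap);	/* first zero bit */
		*bitmap |= 1u << id;
		return id;
	}

	static void bitmap_id_free(uint32_t *bitmap, int id)
	{
		*bitmap &= ~(1u << id);
	}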
1592 struct gsi *gsi = channel->gsi;
1596 reg = gsi_reg(gsi, CH_C_DOORBELL_0);
1599 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
1606 struct gsi *gsi = channel->gsi;
1614 evt_ring = &gsi->evt_ring[evt_ring_id];
1620 reg = gsi_reg(gsi, EV_CH_E_CNTXT_4);
1622 index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
1627 trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
1636 gsi_evt_ring_update(gsi, evt_ring_id, index);
1691 gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
1711 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1713 struct gsi_channel *channel = &gsi->channel[channel_id];
1720 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1724 gsi_evt_ring_program(gsi, evt_ring_id);
1726 ret = gsi_channel_alloc_command(gsi, channel_id);
1733 netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
1736 netif_napi_add(&gsi->dummy_dev, &channel->napi,
1743 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1749 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1751 struct gsi_channel *channel = &gsi->channel[channel_id];
1759 gsi_channel_de_alloc_command(gsi, channel_id);
1760 gsi_evt_ring_reset_command(gsi, evt_ring_id);
1761 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1768 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1786 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
1788 iowrite32(val, gsi->virt + reg_offset(reg));
1791 reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
1793 val = ioread32(gsi->virt + offset);
1796 iowrite32(val, gsi->virt + offset);
1799 reg = gsi_reg(gsi, GENERIC_CMD);
1803 if (gsi->version >= IPA_VERSION_4_11)
1806 timeout = !gsi_command(gsi, reg_offset(reg), val);
1809 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
1810 iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
1813 return gsi->result;
1815 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1821 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1823 return gsi_generic_command(gsi, channel_id,
1827 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1833 ret = gsi_generic_command(gsi, channel_id,
1838 dev_err(gsi->dev, "error %d halting modem channel %u\n",
1844 gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
1855 if (!enable && gsi->version >= IPA_VERSION_4_11)
1859 ret = gsi_generic_command(gsi, channel_id, command, 0);
1863 dev_err(gsi->dev,
1869 static int gsi_channel_setup(struct gsi *gsi)
1875 gsi_irq_enable(gsi);
1877 mutex_lock(&gsi->mutex);
1880 ret = gsi_channel_setup_one(gsi, channel_id);
1883 } while (++channel_id < gsi->channel_count);
1887 struct gsi_channel *channel = &gsi->channel[channel_id++];
1893 dev_err(gsi->dev, "channel %u not supported by hardware\n",
1895 channel_id = gsi->channel_count;
1900 mask = gsi->modem_channel_bitmap;
1904 ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1912 mutex_unlock(&gsi->mutex);
1918 mask ^= gsi->modem_channel_bitmap;
1924 gsi_modem_channel_halt(gsi, channel_id);
1929 gsi_channel_teardown_one(gsi, channel_id);
1931 mutex_unlock(&gsi->mutex);
1933 gsi_irq_disable(gsi);
1939 static void gsi_channel_teardown(struct gsi *gsi)
1941 u32 mask = gsi->modem_channel_bitmap;
1944 mutex_lock(&gsi->mutex);
1951 gsi_modem_channel_halt(gsi, channel_id);
1954 channel_id = gsi->channel_count - 1;
1956 gsi_channel_teardown_one(gsi, channel_id);
1959 mutex_unlock(&gsi->mutex);
1961 gsi_irq_disable(gsi);
1965 static int gsi_irq_setup(struct gsi *gsi)
1971 reg = gsi_reg(gsi, CNTXT_INTSET);
1972 iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg));
1975 gsi_irq_type_update(gsi, 0);
1978 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
1979 iowrite32(0, gsi->virt + reg_offset(reg));
1981 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
1982 iowrite32(0, gsi->virt + reg_offset(reg));
1984 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
1985 iowrite32(0, gsi->virt + reg_offset(reg));
1987 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
1988 iowrite32(0, gsi->virt + reg_offset(reg));
1991 if (gsi->version > IPA_VERSION_3_1) {
1992 reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK);
1993 iowrite32(0, gsi->virt + reg_offset(reg));
1995 reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK);
1996 iowrite32(0, gsi->virt + reg_offset(reg));
1999 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
2000 iowrite32(0, gsi->virt + reg_offset(reg));
2002 ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
2004 dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
2009 static void gsi_irq_teardown(struct gsi *gsi)
2011 free_irq(gsi->irq, gsi);
2015 static int gsi_ring_setup(struct gsi *gsi)
2017 struct device *dev = gsi->dev;
2022 if (gsi->version < IPA_VERSION_3_5_1) {
2024 gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
2025 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
2030 reg = gsi_reg(gsi, HW_PARAM_2);
2031 val = ioread32(gsi->virt + reg_offset(reg));
2043 gsi->channel_count = count;
2045 if (gsi->version < IPA_VERSION_5_0) {
2048 reg = gsi_reg(gsi, HW_PARAM_4);
2061 gsi->evt_ring_count = count;
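gsi_ring_setup() reads the channel and event-ring counts out of HW_PARAM_2 (and, from IPA v5.0, HW_PARAM_4) and clamps them to the driver's compile-time maxima. A sketch of the clamp applied to each count; NUM_CH_FMASK is a placeholder field-mask name:

	#include <linux/bitfield.h>
	#include <linux/device.h>

	static u32 hw_count_clamped(struct device *dev, u32 val)
	{
		u32 count = u32_get_bits(val, NUM_CH_FMASK);	/* placeholder mask */

		if (!count) {
			dev_err(dev, "GSI reports zero channels supported\n");
			return 0;
		}
		if (count > GSI_CHANNEL_COUNT_MAX) {
			dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
				 GSI_CHANNEL_COUNT_MAX, count);
			count = GSI_CHANNEL_COUNT_MAX;
		}

		return count;
	}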
2067 int gsi_setup(struct gsi *gsi)
2074 reg = gsi_reg(gsi, GSI_STATUS);
2075 val = ioread32(gsi->virt + reg_offset(reg));
2077 dev_err(gsi->dev, "GSI has not been enabled\n");
2081 ret = gsi_irq_setup(gsi);
2085 ret = gsi_ring_setup(gsi); /* No matching teardown required */
2090 reg = gsi_reg(gsi, ERROR_LOG);
2091 iowrite32(0, gsi->virt + reg_offset(reg));
2093 ret = gsi_channel_setup(gsi);
2100 gsi_irq_teardown(gsi);
2106 void gsi_teardown(struct gsi *gsi)
2108 gsi_channel_teardown(gsi);
2109 gsi_irq_teardown(gsi);
2115 struct gsi *gsi = channel->gsi;
2119 ret = gsi_evt_ring_id_alloc(gsi);
2124 evt_ring = &gsi->evt_ring[channel->evt_ring_id];
2127 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
2131 dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
2134 gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
2143 struct gsi *gsi = channel->gsi;
2146 evt_ring = &gsi->evt_ring[evt_ring_id];
2147 gsi_ring_free(gsi, &evt_ring->ring);
2148 gsi_evt_ring_id_free(gsi, evt_ring_id);
2151 static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
2156 struct device *dev = gsi->dev;
2219 static int gsi_channel_init_one(struct gsi *gsi,
2227 if (!gsi_channel_data_valid(gsi, command, data))
2233 dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
2239 channel = &gsi->channel[data->channel_id];
2242 channel->gsi = gsi;
2253 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2255 dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2260 ret = gsi_channel_trans_init(gsi, data->channel_id);
2265 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2274 gsi_ring_free(gsi, &channel->tre_ring);
2278 channel->gsi = NULL; /* Mark it not (fully) initialized */
2292 gsi_ring_free(channel->gsi, &channel->tre_ring);
2297 static int gsi_channel_init(struct gsi *gsi, u32 count,
2305 modem_alloc = gsi->version == IPA_VERSION_4_2;
2307 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
2308 gsi->ieob_enabled_bitmap = 0;
2320 gsi->modem_channel_bitmap |=
2325 ret = gsi_channel_init_one(gsi, &data[i], command);
2337 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2340 gsi_channel_exit_one(&gsi->channel[data->channel_id]);
2347 static void gsi_channel_exit(struct gsi *gsi)
2352 gsi_channel_exit_one(&gsi->channel[channel_id]);
2354 gsi->modem_channel_bitmap = 0;
2358 int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2366 gsi->dev = &pdev->dev;
2367 gsi->version = version;
2372 init_dummy_netdev(&gsi->dummy_dev);
2373 init_completion(&gsi->completion);
2375 ret = gsi_reg_init(gsi, pdev);
2379 ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
2383 ret = gsi_channel_init(gsi, count, data);
2387 mutex_init(&gsi->mutex);
2392 gsi_reg_exit(gsi);
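gsi_init() unwinds with the usual kernel goto pattern: a failure after gsi_reg_init() branches to the gsi_reg_exit() call at source line 2392, and gsi_exit() below tears down in the reverse order of construction. The generic shape of the idiom, with placeholder step names:

	ret = step_a_init();		/* e.g. gsi_reg_init() */
	if (ret)
		return ret;

	ret = step_b_init();		/* e.g. gsi_channel_init() */
	if (ret)
		goto err_a_exit;	/* undo only what succeeded */

	return 0;

	err_a_exit:
		step_a_exit();		/* e.g. gsi_reg_exit() */

		return ret;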
2398 void gsi_exit(struct gsi *gsi)
2400 mutex_destroy(&gsi->mutex);
2401 gsi_channel_exit(gsi);
2402 gsi_reg_exit(gsi);
2425 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2427 struct gsi_channel *channel = &gsi->channel[channel_id];