Lines Matching refs:ipa

16 #include "ipa.h"
125 static u32 ipa_status_extract(struct ipa *ipa, const void *data,
128 enum ipa_version version = ipa->version;
231 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
237 struct device *dev = ipa->dev;
305 reg = ipa_reg(ipa, ENDP_INIT_AGGR);
319 if (ipa->version >= IPA_VERSION_4_5) {
387 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
391 struct device *dev = ipa->dev;
421 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
433 struct gsi *gsi = &endpoint->ipa->gsi;
449 struct ipa *ipa = endpoint->ipa;
458 WARN_ON(ipa->version >= IPA_VERSION_4_2);
460 WARN_ON(ipa->version >= IPA_VERSION_4_0);
462 reg = ipa_reg(ipa, ENDP_INIT_CTRL);
464 val = ioread32(ipa->reg_virt + offset);
474 iowrite32(val, ipa->reg_virt + offset);
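
The cluster at lines 462-474 shows the driver's read-modify-write idiom for ENDP_INIT_CTRL: look up the register, ioread32() the current value, flip one bit, and iowrite32() the result back. A minimal standalone sketch of that idiom follows; mmio_read32()/mmio_write32() are hypothetical stand-ins for the kernel accessors.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the kernel's ioread32()/iowrite32(). */
    static uint32_t mmio_read32(volatile uint32_t *addr) { return *addr; }
    static void mmio_write32(volatile uint32_t *addr, uint32_t val) { *addr = val; }

    /* Set or clear one control bit with a read-modify-write, leaving all
     * other bits in the register untouched. */
    static void ctrl_update(volatile uint32_t *reg, uint32_t mask, bool enable)
    {
            uint32_t val = mmio_read32(reg);

            if (enable)
                    val |= mask;
            else
                    val &= ~mask;
            mmio_write32(reg, val);
    }
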
485 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
494 struct ipa *ipa = endpoint->ipa;
499 WARN_ON(!test_bit(endpoint_id, ipa->available));
501 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
502 val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
511 struct ipa *ipa = endpoint->ipa;
515 WARN_ON(!test_bit(endpoint_id, ipa->available));
517 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
518 iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
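
Lines 501-518 address per-unit registers through reg_n_offset(reg, unit), and the per-endpoint writes later in the file use reg_n_offset(reg, endpoint_id) the same way. Assuming a register descriptor that carries a base offset plus a per-instance stride, the computation looks like the sketch below (field names illustrative):

    #include <stdint.h>

    /* A register described by a base offset and a per-instance stride. */
    struct reg_desc {
            uint32_t offset;
            uint32_t stride;
    };

    /* Instance n of the register lives n strides past instance 0, which
     * is what reg_n_offset(reg, n) computes in the driver. */
    static uint32_t reg_n_offset_sketch(const struct reg_desc *reg, uint32_t n)
    {
            return reg->offset + n * reg->stride;
    }
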
532 struct ipa *ipa = endpoint->ipa;
544 ipa_interrupt_simulate_suspend(ipa->interrupt);
553 if (endpoint->ipa->version >= IPA_VERSION_4_0)
575 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
579 while (endpoint_id < ipa->endpoint_count) {
580 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
587 else if (ipa->version < IPA_VERSION_4_2)
590 gsi_modem_channel_flow_control(&ipa->gsi,
597 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
606 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
607 trans = ipa_cmd_trans_alloc(ipa, count);
609 dev_err(ipa->dev,
614 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
620 endpoint = &ipa->endpoint[endpoint_id];
624 reg = ipa_reg(ipa, ENDP_STATUS);
638 ipa_cmd_pipeline_clear_wait(ipa);
646 struct ipa *ipa = endpoint->ipa;
651 reg = ipa_reg(ipa, ENDP_INIT_CFG);
654 enum ipa_version version = ipa->version;
677 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
683 struct ipa *ipa = endpoint->ipa;
690 reg = ipa_reg(ipa, ENDP_INIT_NAT);
693 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
785 struct ipa *ipa = endpoint->ipa;
789 reg = ipa_reg(ipa, ENDP_INIT_HDR);
791 enum ipa_version version = ipa->version;
823 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
830 struct ipa *ipa = endpoint->ipa;
834 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
861 if (ipa->version >= IPA_VERSION_4_5) {
875 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
881 struct ipa *ipa = endpoint->ipa;
889 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
896 iowrite32(val, ipa->reg_virt + offset);
901 struct ipa *ipa = endpoint->ipa;
909 reg = ipa_reg(ipa, ENDP_INIT_MODE);
912 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
922 iowrite32(val, ipa->reg_virt + offset);
937 ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
953 if (ipa->version >= IPA_VERSION_5_0) {
966 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
976 if (ipa->version >= IPA_VERSION_4_5) {
979 ticks = ipa_qtime_val(ipa, microseconds, max, &select);
996 struct ipa *ipa = endpoint->ipa;
1000 reg = ipa_reg(ipa, ENDP_INIT_AGGR);
1017 val |= aggr_time_limit_encode(ipa, reg, limit);
1035 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1046 static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
1059 if (ipa->version >= IPA_VERSION_4_5) {
1064 ticks = ipa_qtime_val(ipa, microseconds, max, &select);
1071 rate = ipa_core_clock_rate(ipa);
1078 if (ipa->version < IPA_VERSION_4_2)
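
hol_block_timer_encode() (lines 1046-1078) takes two paths: IPA v4.5 and later convert microseconds to a Qtime value via ipa_qtime_val(), while older versions derive a tick count from the core clock rate returned by ipa_core_clock_rate(). A simplified sketch of the pre-v4.5 arithmetic, ignoring the driver's register field packing and any hardware granularity:

    #include <stdint.h>

    #define USEC_PER_SEC 1000000u

    /* Convert a microsecond timeout into clock ticks at rate_hz, rounding
     * up so the programmed timeout is never shorter than requested. */
    static uint32_t usec_to_ticks(uint32_t rate_hz, uint32_t microseconds)
    {
            uint64_t ticks = (uint64_t)rate_hz * microseconds;

            return (uint32_t)((ticks + USEC_PER_SEC - 1) / USEC_PER_SEC);
    }

For instance, a 100 microsecond limit at a 19.2 MHz core clock works out to 1920 ticks.
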
1111 struct ipa *ipa = endpoint->ipa;
1116 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
1117 val = hol_block_timer_encode(ipa, reg, microseconds);
1119 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1126 struct ipa *ipa = endpoint->ipa;
1131 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
1135 iowrite32(val, ipa->reg_virt + offset);
1138 if (enable && ipa->version >= IPA_VERSION_4_5)
1139 iowrite32(val, ipa->reg_virt + offset);
1155 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1159 while (endpoint_id < ipa->endpoint_count) {
1160 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1173 struct ipa *ipa = endpoint->ipa;
1180 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1186 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1193 struct ipa *ipa = endpoint->ipa;
1197 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1200 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1206 struct ipa *ipa = endpoint->ipa;
1213 reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1219 if (ipa->version < IPA_VERSION_4_5)
1223 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1272 struct ipa *ipa = endpoint->ipa;
1276 reg = ipa_reg(ipa, ENDP_STATUS);
1284 status_endpoint_id = ipa->name_map[name]->endpoint_id;
1294 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1372 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1382 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1464 struct ipa *ipa = endpoint->ipa;
1468 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
1472 endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
1484 struct ipa *ipa = endpoint->ipa;
1487 status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
1496 endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
1497 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1499 complete(&ipa->completion);
1501 dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
1513 struct ipa *ipa = endpoint->ipa;
1521 exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
1526 rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
1537 struct ipa *ipa = endpoint->ipa;
1538 struct device *dev = ipa->dev;
1554 length = ipa_status_extract(ipa, data, STATUS_LENGTH);
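
The status-handling cluster (lines 1464-1554) funnels every field read through ipa_status_extract() (line 125), whose body switches on ipa->version (line 128) because the same logical field can sit at different bit positions on different hardware generations. A toy version of that shape; the field layout below is invented for illustration and is not the real status format:

    #include <stdint.h>

    enum status_field { STATUS_OPCODE, STATUS_LENGTH };

    /* Extract one logical field from a raw status buffer; where the field
     * lives depends on the hardware version.  Positions are illustrative. */
    static uint32_t status_extract(unsigned int version, const uint32_t *word,
                                   enum status_field field)
    {
            switch (field) {
            case STATUS_OPCODE:
                    return word[0] & 0xff;
            case STATUS_LENGTH:
                    if (version >= 5)
                            return (word[1] >> 16) & 0xffff;
                    return word[1] & 0xffff;
            }
            return 0;
    }
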
1619 struct ipa *ipa = endpoint->ipa;
1622 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1636 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1641 reg = ipa_reg(ipa, ROUTE);
1649 iowrite32(val, ipa->reg_virt + reg_offset(reg));
1652 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1654 ipa_endpoint_default_route_set(ipa, 0);
1669 struct ipa *ipa = endpoint->ipa;
1670 struct device *dev = ipa->dev;
1671 struct gsi *gsi = &ipa->gsi;
1756 struct ipa *ipa = endpoint->ipa;
1764 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1769 gsi_channel_reset(&ipa->gsi, channel_id, true);
1772 dev_err(ipa->dev,
1786 if (endpoint->ipa->version < IPA_VERSION_4_2)
1814 struct ipa *ipa = endpoint->ipa;
1815 struct gsi *gsi = &ipa->gsi;
1820 dev_err(ipa->dev,
1828 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1832 __set_bit(endpoint_id, ipa->enabled);
1840 struct ipa *ipa = endpoint->ipa;
1841 struct gsi *gsi = &ipa->gsi;
1844 if (!test_bit(endpoint_id, ipa->enabled))
1847 __clear_bit(endpoint_id, endpoint->ipa->enabled);
1851 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1857 dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
1863 struct device *dev = endpoint->ipa->dev;
1864 struct gsi *gsi = &endpoint->ipa->gsi;
1867 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1883 struct device *dev = endpoint->ipa->dev;
1884 struct gsi *gsi = &endpoint->ipa->gsi;
1887 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1901 void ipa_endpoint_suspend(struct ipa *ipa)
1903 if (!ipa->setup_complete)
1906 if (ipa->modem_netdev)
1907 ipa_modem_suspend(ipa->modem_netdev);
1909 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1910 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1913 void ipa_endpoint_resume(struct ipa *ipa)
1915 if (!ipa->setup_complete)
1918 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1919 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1921 if (ipa->modem_netdev)
1922 ipa_modem_resume(ipa->modem_netdev);
1927 struct gsi *gsi = &endpoint->ipa->gsi;
1947 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1952 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1960 void ipa_endpoint_setup(struct ipa *ipa)
1964 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1965 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1968 void ipa_endpoint_teardown(struct ipa *ipa)
1972 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
1973 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
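
ipa_endpoint_setup() and ipa_endpoint_teardown() (lines 1960-1973) drive the lifecycle off state bitmaps: setup walks the defined bits, teardown walks set_up, and each _one() helper sets or clears its endpoint's bit (lines 1947-1952). A self-contained sketch of that bitmap-driven iteration, with a hand-rolled test_bit() in place of the kernel's for_each_set_bit():

    #include <limits.h>
    #include <stdbool.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

    static bool test_bit_sketch(const unsigned long *map, unsigned int bit)
    {
            return map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
    }

    /* Visit only the endpoints whose bit is set in the state bitmap. */
    static void for_each_set_endpoint(const unsigned long *map,
                                      unsigned int count,
                                      void (*fn)(unsigned int id))
    {
            for (unsigned int id = 0; id < count; id++)
                    if (test_bit_sketch(map, id))
                            fn(id);
    }
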
1976 void ipa_endpoint_deconfig(struct ipa *ipa)
1978 ipa->available_count = 0;
1979 bitmap_free(ipa->available);
1980 ipa->available = NULL;
1983 int ipa_endpoint_config(struct ipa *ipa)
1985 struct device *dev = ipa->dev;
2005 if (ipa->version < IPA_VERSION_3_5) {
2006 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
2007 if (!ipa->available)
2009 ipa->available_count = IPA_ENDPOINT_MAX;
2011 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
2019 reg = ipa_reg(ipa, FLAVOR_0);
2020 val = ioread32(ipa->reg_virt + reg_offset(reg));
2035 hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
2043 ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
2044 if (!ipa->available)
2046 ipa->available_count = limit;
2049 bitmap_set(ipa->available, 0, tx_count);
2050 bitmap_set(ipa->available, rx_base, rx_count);
2052 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
2061 if (!test_bit(endpoint_id, ipa->available)) {
2068 endpoint = &ipa->endpoint[endpoint_id];
2083 ipa_endpoint_deconfig(ipa);
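
ipa_endpoint_config() (lines 1983-2083) builds the available bitmap two ways: hardware older than v3.5 cannot report its endpoint count, so all IPA_ENDPOINT_MAX bits are set (lines 2006-2011); otherwise FLAVOR_0 is read and TX endpoint IDs occupy [0, tx_count) while RX endpoint IDs start at rx_base (lines 2049-2050). A userspace sketch of building such a two-range bitmap, with calloc-based stand-ins for the kernel bitmap API:

    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

    static unsigned long *bitmap_zalloc_sketch(unsigned int bits)
    {
            return calloc((bits + BITS_PER_LONG - 1) / BITS_PER_LONG,
                          sizeof(unsigned long));
    }

    static void bitmap_set_sketch(unsigned long *map, unsigned int start,
                                  unsigned int count)
    {
            while (count--) {
                    map[start / BITS_PER_LONG] |= 1UL << (start % BITS_PER_LONG);
                    start++;
            }
    }

    /* TX IDs start at 0, RX IDs start at rx_base; IDs in any gap between
     * the two ranges are simply never marked available. */
    static unsigned long *make_available(unsigned int limit,
                                         unsigned int tx_count,
                                         unsigned int rx_base,
                                         unsigned int rx_count)
    {
            unsigned long *map = bitmap_zalloc_sketch(limit);

            if (map) {
                    bitmap_set_sketch(map, 0, tx_count);
                    bitmap_set_sketch(map, rx_base, rx_count);
            }
            return map;
    }
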
2088 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
2093 endpoint = &ipa->endpoint[data->endpoint_id];
2096 ipa->channel_map[data->channel_id] = endpoint;
2097 ipa->name_map[name] = endpoint;
2099 endpoint->ipa = ipa;
2106 __set_bit(endpoint->endpoint_id, ipa->defined);
2111 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
2116 void ipa_endpoint_exit(struct ipa *ipa)
2120 ipa->filtered = 0;
2122 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
2123 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
2125 bitmap_free(ipa->enabled);
2126 ipa->enabled = NULL;
2127 bitmap_free(ipa->set_up);
2128 ipa->set_up = NULL;
2129 bitmap_free(ipa->defined);
2130 ipa->defined = NULL;
2132 memset(ipa->name_map, 0, sizeof(ipa->name_map));
2133 memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
2137 int ipa_endpoint_init(struct ipa *ipa, u32 count,
2146 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
2147 if (!ipa->endpoint_count)
2151 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2152 if (!ipa->defined)
2155 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2156 if (!ipa->set_up)
2159 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2160 if (!ipa->enabled)
2168 ipa_endpoint_init_one(ipa, name, data);
2173 ipa->modem_tx_count++;
2177 if (!ipa_filtered_valid(ipa, filtered)) {
2178 ipa_endpoint_exit(ipa);
2183 ipa->filtered = filtered;
2188 bitmap_free(ipa->set_up);
2189 ipa->set_up = NULL;
2191 bitmap_free(ipa->defined);
2192 ipa->defined = NULL;
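
ipa_endpoint_init() (lines 2137-2192) allocates the defined, set_up, and enabled bitmaps in order and, on failure, unwinds only what was already allocated, in reverse (lines 2188-2192); ipa_endpoint_exit() (lines 2125-2130) frees all three the same way. A compact sketch of that goto-unwind pattern; the struct and names are illustrative:

    #include <stdlib.h>

    struct ep_state {
            unsigned long *defined;
            unsigned long *set_up;
            unsigned long *enabled;
    };

    /* Each allocation gets a matching cleanup label; a failure jumps past
     * its own label so only earlier allocations are freed, in reverse. */
    static int ep_state_init(struct ep_state *s, size_t words)
    {
            s->defined = calloc(words, sizeof(*s->defined));
            if (!s->defined)
                    return -1;
            s->set_up = calloc(words, sizeof(*s->set_up));
            if (!s->set_up)
                    goto err_free_defined;
            s->enabled = calloc(words, sizeof(*s->enabled));
            if (!s->enabled)
                    goto err_free_set_up;
            return 0;

    err_free_set_up:
            free(s->set_up);
            s->set_up = NULL;
    err_free_defined:
            free(s->defined);
            s->defined = NULL;
            return -1;
    }
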