Lines Matching defs:il

27 _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
33 if ((_il_rd(il, addr) & mask) == (bits & mask))
66 _il_grab_nic_access(struct il_priv *il)
72 _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
92 _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
96 val = _il_rd(il, CSR_GP_CNTRL);
99 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
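The cluster above is the wake handshake: _il_grab_nic_access() requests MAC access, polls for the access-enabled bit, and on timeout logs CSR_GP_CNTRL and forces an NMI. A condensed kernel-context sketch of that flow; the poll mask and the 15000-usec timeout are assumptions, not values shown in the listing:

/* Condensed sketch of _il_grab_nic_access() (lines 66-99 above).
 * Returns true when the MAC woke up; mask and timeout are assumed. */
static bool demo_grab_nic_access(struct il_priv *il)
{
        _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        if (_il_poll_bit(il, CSR_GP_CNTRL,
                         CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
                         CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 15000) < 0) {
                u32 val = _il_rd(il, CSR_GP_CNTRL);     /* for the error log */

                IL_ERR("MAC is asleep, CSR_GP_CNTRL = 0x%08X\n", val);
                _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
                return false;
        }
        return true;
}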
108 il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
114 if ((il_rd(il, addr) & mask) == mask)
125 il_rd_prph(struct il_priv *il, u32 reg)
130 spin_lock_irqsave(&il->reg_lock, reg_flags);
131 _il_grab_nic_access(il);
132 val = _il_rd_prph(il, reg);
133 _il_release_nic_access(il);
134 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
140 il_wr_prph(struct il_priv *il, u32 addr, u32 val)
144 spin_lock_irqsave(&il->reg_lock, reg_flags);
145 if (likely(_il_grab_nic_access(il))) {
146 _il_wr_prph(il, addr, val);
147 _il_release_nic_access(il);
149 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
154 il_read_targ_mem(struct il_priv *il, u32 addr)
159 spin_lock_irqsave(&il->reg_lock, reg_flags);
160 _il_grab_nic_access(il);
162 _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
163 value = _il_rd(il, HBUS_TARG_MEM_RDAT);
165 _il_release_nic_access(il);
166 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
172 il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
176 spin_lock_irqsave(&il->reg_lock, reg_flags);
177 if (likely(_il_grab_nic_access(il))) {
178 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
179 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
180 _il_release_nic_access(il);
182 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
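All four accessors above share one discipline: take il->reg_lock with IRQs off, wake the NIC, do the indirect access, release, unlock. Note that the write paths check _il_grab_nic_access()'s return value (lines 145 and 177) while the read paths shown here call it unconditionally. A kernel-context sketch of the defensive (checked) form:

/* Sketch of the locked indirect-access pattern shared by
 * il_rd_prph()/il_wr_prph()/il_read_targ_mem()/il_write_targ_mem(). */
static u32 demo_rd_prph(struct il_priv *il, u32 reg)
{
        unsigned long reg_flags;
        u32 val = 0;

        spin_lock_irqsave(&il->reg_lock, reg_flags);
        if (likely(_il_grab_nic_access(il))) {  /* wake the NIC */
                val = _il_rd_prph(il, reg);     /* indirect periphery read */
                _il_release_nic_access(il);     /* allow sleep again */
        }
        spin_unlock_irqrestore(&il->reg_lock, reg_flags);
        return val;
}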
241 il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
264 il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
277 if (test_bit(S_EXIT_PENDING, &il->status))
280 ret = il_enqueue_hcmd(il, cmd);
290 il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
295 lockdep_assert_held(&il->mutex);
305 set_bit(S_HCMD_ACTIVE, &il->status);
309 cmd_idx = il_enqueue_hcmd(il, cmd);
317 ret = wait_event_timeout(il->wait_command_queue,
318 !test_bit(S_HCMD_ACTIVE, &il->status),
321 if (test_bit(S_HCMD_ACTIVE, &il->status)) {
326 clear_bit(S_HCMD_ACTIVE, &il->status);
334 if (test_bit(S_RFKILL, &il->status)) {
340 if (test_bit(S_FW_ERROR, &il->status)) {
364 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
368 il_free_pages(il, cmd->reply_page);
377 il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
380 return il_send_cmd_async(il, cmd);
382 return il_send_cmd_sync(il, cmd);
387 il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
395 return il_send_cmd_sync(il, &cmd);
400 il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
401 void (*callback) (struct il_priv *il,
414 return il_send_cmd_async(il, &cmd);
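il_send_cmd() (line 377) is just a dispatcher: CMD_ASYNC commands are enqueued and completed via callback, everything else goes through the sync path that sets S_HCMD_ACTIVE and sleeps on wait_command_queue. A hedged sketch of a typical caller, assuming struct il_host_cmd carries the id/len/data/flags fields used by il_send_cmd_pdu() above:

/* Illustrative one-shot command send; not a function from the driver. */
static int demo_send_fixed_cmd(struct il_priv *il, u8 id,
                               const void *payload, u16 len, bool async)
{
        struct il_host_cmd cmd = {
                .id = id,
                .len = len,
                .data = payload,
        };

        if (async) {
                cmd.flags |= CMD_ASYNC;         /* completed via callback */
                return il_send_cmd_async(il, &cmd);
        }
        return il_send_cmd_sync(il, &cmd);      /* sleeps; needs il->mutex */
}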
462 il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
475 il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
483 if (!test_bit(S_READY, &il->status))
486 if (il->blink_on == on && il->blink_off == off)
495 il->cfg->led_compensation);
497 il_blink_compensation(il, on,
498 il->cfg->led_compensation);
500 il_blink_compensation(il, off,
501 il->cfg->led_compensation);
503 ret = il->ops->send_led_cmd(il, &led_cmd);
505 il->blink_on = on;
506 il->blink_off = off;
515 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
521 il_led_cmd(il, on, 0);
528 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
530 return il_led_cmd(il, *delay_on, *delay_off);
534 il_leds_init(struct il_priv *il)
540 mode = il->cfg->led_mode;
542 il->led.name =
543 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
544 if (!il->led.name)
547 il->led.brightness_set = il_led_brightness_set;
548 il->led.blink_set = il_led_blink_set;
549 il->led.max_brightness = 1;
556 il->led.default_trigger =
557 ieee80211_create_tpt_led_trigger(il->hw,
563 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
567 ret = led_classdev_register(&il->pci_dev->dev, &il->led);
569 kfree(il->led.name);
573 il->led_registered = true;
578 il_leds_exit(struct il_priv *il)
580 if (!il->led_registered)
583 led_classdev_unregister(&il->led);
584 kfree(il->led.name);
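The LED code above follows the usual classdev lifecycle: kasprintf() the name, register, free the name on the failure path, and unregister-then-free at teardown guarded by led_registered. A minimal kernel-context sketch of that symmetry:

/* Sketch of the register/cleanup symmetry in il_leds_init()/_exit(). */
static int demo_led_register(struct il_priv *il)
{
        int ret;

        il->led.name = kasprintf(GFP_KERNEL, "%s-led",
                                 wiphy_name(il->hw->wiphy));
        if (!il->led.name)
                return -ENOMEM;

        ret = led_classdev_register(&il->pci_dev->dev, &il->led);
        if (ret) {
                kfree(il->led.name);    /* failure path frees the name */
                return ret;
        }
        il->led_registered = true;      /* lets _exit() know what to undo */
        return 0;
}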
600 * information into il->channel_info_24/52 and il->channel_map_24/52
656 il_eeprom_verify_signature(struct il_priv *il)
658 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
675 il_eeprom_query_addr(const struct il_priv *il, size_t offset)
677 BUG_ON(offset >= il->cfg->eeprom_size);
678 return &il->eeprom[offset];
683 il_eeprom_query16(const struct il_priv *il, size_t offset)
685 if (!il->eeprom)
687 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
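il_eeprom_query16() assembles a little-endian u16 from two adjacent EEPROM bytes. A standalone, runnable illustration of the byte math with hypothetical contents:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint8_t eeprom[] = { 0x34, 0x12 };      /* hypothetical EEPROM bytes */
        uint16_t v = (uint16_t)eeprom[0] | ((uint16_t)eeprom[1] << 8);

        assert(v == 0x1234);                    /* low byte comes first */
        return 0;
}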
694 * Load the EEPROM contents from adapter into il->eeprom
699 il_eeprom_init(struct il_priv *il)
702 u32 gp = _il_rd(il, CSR_EEPROM_GP);
708 sz = il->cfg->eeprom_size;
710 il->eeprom = kzalloc(sz, GFP_KERNEL);
711 if (!il->eeprom)
714 e = (__le16 *) il->eeprom;
716 il->ops->apm_init(il);
718 ret = il_eeprom_verify_signature(il);
726 ret = il->ops->eeprom_acquire_semaphore(il);
737 _il_wr(il, CSR_EEPROM_REG,
741 _il_poll_bit(il, CSR_EEPROM_REG,
749 r = _il_rd(il, CSR_EEPROM_REG);
754 il_eeprom_query16(il, EEPROM_VERSION));
758 il->ops->eeprom_release_semaphore(il);
762 il_eeprom_free(il);
764 il_apm_stop(il);
770 il_eeprom_free(struct il_priv *il)
772 kfree(il->eeprom);
773 il->eeprom = NULL;
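il_eeprom_init() reads the image one 16-bit word at a time (lines 737-749): write the address into CSR_EEPROM_REG, poll a read-valid bit, then take the data from the register's upper half. A kernel-context sketch of one such word read; the mask and timeout names are assumptions, since the listing only shows the register accesses:

/* Sketch of one EEPROM word read; CSR_EEPROM_REG_MSK_ADDR,
 * CSR_EEPROM_REG_READ_VALID_MSK and the timeout are assumed names. */
static int demo_eeprom_read_word(struct il_priv *il, u32 addr, __le16 *out)
{
        u32 r;

        _il_wr(il, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
        if (_il_poll_bit(il, CSR_EEPROM_REG,
                         CSR_EEPROM_REG_READ_VALID_MSK,
                         CSR_EEPROM_REG_READ_VALID_MSK, 100) < 0)
                return -ETIMEDOUT;              /* read never became valid */

        r = _il_rd(il, CSR_EEPROM_REG);
        *out = cpu_to_le16(r >> 16);            /* data lives in bits 31:16 */
        return 0;
}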
778 il_init_band_reference(const struct il_priv *il, int eep_band,
783 u32 offset = il->cfg->regulatory_bands[eep_band - 1];
789 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
796 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
803 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
810 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
817 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
824 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
831 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
843 * il_mod_ht40_chan_info - Copy HT40 channel info into the driver's il_priv.
848 il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
855 (struct il_channel_info *)il_get_channel_info(il, band, channel);
887 il_init_channel_map(struct il_priv *il)
895 if (il->channel_count) {
902 il->channel_count =
907 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
909 il->channel_info =
910 kcalloc(il->channel_count, sizeof(struct il_channel_info),
912 if (!il->channel_info) {
914 il->channel_count = 0;
918 ch_info = il->channel_info;
925 il_init_band_reference(il, band, &eeprom_ch_count,
985 if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
986 il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
993 il_init_band_reference(il, band, &eeprom_ch_count,
1003 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
1008 il_mod_ht40_chan_info(il, ieeeband,
1023 il_free_channel_map(struct il_priv *il)
1025 kfree(il->channel_info);
1026 il->channel_count = 0;
1036 il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
1043 for (i = 14; i < il->channel_count; i++) {
1044 if (il->channel_info[i].channel == channel)
1045 return &il->channel_info[i];
1050 return &il->channel_info[channel - 1];
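il_get_channel_info() splits its lookup: 5 GHz channels are found by scanning from table index 14, while 2.4 GHz channels 1-14 are assumed to occupy the first 14 slots in order and are direct-indexed as channel - 1. A sketch of that split:

/* Sketch of the lookup split in il_get_channel_info(). */
static const struct il_channel_info *
demo_get_channel_info(const struct il_priv *il,
                      enum nl80211_band band, u16 channel)
{
        int i;

        if (band == NL80211_BAND_5GHZ) {
                for (i = 14; i < il->channel_count; i++)  /* linear scan */
                        if (il->channel_info[i].channel == channel)
                                return &il->channel_info[i];
                return NULL;
        }
        if (channel >= 1 && channel <= 14)      /* 2.4 GHz fast path */
                return &il->channel_info[channel - 1];
        return NULL;
}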
1076 il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
1089 if (il->power_data.pci_pm)
1093 if (il->power_data.ps_disabled)
1103 dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
1143 il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
1156 return il_send_cmd_pdu(il, C_POWER_TBL,
1161 il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
1166 lockdep_assert_held(&il->mutex);
1169 update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
1170 il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
1172 if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
1175 if (!il_is_ready_rf(il))
1179 memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
1180 if (test_bit(S_SCANNING, &il->status) && !force) {
1186 set_bit(S_POWER_PMI, &il->status);
1188 ret = il_set_power(il, cmd);
1191 clear_bit(S_POWER_PMI, &il->status);
1193 if (il->ops->update_chain_flags && update_chains)
1194 il->ops->update_chain_flags(il);
1195 else if (il->ops->update_chain_flags)
1198 il->chain_noise_data.state);
1200 memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
1208 il_power_update_mode(struct il_priv *il, bool force)
1212 il_build_powertable_cmd(il, &cmd);
1214 return il_power_set_mode(il, &cmd, force);
1220 il_power_initialize(struct il_priv *il)
1224 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
1225 il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
1227 il->power_data.debug_sleep_level_override = -1;
1229 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
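il_power_initialize() derives pci_pm from the PCIe Link Control register: the flag is set exactly when ASPM L0s is not advertised. A kernel-context sketch of that probe using the standard capability accessor:

/* Sketch of the ASPM L0s probe in il_power_initialize(). */
static void demo_probe_pci_pm(struct il_priv *il)
{
        u16 lctl = 0;

        pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
        il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}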
1251 il_send_scan_abort(struct il_priv *il)
1263 if (!test_bit(S_READY, &il->status) ||
1264 !test_bit(S_GEO_CONFIGURED, &il->status) ||
1265 !test_bit(S_SCAN_HW, &il->status) ||
1266 test_bit(S_FW_ERROR, &il->status) ||
1267 test_bit(S_EXIT_PENDING, &il->status))
1270 ret = il_send_cmd_sync(il, &cmd);
1286 il_free_pages(il, cmd.reply_page);
1291 il_complete_scan(struct il_priv *il, bool aborted)
1298 if (il->scan_request) {
1300 ieee80211_scan_completed(il->hw, &info);
1303 il->scan_vif = NULL;
1304 il->scan_request = NULL;
1308 il_force_scan_end(struct il_priv *il)
1310 lockdep_assert_held(&il->mutex);
1312 if (!test_bit(S_SCANNING, &il->status)) {
1318 clear_bit(S_SCANNING, &il->status);
1319 clear_bit(S_SCAN_HW, &il->status);
1320 clear_bit(S_SCAN_ABORTING, &il->status);
1321 il_complete_scan(il, true);
1325 il_do_scan_abort(struct il_priv *il)
1329 lockdep_assert_held(&il->mutex);
1331 if (!test_bit(S_SCANNING, &il->status)) {
1336 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1341 ret = il_send_scan_abort(il);
1344 il_force_scan_end(il);
1353 il_scan_cancel(struct il_priv *il)
1356 queue_work(il->workqueue, &il->abort_scan);
1367 il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1371 lockdep_assert_held(&il->mutex);
1375 il_do_scan_abort(il);
1378 if (!test_bit(S_SCAN_HW, &il->status))
1383 return test_bit(S_SCAN_HW, &il->status);
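il_scan_cancel_timeout() kicks off the abort and then waits, bounded by ms, for the hardware-scan bit to clear; its return value is the still-scanning state. A kernel-context sketch of the bounded poll; the sleep interval is an assumption:

/* Sketch of the bounded wait in il_scan_cancel_timeout(); the
 * 20 ms poll interval is assumed, not taken from the listing. */
static bool demo_wait_scan_idle(struct il_priv *il, unsigned long ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(ms);

        while (time_before_eq(jiffies, timeout)) {
                if (!test_bit(S_SCAN_HW, &il->status))
                        return true;            /* hardware went idle */
                msleep(20);
        }
        return !test_bit(S_SCAN_HW, &il->status);
}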
1389 il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
1402 il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
1407 il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
1416 il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
1427 le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
1433 il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
1444 clear_bit(S_SCAN_HW, &il->status);
1447 (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
1448 jiffies_to_msecs(jiffies - il->scan_start));
1450 queue_work(il->workqueue, &il->scan_completed);
1454 il_setup_rx_scan_handlers(struct il_priv *il)
1457 il->handlers[C_SCAN] = il_hdl_scan;
1458 il->handlers[N_SCAN_START] = il_hdl_scan_start;
1459 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
1460 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
1465 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
1478 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
1489 if (il_is_any_associated(il)) {
1495 value = il->vif ? il->vif->bss_conf.beacon_int : 0;
1507 il_init_scan_params(struct il_priv *il)
1509 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1510 if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
1511 il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
1512 if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
1513 il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
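fls() returns the 1-based position of the highest set bit (0 for an empty mask), so fls(valid_tx_ant) - 1 in il_init_scan_params() picks the highest-numbered valid TX antenna. A standalone, runnable illustration with a hypothetical mask:

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(). */
static int demo_fls(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int valid_tx_ant = 0x3;        /* hypothetical: antennas A+B */

        printf("ant_idx = %d\n", demo_fls(valid_tx_ant) - 1);  /* prints 1 */
        return 0;
}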
1518 il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
1522 lockdep_assert_held(&il->mutex);
1524 cancel_delayed_work(&il->scan_check);
1526 if (!il_is_ready_rf(il)) {
1531 if (test_bit(S_SCAN_HW, &il->status)) {
1536 if (test_bit(S_SCAN_ABORTING, &il->status)) {
1543 set_bit(S_SCANNING, &il->status);
1544 il->scan_start = jiffies;
1546 ret = il->ops->request_scan(il, vif);
1548 clear_bit(S_SCANNING, &il->status);
1552 queue_delayed_work(il->workqueue, &il->scan_check,
1563 struct il_priv *il = hw->priv;
1571 mutex_lock(&il->mutex);
1574 if (test_bit(S_SCANNING, &il->status)) {
1581 il->scan_request = req;
1582 il->scan_vif = vif;
1583 il->scan_band = req->channels[0]->band;
1585 ret = il_scan_initiate(il, vif);
1589 mutex_unlock(&il->mutex);
1598 struct il_priv *il =
1606 mutex_lock(&il->mutex);
1607 il_force_scan_end(il);
1608 mutex_unlock(&il->mutex);
1615 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1662 struct il_priv *il = container_of(work, struct il_priv, abort_scan);
1668 mutex_lock(&il->mutex);
1669 il_scan_cancel_timeout(il, 200);
1670 mutex_unlock(&il->mutex);
1676 struct il_priv *il = container_of(work, struct il_priv, scan_completed);
1681 cancel_delayed_work(&il->scan_check);
1683 mutex_lock(&il->mutex);
1685 aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
1689 if (!test_and_clear_bit(S_SCANNING, &il->status)) {
1694 il_complete_scan(il, aborted);
1698 if (!il_is_ready_rf(il))
1705 il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
1706 il_set_tx_power(il, il->tx_power_next, false);
1708 il->ops->post_scan(il);
1711 mutex_unlock(&il->mutex);
1715 il_setup_scan_deferred_work(struct il_priv *il)
1717 INIT_WORK(&il->scan_completed, il_bg_scan_completed);
1718 INIT_WORK(&il->abort_scan, il_bg_abort_scan);
1719 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
1724 il_cancel_scan_deferred_work(struct il_priv *il)
1726 cancel_work_sync(&il->abort_scan);
1727 cancel_work_sync(&il->scan_completed);
1729 if (cancel_delayed_work_sync(&il->scan_check)) {
1730 mutex_lock(&il->mutex);
1731 il_force_scan_end(il);
1732 mutex_unlock(&il->mutex);
1737 /* il->sta_lock must be held */
1739 il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1742 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1744 sta_id, il->stations[sta_id].sta.sta.addr);
1746 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1749 il->stations[sta_id].sta.sta.addr);
1751 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1753 il->stations[sta_id].sta.sta.addr);
1758 il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
1772 spin_lock_irqsave(&il->sta_lock, flags);
1777 il_sta_ucode_activate(il, sta_id);
1797 il->stations[sta_id].sta.mode ==
1799 il->stations[sta_id].sta.sta.addr);
1810 il->stations[sta_id].sta.mode ==
1812 spin_unlock_irqrestore(&il->sta_lock, flags);
1818 il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
1823 il_process_add_sta_resp(il, addsta, pkt, false);
1828 il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1850 cmd.len = il->ops->build_addsta_hcmd(sta, data);
1851 ret = il_send_cmd(il, &cmd);
1858 ret = il_process_add_sta_resp(il, sta, pkt, true);
1860 il_free_pages(il, cmd.reply_page);
1867 il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
1880 sta_flags = il->stations[idx].sta.station_flags;
1906 if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
1911 il->stations[idx].sta.station_flags = sta_flags;
1922 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
1933 sta_id = il->hw_params.bcast_id;
1935 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1936 if (ether_addr_equal(il->stations[i].sta.sta.addr,
1942 if (!il->stations[i].used &&
1959 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1964 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1965 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1966 ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
1972 station = &il->stations[sta_id];
1975 il->num_stations++;
1989 il_set_ht_add_station(il, sta_id, sta);
1992 rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
2007 il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
2016 spin_lock_irqsave(&il->sta_lock, flags_spin);
2017 sta_id = il_prep_station(il, addr, is_ap, sta);
2020 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2029 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
2031 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2035 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
2036 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2039 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2043 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
2044 memcpy(&sta_cmd, &il->stations[sta_id].sta,
2046 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2049 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2051 spin_lock_irqsave(&il->sta_lock, flags_spin);
2053 il->stations[sta_id].sta.sta.addr);
2054 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2055 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2056 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
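il_add_station_common() shows the driver's copy-then-send discipline: the station entry is snapshotted into a stack copy under sta_lock (lines 2044-2046), the lock is dropped, and only then is the sleeping sync command issued. A kernel-context sketch of just that core:

/* Sketch of the copy-under-lock, send-without-lock core of
 * il_add_station_common(); error rollback is omitted. */
static int demo_push_station(struct il_priv *il, u8 sta_id)
{
        struct il_addsta_cmd sta_cmd;
        unsigned long flags;

        spin_lock_irqsave(&il->sta_lock, flags);
        memcpy(&sta_cmd, &il->stations[sta_id].sta, sizeof(sta_cmd));
        spin_unlock_irqrestore(&il->sta_lock, flags);

        return il_send_add_sta(il, &sta_cmd, CMD_SYNC); /* may sleep */
}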
2066 * il->sta_lock must be held
2069 il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
2072 if ((il->stations[sta_id].
2077 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
2079 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
2084 il_send_remove_station(struct il_priv *il, const u8 *addr, int sta_id,
2106 ret = il_send_cmd(il, &cmd);
2121 spin_lock_irqsave(&il->sta_lock, flags_spin);
2122 il_sta_ucode_deactivate(il, sta_id);
2123 spin_unlock_irqrestore(&il->sta_lock,
2134 il_free_pages(il, cmd.reply_page);
2143 il_remove_station(struct il_priv *il, const u8 sta_id, const u8 *addr)
2147 if (!il_is_ready(il)) {
2163 spin_lock_irqsave(&il->sta_lock, flags);
2165 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2170 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2175 if (il->stations[sta_id].used & IL_STA_LOCAL) {
2176 kfree(il->stations[sta_id].lq);
2177 il->stations[sta_id].lq = NULL;
2180 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2182 il->num_stations--;
2184 BUG_ON(il->num_stations < 0);
2186 spin_unlock_irqrestore(&il->sta_lock, flags);
2188 return il_send_remove_station(il, addr, sta_id, false);
2190 spin_unlock_irqrestore(&il->sta_lock, flags);
2204 il_clear_ucode_stations(struct il_priv *il)
2212 spin_lock_irqsave(&il->sta_lock, flags_spin);
2213 for (i = 0; i < il->hw_params.max_stations; i++) {
2214 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2216 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2220 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2236 il_restore_stations(struct il_priv *il)
2246 if (!il_is_ready(il)) {
2252 spin_lock_irqsave(&il->sta_lock, flags_spin);
2253 for (i = 0; i < il->hw_params.max_stations; i++) {
2254 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2255 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2257 il->stations[i].sta.sta.addr);
2258 il->stations[i].sta.mode = 0;
2259 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2264 for (i = 0; i < il->hw_params.max_stations; i++) {
2265 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2266 memcpy(&sta_cmd, &il->stations[i].sta,
2269 if (il->stations[i].lq) {
2270 memcpy(&lq, il->stations[i].lq,
2274 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2275 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2277 spin_lock_irqsave(&il->sta_lock, flags_spin);
2279 il->stations[i].sta.sta.addr);
2280 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
2281 il->stations[i].used &=
2283 spin_unlock_irqrestore(&il->sta_lock,
2291 il_send_lq_cmd(il, &lq, CMD_SYNC, true);
2292 spin_lock_irqsave(&il->sta_lock, flags_spin);
2293 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2297 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2307 il_get_free_ucode_key_idx(struct il_priv *il)
2311 for (i = 0; i < il->sta_key_max_num; i++)
2312 if (!test_and_set_bit(i, &il->ucode_key_table))
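il_get_free_ucode_key_idx() uses test_and_set_bit() as an atomic allocator: the first clear bit is claimed and its index returned, so two concurrent callers can never be handed the same key slot. A kernel-context sketch; WEP_INVALID_OFFSET as the exhausted-table return is an assumption:

/* Sketch of the atomic bitmap allocator above; the sentinel
 * returned on exhaustion is assumed. */
static int demo_alloc_key_slot(struct il_priv *il)
{
        int i;

        for (i = 0; i < il->sta_key_max_num; i++)
                if (!test_and_set_bit(i, &il->ucode_key_table))
                        return i;       /* bit was clear, now ours */

        return WEP_INVALID_OFFSET;      /* assumed failure value */
}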
2320 il_dealloc_bcast_stations(struct il_priv *il)
2325 spin_lock_irqsave(&il->sta_lock, flags);
2326 for (i = 0; i < il->hw_params.max_stations; i++) {
2327 if (!(il->stations[i].used & IL_STA_BCAST))
2330 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2331 il->num_stations--;
2332 BUG_ON(il->num_stations < 0);
2333 kfree(il->stations[i].lq);
2334 il->stations[i].lq = NULL;
2336 spin_unlock_irqrestore(&il->sta_lock, flags);
2342 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2354 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2371 il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
2375 if (il->ht.enabled)
2378 D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2399 il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
2415 spin_lock_irqsave(&il->sta_lock, flags_spin);
2416 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2417 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2420 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2422 il_dump_lq_cmd(il, lq);
2425 if (il_is_lq_table_valid(il, lq))
2426 ret = il_send_cmd(il, &cmd);
2437 spin_lock_irqsave(&il->sta_lock, flags_spin);
2438 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2439 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2449 struct il_priv *il = hw->priv;
2453 mutex_lock(&il->mutex);
2456 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
2461 mutex_unlock(&il->mutex);
2555 il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
2558 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
2567 if (test_bit(S_POWER_PMI, &il->status)) {
2568 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2573 il_set_bit(il, CSR_GP_CNTRL,
2579 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2585 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2596 il_rx_queue_alloc(struct il_priv *il)
2598 struct il_rx_queue *rxq = &il->rxq;
2599 struct device *dev = &il->pci_dev->dev;
2630 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2638 il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2648 memcpy(&il->measure_report, report, sizeof(*report));
2649 il->measurement_status |= MEASUREMENT_READY;
2657 il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
2666 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2710 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2719 if (test_bit(S_POWER_PMI, &il->status)) {
2723 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2728 il_set_bit(il, CSR_GP_CNTRL,
2733 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2741 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
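il_txq_update_write_ptr() (and its RX twin at line 2555) splits on S_POWER_PMI: if power management may have put the device to sleep, the driver inspects CSR_UCODE_DRV_GP1 and, when asleep, only requests MAC access instead of writing the pointer; otherwise HBUS_TARG_WRPTR is written directly. A kernel-context sketch; the MAC-sleep bit name is an assumption:

/* Sketch of the power-aware write-pointer kick; the
 * CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP name is an assumption. */
static void demo_txq_kick(struct il_priv *il, struct il_tx_queue *txq,
                          int txq_id)
{
        u32 val = txq->q.write_ptr | (txq_id << 8);

        if (test_bit(S_POWER_PMI, &il->status)) {
                u32 reg = _il_rd(il, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        /* asleep: ask for MAC access, write later */
                        il_set_bit(il, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        return;
                }
                il_wr(il, HBUS_TARG_WRPTR, val);        /* grab-style write */
                return;
        }
        _il_wr(il, HBUS_TARG_WRPTR, val);       /* device awake: direct */
}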
2750 il_tx_queue_unmap(struct il_priv *il, int txq_id)
2752 struct il_tx_queue *txq = &il->txq[txq_id];
2759 il->ops->txq_free_tfd(il, txq);
2774 il_tx_queue_free(struct il_priv *il, int txq_id)
2776 struct il_tx_queue *txq = &il->txq[txq_id];
2777 struct device *dev = &il->pci_dev->dev;
2780 il_tx_queue_unmap(il, txq_id);
2790 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2812 il_cmd_queue_unmap(struct il_priv *il)
2814 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2825 dma_unmap_single(&il->pci_dev->dev,
2837 dma_unmap_single(&il->pci_dev->dev,
2854 il_cmd_queue_free(struct il_priv *il)
2856 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2857 struct device *dev = &il->pci_dev->dev;
2860 il_cmd_queue_unmap(il);
2870 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2930 il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
2964 il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2966 struct device *dev = &il->pci_dev->dev;
2967 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2971 if (id != il->cmd_queue) {
3004 il_tx_queue_init(struct il_priv *il, u32 txq_id)
3008 struct il_tx_queue *txq = &il->txq[txq_id];
3018 if (txq_id == il->cmd_queue) {
3046 ret = il_tx_queue_alloc(il, txq, txq_id);
3061 il_queue_init(il, &txq->q, slots, txq_id);
3064 il->ops->txq_init(il, txq);
3081 il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3084 struct il_tx_queue *txq = &il->txq[txq_id];
3086 if (txq_id == il->cmd_queue) {
3098 il_queue_init(il, &txq->q, slots, txq_id);
3101 il->ops->txq_init(il, txq);
3109 * @il: device private data pointer
3117 il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
3119 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3128 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
3140 if (il_is_rfkill(il) || il_is_ctkill(il)) {
3142 il_is_rfkill(il) ? "RF" : "CT");
3146 spin_lock_irqsave(&il->hcmd_lock, flags);
3149 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3152 queue_work(il->workqueue, &il->restart);
3161 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3180 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
3192 q->write_ptr, idx, il->cmd_queue);
3199 idx, il->cmd_queue);
3203 phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size,
3205 if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) {
3214 if (il->ops->txq_update_byte_cnt_tbl)
3216 il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
3218 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
3223 il_txq_update_write_ptr(il, txq);
3226 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3238 il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
3240 struct il_tx_queue *txq = &il->txq[txq_id];
3257 queue_work(il->workqueue, &il->restart);
3272 il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
3282 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3289 (txq_id != il->cmd_queue,
3291 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
3292 il->txq[il->cmd_queue].q.write_ptr)) {
3293 il_print_hex_error(il, pkt, 32);
3303 dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping),
3311 meta->callback(il, cmd, pkt);
3313 spin_lock_irqsave(&il->hcmd_lock, flags);
3315 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
3318 clear_bit(S_HCMD_ACTIVE, &il->status);
3321 wake_up(&il->wait_command_queue);
3327 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3365 il_init_ht_hw_capab(const struct il_priv *il,
3370 u8 rx_chains_num = il->hw_params.rx_chains_num;
3371 u8 tx_chains_num = il->hw_params.tx_chains_num;
3380 if (il->hw_params.ht40_channel & BIT(band)) {
3387 if (il->cfg->mod_params->amsdu_size_8K)
3418 il_init_geos(struct il_priv *il)
3428 if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
3429 il->bands[NL80211_BAND_5GHZ].n_bitrates) {
3431 set_bit(S_GEO_CONFIGURED, &il->status);
3436 kcalloc(il->channel_count, sizeof(struct ieee80211_channel),
3448 sband = &il->bands[NL80211_BAND_5GHZ];
3454 if (il->cfg->sku & IL_SKU_N)
3455 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
3457 sband = &il->bands[NL80211_BAND_2GHZ];
3463 if (il->cfg->sku & IL_SKU_N)
3464 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
3466 il->ieee_channels = channels;
3467 il->ieee_rates = rates;
3469 for (i = 0; i < il->channel_count; i++) {
3470 ch = &il->channel_info[i];
3475 sband = &il->bands[ch->band];
3511 il->tx_power_device_lmt = max_tx_power;
3512 il->tx_power_user_lmt = max_tx_power;
3513 il->tx_power_next = max_tx_power;
3515 if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
3516 (il->cfg->sku & IL_SKU_A)) {
3519 il->pci_dev->device, il->pci_dev->subsystem_device);
3520 il->cfg->sku &= ~IL_SKU_A;
3524 il->bands[NL80211_BAND_2GHZ].n_channels,
3525 il->bands[NL80211_BAND_5GHZ].n_channels);
3527 set_bit(S_GEO_CONFIGURED, &il->status);
3537 il_free_geos(struct il_priv *il)
3539 kfree(il->ieee_channels);
3540 kfree(il->ieee_rates);
3541 clear_bit(S_GEO_CONFIGURED, &il->status);
3546 il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
3551 ch_info = il_get_channel_info(il, band, channel);
3566 il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
3568 if (!il->ht.enabled || !il->ht.is_40mhz)
3579 if (il->disable_ht40)
3583 return il_is_channel_extension(il, il->band,
3584 le16_to_cpu(il->staging.channel),
3585 il->ht.extension_chan_offset);
3624 il_send_rxon_timing(struct il_priv *il)
3630 struct ieee80211_vif *vif = il->vif;
3632 conf = &il->hw->conf;
3634 lockdep_assert_held(&il->mutex);
3636 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));
3638 il->timing.timestamp = cpu_to_le64(il->timestamp);
3639 il->timing.listen_interval = cpu_to_le16(conf->listen_interval);
3647 il->timing.atim_win = 0;
3651 il->hw_params.max_beacon_itrvl *
3653 il->timing.beacon_interval = cpu_to_le16(beacon_int);
3655 tsf = il->timestamp; /* tsf is modified by do_div: copy it */
3658 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
3660 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
3663 le16_to_cpu(il->timing.beacon_interval),
3664 le32_to_cpu(il->timing.beacon_init_val),
3665 le16_to_cpu(il->timing.atim_win));
3667 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
3668 &il->timing);
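The beacon_init_val set at line 3658 is the time left until the next beacon boundary: the TSF copy is reduced modulo the beacon interval in microseconds (the do_div() remainder) and that remainder is subtracted from the interval. A standalone worked example with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t tsf = 1000000;                 /* hypothetical TSF, usec */
        uint32_t interval_tm = 100 * 1024;      /* 100 TU in usec */
        uint32_t rem = tsf % interval_tm;       /* what do_div() leaves */

        /* 1000000 % 102400 = 78400, so 24000 usec to the boundary */
        printf("beacon_init_val = %u\n", interval_tm - rem);
        return 0;
}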
3673 il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3675 struct il_rxon_cmd *rxon = &il->staging;
3687 il_check_rxon_cmd(struct il_priv *il)
3689 struct il_rxon_cmd *rxon = &il->staging;
3760 * @il: il_priv whose staging_rxon is compared to its active_rxon
3767 il_full_rxon_required(struct il_priv *il)
3769 const struct il_rxon_cmd *staging = &il->staging;
3770 const struct il_rxon_cmd *active = &il->active;
3787 CHK(!il_is_associated(il));
3821 il_get_lowest_plcp(struct il_priv *il)
3827 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3835 _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
3837 struct il_rxon_cmd *rxon = &il->staging;
3839 if (!il->ht.enabled) {
3848 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
3855 if (il_is_ht40_tx_allowed(il, NULL)) {
3857 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
3860 switch (il->ht.extension_chan_offset) {
3871 switch (il->ht.extension_chan_offset) {
3892 if (il->ops->set_rxon_chain)
3893 il->ops->set_rxon_chain(il);
3897 il->ht.protection, il->ht.extension_chan_offset);
3901 il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
3903 _il_set_rxon_ht(il, ht_conf);
3909 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
3918 max = il->channel_count;
3925 channel = il->channel_info[i].channel;
3926 if (channel == le16_to_cpu(il->staging.channel))
3929 ch_info = il_get_channel_info(il, band, channel);
3946 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3951 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3954 il->staging.channel = cpu_to_le16(channel);
3956 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3958 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3960 il->band = band;
3969 il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
3973 il->staging.flags &=
3976 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3980 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3982 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3984 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3985 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3986 il->staging.flags &= ~RXON_FLG_CCK_MSK;
3995 il_connection_init_rx_config(struct il_priv *il)
3999 memset(&il->staging, 0, sizeof(il->staging));
4001 switch (il->iw_mode) {
4003 il->staging.dev_type = RXON_DEV_TYPE_ESS;
4006 il->staging.dev_type = RXON_DEV_TYPE_ESS;
4007 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
4010 il->staging.dev_type = RXON_DEV_TYPE_IBSS;
4011 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
4012 il->staging.filter_flags =
4016 IL_ERR("Unsupported interface type %d\n", il->vif->type);
4023 if (!hw_to_local(il->hw)->short_preamble)
4024 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
4026 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
4030 il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));
4033 ch_info = &il->channel_info[0];
4035 il->staging.channel = cpu_to_le16(ch_info->channel);
4036 il->band = ch_info->band;
4038 il_set_flags_for_band(il, il->band, il->vif);
4040 il->staging.ofdm_basic_rates =
4042 il->staging.cck_basic_rates =
4046 il->staging.flags &=
4048 if (il->vif)
4049 memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);
4051 il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
4052 il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
4057 il_set_rate(struct il_priv *il)
4063 hw = il_get_hw_mode(il, il->band);
4069 il->active_rate = 0;
4074 il->active_rate |= (1 << rate->hw_value);
4077 D_RATE("Set active_rate = %0x\n", il->active_rate);
4079 il->staging.cck_basic_rates =
4082 il->staging.ofdm_basic_rates =
4088 il_chswitch_done(struct il_priv *il, bool is_success)
4090 if (test_bit(S_EXIT_PENDING, &il->status))
4093 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4094 ieee80211_chswitch_done(il->vif, is_success, 0);
4099 il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
4103 struct il_rxon_cmd *rxon = (void *)&il->active;
4105 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4108 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
4110 il->staging.channel = csa->channel;
4112 il_chswitch_done(il, true);
4116 il_chswitch_done(il, false);
4123 il_print_rx_config_cmd(struct il_priv *il)
4125 struct il_rxon_cmd *rxon = &il->staging;
4128 il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4145 il_irq_handle_error(struct il_priv *il)
4148 set_bit(S_FW_ERROR, &il->status);
4151 clear_bit(S_HCMD_ACTIVE, &il->status);
4153 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
4155 il->ops->dump_nic_error_log(il);
4156 if (il->ops->dump_fh)
4157 il->ops->dump_fh(il, NULL, false);
4159 if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
4160 il_print_rx_config_cmd(il);
4163 wake_up(&il->wait_command_queue);
4167 clear_bit(S_READY, &il->status);
4169 if (!test_bit(S_EXIT_PENDING, &il->status)) {
4173 if (il->cfg->mod_params->restart_fw)
4174 queue_work(il->workqueue, &il->restart);
4180 _il_apm_stop_master(struct il_priv *il)
4185 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4188 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4199 _il_apm_stop(struct il_priv *il)
4201 lockdep_assert_held(&il->reg_lock);
4206 _il_apm_stop_master(il);
4209 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
4217 _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4222 il_apm_stop(struct il_priv *il)
4226 spin_lock_irqsave(&il->reg_lock, flags);
4227 _il_apm_stop(il);
4228 spin_unlock_irqrestore(&il->reg_lock, flags);
4238 il_apm_init(struct il_priv *il)
4251 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4258 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4262 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
4269 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
4280 if (il->cfg->set_l0s) {
4281 ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
4284 il_set_bit(il, CSR_GIO_REG,
4289 il_clear_bit(il, CSR_GIO_REG,
4296 if (il->cfg->pll_cfg_val)
4297 il_set_bit(il, CSR_ANA_PLL_CFG,
4298 il->cfg->pll_cfg_val);
4304 il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4312 _il_poll_bit(il, CSR_GP_CNTRL,
4328 if (il->cfg->use_bsm)
4329 il_wr_prph(il, APMG_CLK_EN_REG,
4332 il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
4336 il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
4345 il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4351 lockdep_assert_held(&il->mutex);
4353 if (il->tx_power_user_lmt == tx_power && !force)
4356 if (!il->ops->send_tx_power)
4365 if (tx_power > il->tx_power_device_lmt) {
4367 tx_power, il->tx_power_device_lmt);
4371 if (!il_is_ready_rf(il))
4376 il->tx_power_next = tx_power;
4379 defer = test_bit(S_SCANNING, &il->status) ||
4380 memcmp(&il->active, &il->staging, sizeof(il->staging));
4386 prev_tx_power = il->tx_power_user_lmt;
4387 il->tx_power_user_lmt = tx_power;
4389 ret = il->ops->send_tx_power(il);
4393 il->tx_power_user_lmt = prev_tx_power;
4394 il->tx_power_next = prev_tx_power;
4401 il_send_bt_config(struct il_priv *il)
4418 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4424 il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4431 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4434 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4440 il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
4452 il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4458 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4463 il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
4478 il_clear_isr_stats(struct il_priv *il)
4480 memset(&il->isr_stats, 0, sizeof(il->isr_stats));
4488 struct il_priv *il = hw->priv;
4494 if (!il_is_ready_rf(il)) {
4506 spin_lock_irqsave(&il->lock, flags);
4508 il->qos_data.def_qos_parm.ac[q].cw_min =
4510 il->qos_data.def_qos_parm.ac[q].cw_max =
4512 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
4513 il->qos_data.def_qos_parm.ac[q].edca_txop =
4516 il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
4518 spin_unlock_irqrestore(&il->lock, flags);
4528 struct il_priv *il = hw->priv;
4533 ret = (il->ibss_manager == IL_IBSS_MANAGER);
4541 il_set_mode(struct il_priv *il)
4543 il_connection_init_rx_config(il);
4545 if (il->ops->set_rxon_chain)
4546 il->ops->set_rxon_chain(il);
4548 return il_commit_rxon(il);
4554 struct il_priv *il = hw->priv;
4558 mutex_lock(&il->mutex);
4561 if (!il_is_ready_rf(il)) {
4571 reset = (il->vif == vif);
4572 if (il->vif && !reset) {
4577 il->vif = vif;
4578 il->iw_mode = vif->type;
4580 err = il_set_mode(il);
4584 il->vif = NULL;
4585 il->iw_mode = NL80211_IFTYPE_STATION;
4591 mutex_unlock(&il->mutex);
4598 il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
4600 lockdep_assert_held(&il->mutex);
4602 if (il->scan_vif == vif) {
4603 il_scan_cancel_timeout(il, 200);
4604 il_force_scan_end(il);
4607 il_set_mode(il);
4613 struct il_priv *il = hw->priv;
4615 mutex_lock(&il->mutex);
4618 WARN_ON(il->vif != vif);
4619 il->vif = NULL;
4620 il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
4621 il_teardown_interface(il, vif);
4622 eth_zero_addr(il->bssid);
4625 mutex_unlock(&il->mutex);
4630 il_alloc_txq_mem(struct il_priv *il)
4632 if (!il->txq)
4633 il->txq =
4634 kcalloc(il->cfg->num_of_queues,
4637 if (!il->txq) {
4646 il_free_txq_mem(struct il_priv *il)
4648 kfree(il->txq);
4649 il->txq = NULL;
4654 il_force_reset(struct il_priv *il, bool external)
4658 if (test_bit(S_EXIT_PENDING, &il->status))
4661 force_reset = &il->force_reset;
4684 if (!external && !il->cfg->mod_params->restart_fw) {
4693 set_bit(S_FW_ERROR, &il->status);
4694 wake_up(&il->wait_command_queue);
4699 clear_bit(S_READY, &il->status);
4700 queue_work(il->workqueue, &il->restart);
4710 struct il_priv *il = hw->priv;
4713 mutex_lock(&il->mutex);
4722 if (!il->vif || !il_is_ready_rf(il)) {
4734 il->iw_mode = newtype;
4735 il_teardown_interface(il, vif);
4740 mutex_unlock(&il->mutex);
4749 struct il_priv *il = hw->priv;
4753 mutex_lock(&il->mutex);
4756 if (il->txq == NULL)
4759 for (i = 0; i < il->hw_params.max_txq_num; i++) {
4762 if (i == il->cmd_queue)
4765 q = &il->txq[i].q;
4778 mutex_unlock(&il->mutex);
4787 il_check_stuck_queue(struct il_priv *il, int cnt)
4789 struct il_tx_queue *txq = &il->txq[cnt];
4802 msecs_to_jiffies(il->cfg->wd_timeout);
4807 ret = il_force_reset(il, false);
4827 struct il_priv *il = from_timer(il, t, watchdog);
4831 if (test_bit(S_EXIT_PENDING, &il->status))
4834 timeout = il->cfg->wd_timeout;
4839 if (il_check_stuck_queue(il, il->cmd_queue))
4843 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4845 if (cnt == il->cmd_queue)
4847 if (il_check_stuck_queue(il, cnt))
4851 mod_timer(&il->watchdog,
4857 il_setup_watchdog(struct il_priv *il)
4859 unsigned int timeout = il->cfg->wd_timeout;
4862 mod_timer(&il->watchdog,
4865 del_timer(&il->watchdog);
4876 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4887 interval) & (il_beacon_time_mask_high(il,
4888 il->hw_params.
4889 beacon_time_tsf_bits) >> il->
4892 (usec % interval) & il_beacon_time_mask_low(il,
4893 il->hw_params.
4896 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
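il_usecs_to_beacons() packs a time into a split format: whole beacon intervals in the high bits and microseconds into the current interval in the low beacon_time_tsf_bits bits; il_add_beacon_time() below then sums two such values, adding 1 << beacon_time_tsf_bits when the low fields wrap. A standalone sketch of the packing with a hypothetical 22-bit low field:

#include <stdint.h>
#include <stdio.h>

#define TSF_BITS 22     /* hypothetical beacon_time_tsf_bits */

int main(void)
{
        uint32_t interval_us = 100 * 1024;      /* hypothetical interval */
        uint32_t usec = 350000;
        uint32_t quot = usec / interval_us;     /* 3 whole intervals */
        uint32_t rem = usec % interval_us;      /* 42800 usec into the next */

        printf("packed = 0x%08x\n", (quot << TSF_BITS) + rem);
        return 0;
}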
4904 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4907 u32 base_low = base & il_beacon_time_mask_low(il,
4908 il->hw_params.
4910 u32 addon_low = addon & il_beacon_time_mask_low(il,
4911 il->hw_params.
4914 u32 res = (base & il_beacon_time_mask_high(il,
4915 il->hw_params.
4917 (addon & il_beacon_time_mask_high(il,
4918 il->hw_params.
4925 res += (1 << il->hw_params.beacon_time_tsf_bits);
4927 res += (1 << il->hw_params.beacon_time_tsf_bits);
4938 struct il_priv *il = dev_get_drvdata(device);
4947 il_apm_stop(il);
4956 struct il_priv *il = pci_get_drvdata(pdev);
4965 il_enable_interrupts(il);
4967 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4971 set_bit(S_RFKILL, &il->status);
4973 clear_bit(S_RFKILL, &il->status);
4975 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
4986 il_update_qos(struct il_priv *il)
4988 if (test_bit(S_EXIT_PENDING, &il->status))
4991 il->qos_data.def_qos_parm.qos_flags = 0;
4993 if (il->qos_data.qos_active)
4994 il->qos_data.def_qos_parm.qos_flags |=
4997 if (il->ht.enabled)
4998 il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
5001 il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);
5003 il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
5004 &il->qos_data.def_qos_parm, NULL);
5013 struct il_priv *il = hw->priv;
5017 struct il_ht_config *ht_conf = &il->current_ht_config;
5024 mutex_lock(&il->mutex);
5028 if (unlikely(test_bit(S_SCANNING, &il->status))) {
5036 il->current_ht_config.smps = conf->smps_mode;
5045 if (il->ops->set_rxon_chain)
5046 il->ops->set_rxon_chain(il);
5058 ch_info = il_get_channel_info(il, channel->band, ch);
5065 if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
5072 spin_lock_irqsave(&il->lock, flags);
5075 if (il->ht.enabled != conf_is_ht(conf)) {
5076 il->ht.enabled = conf_is_ht(conf);
5079 if (il->ht.enabled) {
5081 il->ht.extension_chan_offset =
5083 il->ht.is_40mhz = true;
5085 il->ht.extension_chan_offset =
5087 il->ht.is_40mhz = true;
5089 il->ht.extension_chan_offset =
5091 il->ht.is_40mhz = false;
5094 il->ht.is_40mhz = false;
5100 il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
5105 if ((le16_to_cpu(il->staging.channel) != ch))
5106 il->staging.flags = 0;
5108 il_set_rxon_channel(il, channel);
5109 il_set_rxon_ht(il, ht_conf);
5111 il_set_flags_for_band(il, channel->band, il->vif);
5113 spin_unlock_irqrestore(&il->lock, flags);
5115 if (il->ops->update_bcast_stations)
5116 ret = il->ops->update_bcast_stations(il);
5122 il_set_rate(il);
5126 il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
5127 if (!il->power_data.ps_disabled)
5129 ret = il_power_update_mode(il, false);
5135 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
5138 il_set_tx_power(il, conf->power_level, false);
5141 if (!il_is_ready(il)) {
5149 if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
5150 il_commit_rxon(il);
5154 il_update_qos(il);
5158 mutex_unlock(&il->mutex);
5167 struct il_priv *il = hw->priv;
5170 mutex_lock(&il->mutex);
5173 spin_lock_irqsave(&il->lock, flags);
5175 memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
5178 dev_consume_skb_irq(il->beacon_skb);
5179 il->beacon_skb = NULL;
5180 il->timestamp = 0;
5182 spin_unlock_irqrestore(&il->lock, flags);
5184 il_scan_cancel_timeout(il, 100);
5185 if (!il_is_ready_rf(il)) {
5187 mutex_unlock(&il->mutex);
5192 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5193 il_commit_rxon(il);
5195 il_set_rate(il);
5198 mutex_unlock(&il->mutex);
5203 il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5205 struct il_ht_config *ht_conf = &il->current_ht_config;
5211 if (!il->ht.enabled)
5214 il->ht.protection =
5216 il->ht.non_gf_sta_present =
5263 il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5270 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5271 il->staging.assoc_id = 0;
5272 il_commit_rxon(il);
5278 struct il_priv *il = hw->priv;
5288 lockdep_assert_held(&il->mutex);
5290 if (!il->beacon_enabled) {
5296 spin_lock_irqsave(&il->lock, flags);
5297 dev_consume_skb_irq(il->beacon_skb);
5298 il->beacon_skb = skb;
5301 il->timestamp = le64_to_cpu(timestamp);
5304 spin_unlock_irqrestore(&il->lock, flags);
5306 if (!il_is_ready_rf(il)) {
5311 il->ops->post_associate(il);
5318 struct il_priv *il = hw->priv;
5321 mutex_lock(&il->mutex);
5324 if (!il_is_alive(il)) {
5326 mutex_unlock(&il->mutex);
5333 spin_lock_irqsave(&il->lock, flags);
5334 il->qos_data.qos_active = bss_conf->qos;
5335 il_update_qos(il);
5336 spin_unlock_irqrestore(&il->lock, flags);
5342 il->beacon_enabled = true;
5344 il->beacon_enabled = false;
5359 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
5366 if (il_scan_cancel_timeout(il, 100)) {
5368 mutex_unlock(&il->mutex);
5373 memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
5376 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5390 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
5392 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
5397 if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
5398 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
5400 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
5402 il->staging.flags |= RXON_FLG_SELF_CTS_EN;
5404 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
5414 il->staging.ofdm_basic_rates =
5417 il->staging.ofdm_basic_rates =
5419 il->staging.cck_basic_rates =
5425 il_ht_conf(il, vif);
5427 if (il->ops->set_rxon_chain)
5428 il->ops->set_rxon_chain(il);
5434 il->timestamp = bss_conf->sync_tsf;
5436 if (!il_is_rfkill(il))
5437 il->ops->post_associate(il);
5439 il_set_no_assoc(il, vif);
5442 if (changes && il_is_associated(il) && vif->cfg.aid) {
5444 ret = il_send_rxon_assoc(il);
5447 memcpy((void *)&il->active, &il->staging,
5454 memcpy(il->staging.bssid_addr, bss_conf->bssid,
5456 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5457 il->ops->config_ap(il);
5459 il_set_no_assoc(il, vif);
5463 ret = il->ops->manage_ibss_station(il, vif,
5472 mutex_unlock(&il->mutex);
5479 struct il_priv *il = data;
5483 if (!il)
5486 spin_lock_irqsave(&il->lock, flags);
5492 inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */
5493 _il_wr(il, CSR_INT_MASK, 0x00000000);
5496 inta = _il_rd(il, CSR_INT);
5497 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
5521 tasklet_schedule(&il->irq_tasklet);
5524 spin_unlock_irqrestore(&il->lock, flags);
5530 if (test_bit(S_INT_ENABLED, &il->status))
5531 il_enable_interrupts(il);
5532 spin_unlock_irqrestore(&il->lock, flags);
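The interrupt handler above follows the classic masked-ISR discipline: under il->lock it writes 0 to CSR_INT_MASK, snapshots CSR_INT and CSR_FH_INT_STATUS, and defers real work to the tasklet (which re-enables interrupts when done); only the nothing-pending exit restores the mask itself, gated on S_INT_ENABLED. A condensed kernel-context sketch that omits the shared-IRQ and hardware-gone checks:

/* Condensed sketch of the ISR flow above; the full handler also
 * screens for a vanished device (all-ones reads) and shared IRQs. */
static irqreturn_t demo_isr(int irq, void *data)
{
        struct il_priv *il = data;
        unsigned long flags;
        u32 inta, inta_fh;

        spin_lock_irqsave(&il->lock, flags);
        _il_wr(il, CSR_INT_MASK, 0x00000000);   /* mask everything */
        inta = _il_rd(il, CSR_INT);
        inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

        if (inta || inta_fh) {
                tasklet_schedule(&il->irq_tasklet);     /* unmasks later */
                spin_unlock_irqrestore(&il->lock, flags);
                return IRQ_HANDLED;
        }

        if (test_bit(S_INT_ENABLED, &il->status))
                il_enable_interrupts(il);       /* nothing pending: unmask */
        spin_unlock_irqrestore(&il->lock, flags);
        return IRQ_NONE;
}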
5542 il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,