1/****************************************************************************** 2 * 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 4 * 5 * Portions of this file are derived from the ipw3945 project, as well 6 * as portions of the ieee80211 subsystem header files. 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of version 2 of the GNU General Public License as 10 * published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope that it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 * more details. 16 * 17 * You should have received a copy of the GNU General Public License along with 18 * this program; if not, write to the Free Software Foundation, Inc., 19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 20 * 21 * The full GNU General Public License is included in this distribution in the 22 * file called LICENSE. 23 * 24 * Contact Information: 25 * Intel Linux Wireless <ilw@linux.intel.com> 26 * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 27 * 28 *****************************************************************************/ 29 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 32#include <linux/kernel.h> 33#include <linux/module.h> 34#include <linux/init.h> 35#include <linux/pci.h> 36#include <linux/slab.h> 37#include <linux/dma-mapping.h> 38#include <linux/delay.h> 39#include <linux/sched.h> 40#include <linux/skbuff.h> 41#include <linux/netdevice.h> 42#include <linux/wireless.h> 43#include <linux/firmware.h> 44#include <linux/etherdevice.h> 45#include <linux/if_arp.h> 46 47#include <net/mac80211.h> 48 49#include <asm/div64.h> 50 51#define DRV_NAME "iwlagn" 52 53#include "iwl-eeprom.h" 54#include "iwl-dev.h" 55#include "iwl-core.h" 56#include "iwl-io.h" 57#include "iwl-helpers.h" 58#include "iwl-sta.h" 59#include "iwl-calib.h" 60#include "iwl-agn.h" 61 62 63/****************************************************************************** 64 * 65 * module boiler plate 66 * 67 ******************************************************************************/ 68 69/* 70 * module name, copyright, version, etc. 71 */ 72#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" 73 74#ifdef CONFIG_IWLWIFI_DEBUG 75#define VD "d" 76#else 77#define VD 78#endif 79 80#define DRV_VERSION IWLWIFI_VERSION VD 81 82 83MODULE_DESCRIPTION(DRV_DESCRIPTION); 84MODULE_VERSION(DRV_VERSION); 85MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 86MODULE_LICENSE("GPL"); 87MODULE_ALIAS("iwl4965"); 88 89/** 90 * iwl_commit_rxon - commit staging_rxon to hardware 91 * 92 * The RXON command in staging_rxon is committed to the hardware and 93 * the active_rxon structure is updated with the new data. This 94 * function correctly transitions out of the RXON_ASSOC_MSK state if 95 * a HW tune is required based on the RXON structure changes. 
 */
int iwl_commit_rxon(struct iwl_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
	int ret;
	bool new_assoc =
		!!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!iwl_is_alive(priv))
		return -EBUSY;

	/* always get timestamp with Rx frame */
	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = iwl_check_rxon_cmd(priv);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (priv->switch_rxon.switch_in_progress &&
	    (priv->switch_rxon.channel != priv->staging_rxon.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
			      le16_to_cpu(priv->switch_rxon.channel));
		iwl_chswitch_done(priv, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_full_rxon_required(priv)) {
		ret = iwl_send_rxon_assoc(priv);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
		iwl_print_rx_config_cmd(priv);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_is_associated(priv) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
				       sizeof(struct iwl_rxon_cmd),
				       &priv->active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		/* un-assoc RXON clears the uCode station table; rebuild
		 * driver-known stations and WEP keys before proceeding */
		iwl_clear_ucode_stations(priv);
		iwl_restore_stations(priv);
		ret = iwl_restore_default_wep_keys(priv);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(priv->staging_rxon.channel),
		       priv->staging_rxon.bssid_addr);

	iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
		iwl_clear_ucode_stations(priv);
		iwl_restore_stations(priv);
		ret = iwl_restore_default_wep_keys(priv);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	priv->start_calib = 0;
	if (new_assoc) {
		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
	}
	iwl_print_rx_config_cmd(priv);

	iwl_init_sensitivity(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * iwl_update_chain_flags - refresh RX chain config and commit RXON
 *
 * Lets the HW-specific layer update the RX chain selection in the
 * staging RXON (when the set_rxon_chain hook is provided) and then
 * commits the staging RXON to the uCode.
 */
void iwl_update_chain_flags(struct iwl_priv *priv)
{

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);
	iwlcore_commit_rxon(priv);
}

/* Free every frame remaining on the pre-allocated free_frames list. */
static void iwl_clear_free_frames(struct iwl_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl_frame, list));
		priv->frames_count--;
	}

	/* frames_count should now be zero; non-zero means frames were
	 * handed out by iwl_get_free_frame() and never returned */
	if (priv->frames_count) {
		IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
			 priv->frames_count);
		priv->frames_count = 0;
	}
}

/* Take a frame from the free_frames list, or allocate a fresh one (and
 * bump frames_count) when the list is empty. Returns NULL on allocation
 * failure. */
static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IWL_ERR(priv, "Could not allocate frame!\n");
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
	return list_entry(element, struct iwl_frame, list);
}

/* Return a frame obtained from iwl_get_free_frame() to the free list,
 * zeroing its contents first. */
static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

/* Copy the current beacon skb (if any) into @hdr. Returns the number
 * of bytes copied, or 0 when there is no beacon or it does not fit in
 * @left bytes. */
static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
				 struct ieee80211_hdr *hdr,
				 int left)
{
	if (!priv->ibss_beacon)
		return 0;

	if (priv->ibss_beacon->len > left)
		return 0;

	memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);

	return priv->ibss_beacon->len;
}

/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void iwl_set_beacon_tim(struct iwl_priv *priv,
			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
			       u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}

/* Build a complete REPLY_TX_BEACON command (TX command + beacon frame)
 * in @frame. Returns the total command size in bytes, or 0 on error. */
static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
					  struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			   frame_size);

	/* Set up packet rate and flags */
	rate = iwl_rate_get_lowest_plcp(priv);
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
			rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}

/* Build and send a REPLY_TX_BEACON command to the uCode, returning the
 * command's status (0 on success, negative errno otherwise). */
static int iwl_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	unsigned int frame_size;
	int rc;

	frame = iwl_get_free_frame(priv);
	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			"command.\n");
		return -ENOMEM;
	}

	frame_size = iwl_hw_get_beacon_cmd(priv, frame);
	if (!frame_size) {
		IWL_ERR(priv, "Error configuring the beacon command\n");
		iwl_free_frame(priv, frame);
		return -EINVAL;
	}

	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl_free_frame(priv, frame);

	return rc;
}

/* Read back the DMA address stored in TX buffer descriptor @idx of @tfd;
 * bits above 32 live in the low nibble of hi_n_len. */
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

/* Length of TX buffer @idx: the upper 12 bits of hi_n_len. */
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/* Store @addr/@len into TX buffer descriptor @idx and update num_tbs. */
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

/* Number of buffers currently referenced by @tfd (low 5 bits). */
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd (buffer 0 is the command, mapped bidirectional) */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}

/* Append one DMA buffer (@addr/@len) to the TFD at the queue's write
 * pointer; @reset clears the TFD first. Returns 0, or -EINVAL when the
 * TFD already holds the maximum number of buffers. */
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* hardware only handles 36-bit DMA addresses */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/******************************************************************************
 *
 * Generic RX handler implementations
 *
 ******************************************************************************/
/* Handle the uCode ALIVE notification: record the alive response and
 * schedule the matching (init or runtime) alive-start work. */
static void iwl_rx_reply_alive(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init,
		       &pkt->u.alive_frame,
		       sizeof(struct iwl_init_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate...
	 */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else
		IWL_WARN(priv, "uCode did not respond OK.\n");
}

/* Worker: fetch a fresh beacon from mac80211 and send it to the uCode.
 * ieee80211_beacon_get() fails when not in AP mode. */
static void iwl_bg_beacon_update(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
	struct sk_buff *beacon;

	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->vif);

	if (!beacon) {
		IWL_ERR(priv, "update beacon failed\n");
		return;
	}

	mutex_lock(&priv->mutex);
	/* new beacon skb is allocated every time; dispose previous.*/
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = beacon;
	mutex_unlock(&priv->mutex);

	iwl_send_beacon_cmd(priv);
}

/**
 * iwl_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received. We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* dont send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	iwl_send_statistics_request(priv, CMD_ASYNC, false);
}


/* Read @num_events entries of the continuous event log from device SRAM,
 * starting at entry @start_idx, and emit them as trace events. @mode 0
 * means entries carry no timestamp (2 words each); otherwise 3 words. */
static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
				       u32 start_idx, u32 num_events,
				       u32 mode)
{
	u32 i;
	u32 ptr;		/* SRAM byte address of log data */
	u32 ev, time, data;	/* event log data */
	unsigned long reg_flags;

	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	_iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_dev_ucode_cont_event(priv,
							0, time, ev);
		} else {
			data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_dev_ucode_cont_event(priv,
						time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}

/* Trace any uCode events logged since the previous call, accounting for
 * log wrap-around relative to the position recorded in priv->event_log. */
static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity;	/* event log capacity in # entries */
	u32 base;	/* SRAM byte address of event log header */
	u32 mode;	/* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;	/* # times uCode wrapped to top of log */
	u32 next_entry;	/* index of next entry to be written by uCode */

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		capacity = iwl_read_targ_mem(priv, base);
		num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
		mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
		next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
	} else
		return;

	if (num_wraps == priv->event_log.num_wraps) {
		/* no wrap since last time: dump only the new entries */
		iwl_print_cont_event_trace(priv,
				base, priv->event_log.next_entry,
				next_entry - priv->event_log.next_entry,
				mode);
		priv->event_log.non_wraps_count++;
	} else {
		if ((num_wraps - priv->event_log.num_wraps) > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;
		trace_iwlwifi_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		/* log wrapped: dump the tail from the old position (or the
		 * whole tail from next_entry) and then the head up to
		 * next_entry */
		if (next_entry < priv->event_log.next_entry) {
			iwl_print_cont_event_trace(priv, base,
			       priv->event_log.next_entry,
			       capacity - priv->event_log.next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		} else {
			iwl_print_cont_event_trace(priv, base,
			       next_entry, capacity - next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		}
	}
	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}

/**
 * iwl_bg_ucode_trace - Timer callback to log ucode event
 *
 * The timer is continually set to execute every
 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
 * this function is to perform continuous uCode event logging operation
 * if enabled
 */
static void iwl_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->event_log.ucode_trace) {
		iwl_continuous_event_trace(priv);
		/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
		mod_timer(&priv->ucode_trace,
			  jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
	}
}

/* Log a beacon notification from the uCode and, in AP mode (unless
 * exiting), schedule a beacon refresh via the beacon_update work. */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
	    (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
		queue_work(priv->workqueue, &priv->beacon_update);
}

/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = priv->status;	/* snapshot for change check */

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* block host commands while the card is disabled */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* Tell mac80211 about the rfkill state only when it actually
	 * changed; otherwise wake any waiter on the command queue. */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}

/* Select the card's power source: VAUX when requested and the device can
 * signal PME from D3cold, otherwise VMAIN. Always returns 0. */
int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
	if (src == IWL_PWR_SRC_VAUX) {
		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
	} else {
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	}

	return 0;
}

/* Worker: flush all pending TX frames via the HW-specific txfifo_flush
 * hook; does nothing when exiting or under rf-kill. */
static void iwl_bg_tx_flush(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, tx_flush);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* do nothing if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	if (priv->cfg->ops->lib->txfifo_flush) {
		IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
		iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
	}
}

/**
 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
	priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
	    iwl_rx_spectrum_measure_notif;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
	    iwl_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
898 */ 899 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics; 900 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; 901 902 iwl_setup_rx_scan_handlers(priv); 903 904 /* status change handler */ 905 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; 906 907 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = 908 iwl_rx_missed_beacon_notif; 909 /* Rx handlers */ 910 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy; 911 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx; 912 /* block ack */ 913 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; 914 /* Set up hardware specific Rx handlers */ 915 priv->cfg->ops->lib->rx_handler_setup(priv); 916} 917 918/** 919 * iwl_rx_handle - Main entry function for receiving responses from uCode 920 * 921 * Uses the priv->rx_handlers callback function array to invoke 922 * the appropriate handlers, including command responses, 923 * frame-received notifications, and other notifications. 924 */ 925void iwl_rx_handle(struct iwl_priv *priv) 926{ 927 struct iwl_rx_mem_buffer *rxb; 928 struct iwl_rx_packet *pkt; 929 struct iwl_rx_queue *rxq = &priv->rxq; 930 u32 r, i; 931 int reclaim; 932 unsigned long flags; 933 u8 fill_rx = 0; 934 u32 count = 8; 935 int total_empty; 936 937 /* uCode's read index (stored in shared DRAM) indicates the last Rx 938 * buffer that the driver may process (last buffer filled by ucode). 
*/ 939 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; 940 i = rxq->read; 941 942 /* Rx interrupt, but nothing sent from uCode */ 943 if (i == r) 944 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); 945 946 /* calculate total frames need to be restock after handling RX */ 947 total_empty = r - rxq->write_actual; 948 if (total_empty < 0) 949 total_empty += RX_QUEUE_SIZE; 950 951 if (total_empty > (RX_QUEUE_SIZE / 2)) 952 fill_rx = 1; 953 954 while (i != r) { 955 int len; 956 957 rxb = rxq->queue[i]; 958 959 /* If an RXB doesn't have a Rx queue slot associated with it, 960 * then a bug has been introduced in the queue refilling 961 * routines -- catch it here */ 962 BUG_ON(rxb == NULL); 963 964 rxq->queue[i] = NULL; 965 966 pci_unmap_page(priv->pci_dev, rxb->page_dma, 967 PAGE_SIZE << priv->hw_params.rx_page_order, 968 PCI_DMA_FROMDEVICE); 969 pkt = rxb_addr(rxb); 970 971 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 972 len += sizeof(u32); /* account for status word */ 973 trace_iwlwifi_dev_rx(priv, pkt, len); 974 975 /* Reclaim a command buffer only if this packet is a response 976 * to a (driver-originated) command. 977 * If the packet (e.g. Rx frame) originated from uCode, 978 * there is no command buffer to reclaim. 979 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 980 * but apparently a few don't get set; catch them here. */ 981 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && 982 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && 983 (pkt->hdr.cmd != REPLY_RX) && 984 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && 985 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && 986 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && 987 (pkt->hdr.cmd != REPLY_TX); 988 989 /* Based on type of command response or notification, 990 * handle those that need handling via function in 991 * rx_handlers table. 
See iwl_setup_rx_handlers() */ 992 if (priv->rx_handlers[pkt->hdr.cmd]) { 993 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, 994 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 995 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 996 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 997 } else { 998 /* No handling needed */ 999 IWL_DEBUG_RX(priv, 1000 "r %d i %d No handler needed for %s, 0x%02x\n", 1001 r, i, get_cmd_string(pkt->hdr.cmd), 1002 pkt->hdr.cmd); 1003 } 1004 1005 1006 if (reclaim) { 1007 /* Invoke any callbacks, transfer the buffer to caller, 1008 * and fire off the (possibly) blocking iwl_send_cmd() 1009 * as we reclaim the driver command queue */ 1010 if (rxb->page) 1011 iwl_tx_cmd_complete(priv, rxb); 1012 else 1013 IWL_WARN(priv, "Claim null rxb?\n"); 1014 } 1015 1016 /* Reuse the page if possible. For notification packets and 1017 * SKBs that fail to Rx correctly, add them back into the 1018 * rx_free list for reuse later. */ 1019 spin_lock_irqsave(&rxq->lock, flags); 1020 if (rxb->page != NULL) { 1021 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, 1022 0, PAGE_SIZE << priv->hw_params.rx_page_order, 1023 PCI_DMA_FROMDEVICE); 1024 list_add_tail(&rxb->list, &rxq->rx_free); 1025 rxq->free_count++; 1026 } else 1027 list_add_tail(&rxb->list, &rxq->rx_used); 1028 1029 spin_unlock_irqrestore(&rxq->lock, flags); 1030 1031 i = (i + 1) & RX_QUEUE_MASK; 1032 /* If there are a lot of unused frames, 1033 * restock the Rx queue so ucode wont assert. 
 */
		if (fill_rx) {
			count++;
			/* every 8 handled packets, publish the read index and
			 * replenish right away so the uCode never starves for
			 * empty receive buffers */
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(priv);
	else
		iwlagn_rx_queue_restock(priv);
}

/*
 * iwl_synchronize_irq - flush any in-flight interrupt work
 *
 * Waits for a running PCI interrupt handler to complete, then kills the
 * IRQ tasklet (waiting for it if it is currently executing), so that no
 * interrupt-side work is pending when this returns.
 */
static inline void iwl_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}

/*
 * Legacy interrupt tasklet: reads and acks CSR_INT / CSR_FH_INT_STATUS
 * directly from the device (compare iwl_irq_tasklet, which works from
 * the cached priv->_agn.inta instead).
 */
static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet.
*/ 1081 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1082 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 1083 1084#ifdef CONFIG_IWLWIFI_DEBUG 1085 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1086 /* just for debug */ 1087 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1088 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1089 inta, inta_mask, inta_fh); 1090 } 1091#endif 1092 1093 spin_unlock_irqrestore(&priv->lock, flags); 1094 1095 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1096 * atomic, make sure that inta covers all the interrupts that 1097 * we've discovered, even if FH interrupt came in just after 1098 * reading CSR_INT. */ 1099 if (inta_fh & CSR49_FH_INT_RX_MASK) 1100 inta |= CSR_INT_BIT_FH_RX; 1101 if (inta_fh & CSR49_FH_INT_TX_MASK) 1102 inta |= CSR_INT_BIT_FH_TX; 1103 1104 /* Now service all interrupt bits discovered above. */ 1105 if (inta & CSR_INT_BIT_HW_ERR) { 1106 IWL_ERR(priv, "Hardware error detected. Restarting.\n"); 1107 1108 /* Tell the device to stop sending interrupts */ 1109 iwl_disable_interrupts(priv); 1110 1111 priv->isr_stats.hw++; 1112 iwl_irq_handle_error(priv); 1113 1114 handled |= CSR_INT_BIT_HW_ERR; 1115 1116 return; 1117 } 1118 1119#ifdef CONFIG_IWLWIFI_DEBUG 1120 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1121 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1122 if (inta & CSR_INT_BIT_SCD) { 1123 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " 1124 "the frame/frames.\n"); 1125 priv->isr_stats.sch++; 1126 } 1127 1128 /* Alive notification via Rx interrupt will do the real work */ 1129 if (inta & CSR_INT_BIT_ALIVE) { 1130 IWL_DEBUG_ISR(priv, "Alive interrupt\n"); 1131 priv->isr_stats.alive++; 1132 } 1133 } 1134#endif 1135 /* Safely ignore these bits for debug checks below */ 1136 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1137 1138 /* HW RF KILL switch toggled */ 1139 if (inta & CSR_INT_BIT_RF_KILL) { 1140 int hw_rf_kill = 0; 1141 if (!(iwl_read32(priv, 
CSR_GP_CNTRL) & 1142 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 1143 hw_rf_kill = 1; 1144 1145 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", 1146 hw_rf_kill ? "disable radio" : "enable radio"); 1147 1148 priv->isr_stats.rfkill++; 1149 1150 /* driver only loads ucode once setting the interface up. 1151 * the driver allows loading the ucode even if the radio 1152 * is killed. Hence update the killswitch state here. The 1153 * rfkill handler will care about restarting if needed. 1154 */ 1155 if (!test_bit(STATUS_ALIVE, &priv->status)) { 1156 if (hw_rf_kill) 1157 set_bit(STATUS_RF_KILL_HW, &priv->status); 1158 else 1159 clear_bit(STATUS_RF_KILL_HW, &priv->status); 1160 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); 1161 } 1162 1163 handled |= CSR_INT_BIT_RF_KILL; 1164 } 1165 1166 /* Chip got too hot and stopped itself */ 1167 if (inta & CSR_INT_BIT_CT_KILL) { 1168 IWL_ERR(priv, "Microcode CT kill error detected.\n"); 1169 priv->isr_stats.ctkill++; 1170 handled |= CSR_INT_BIT_CT_KILL; 1171 } 1172 1173 /* Error detected by uCode */ 1174 if (inta & CSR_INT_BIT_SW_ERR) { 1175 IWL_ERR(priv, "Microcode SW error detected. " 1176 " Restarting 0x%X.\n", inta); 1177 priv->isr_stats.sw++; 1178 priv->isr_stats.sw_err = inta; 1179 iwl_irq_handle_error(priv); 1180 handled |= CSR_INT_BIT_SW_ERR; 1181 } 1182 1183 /* 1184 * uCode wakes up after power-down sleep. 1185 * Tell device about any new tx or host commands enqueued, 1186 * and about any Rx buffers made available while asleep. 
1187 */ 1188 if (inta & CSR_INT_BIT_WAKEUP) { 1189 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1190 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1191 for (i = 0; i < priv->hw_params.max_txq_num; i++) 1192 iwl_txq_update_write_ptr(priv, &priv->txq[i]); 1193 priv->isr_stats.wakeup++; 1194 handled |= CSR_INT_BIT_WAKEUP; 1195 } 1196 1197 /* All uCode command responses, including Tx command responses, 1198 * Rx "responses" (frame-received notification), and other 1199 * notifications from uCode come through here*/ 1200 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1201 iwl_rx_handle(priv); 1202 priv->isr_stats.rx++; 1203 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1204 } 1205 1206 /* This "Tx" DMA channel is used only for loading uCode */ 1207 if (inta & CSR_INT_BIT_FH_TX) { 1208 IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); 1209 priv->isr_stats.tx++; 1210 handled |= CSR_INT_BIT_FH_TX; 1211 /* Wake up uCode load routine, now that load is complete */ 1212 priv->ucode_write_complete = 1; 1213 wake_up_interruptible(&priv->wait_command_queue); 1214 } 1215 1216 if (inta & ~handled) { 1217 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1218 priv->isr_stats.unhandled++; 1219 } 1220 1221 if (inta & ~(priv->inta_mask)) { 1222 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", 1223 inta & ~priv->inta_mask); 1224 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); 1225 } 1226 1227 /* Re-enable all interrupts */ 1228 /* only Re-enable if diabled by irq */ 1229 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1230 iwl_enable_interrupts(priv); 1231 /* Re-enable RF_KILL if it occurred */ 1232 else if (handled & CSR_INT_BIT_RF_KILL) 1233 iwl_enable_rfkill_int(priv); 1234 1235#ifdef CONFIG_IWLWIFI_DEBUG 1236 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1237 inta = iwl_read32(priv, CSR_INT); 1238 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1239 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1240 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 
0x%08x, fh 0x%08x, " 1241 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1242 } 1243#endif 1244} 1245 1246/* tasklet for iwlagn interrupt */ 1247static void iwl_irq_tasklet(struct iwl_priv *priv) 1248{ 1249 u32 inta = 0; 1250 u32 handled = 0; 1251 unsigned long flags; 1252 u32 i; 1253#ifdef CONFIG_IWLWIFI_DEBUG 1254 u32 inta_mask; 1255#endif 1256 1257 spin_lock_irqsave(&priv->lock, flags); 1258 1259 /* Ack/clear/reset pending uCode interrupts. 1260 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1261 */ 1262 iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask); 1263 1264 inta = priv->_agn.inta; 1265 1266#ifdef CONFIG_IWLWIFI_DEBUG 1267 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1268 /* just for debug */ 1269 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1270 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ", 1271 inta, inta_mask); 1272 } 1273#endif 1274 1275 spin_unlock_irqrestore(&priv->lock, flags); 1276 1277 /* saved interrupt in inta variable now we can reset priv->_agn.inta */ 1278 priv->_agn.inta = 0; 1279 1280 /* Now service all interrupt bits discovered above. */ 1281 if (inta & CSR_INT_BIT_HW_ERR) { 1282 IWL_ERR(priv, "Hardware error detected. 
Restarting.\n"); 1283 1284 /* Tell the device to stop sending interrupts */ 1285 iwl_disable_interrupts(priv); 1286 1287 priv->isr_stats.hw++; 1288 iwl_irq_handle_error(priv); 1289 1290 handled |= CSR_INT_BIT_HW_ERR; 1291 1292 return; 1293 } 1294 1295#ifdef CONFIG_IWLWIFI_DEBUG 1296 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1297 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1298 if (inta & CSR_INT_BIT_SCD) { 1299 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " 1300 "the frame/frames.\n"); 1301 priv->isr_stats.sch++; 1302 } 1303 1304 /* Alive notification via Rx interrupt will do the real work */ 1305 if (inta & CSR_INT_BIT_ALIVE) { 1306 IWL_DEBUG_ISR(priv, "Alive interrupt\n"); 1307 priv->isr_stats.alive++; 1308 } 1309 } 1310#endif 1311 /* Safely ignore these bits for debug checks below */ 1312 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1313 1314 /* HW RF KILL switch toggled */ 1315 if (inta & CSR_INT_BIT_RF_KILL) { 1316 int hw_rf_kill = 0; 1317 if (!(iwl_read32(priv, CSR_GP_CNTRL) & 1318 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 1319 hw_rf_kill = 1; 1320 1321 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", 1322 hw_rf_kill ? "disable radio" : "enable radio"); 1323 1324 priv->isr_stats.rfkill++; 1325 1326 /* driver only loads ucode once setting the interface up. 1327 * the driver allows loading the ucode even if the radio 1328 * is killed. Hence update the killswitch state here. The 1329 * rfkill handler will care about restarting if needed. 
1330 */ 1331 if (!test_bit(STATUS_ALIVE, &priv->status)) { 1332 if (hw_rf_kill) 1333 set_bit(STATUS_RF_KILL_HW, &priv->status); 1334 else 1335 clear_bit(STATUS_RF_KILL_HW, &priv->status); 1336 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); 1337 } 1338 1339 handled |= CSR_INT_BIT_RF_KILL; 1340 } 1341 1342 /* Chip got too hot and stopped itself */ 1343 if (inta & CSR_INT_BIT_CT_KILL) { 1344 IWL_ERR(priv, "Microcode CT kill error detected.\n"); 1345 priv->isr_stats.ctkill++; 1346 handled |= CSR_INT_BIT_CT_KILL; 1347 } 1348 1349 /* Error detected by uCode */ 1350 if (inta & CSR_INT_BIT_SW_ERR) { 1351 IWL_ERR(priv, "Microcode SW error detected. " 1352 " Restarting 0x%X.\n", inta); 1353 priv->isr_stats.sw++; 1354 priv->isr_stats.sw_err = inta; 1355 iwl_irq_handle_error(priv); 1356 handled |= CSR_INT_BIT_SW_ERR; 1357 } 1358 1359 /* uCode wakes up after power-down sleep */ 1360 if (inta & CSR_INT_BIT_WAKEUP) { 1361 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1362 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1363 for (i = 0; i < priv->hw_params.max_txq_num; i++) 1364 iwl_txq_update_write_ptr(priv, &priv->txq[i]); 1365 1366 priv->isr_stats.wakeup++; 1367 1368 handled |= CSR_INT_BIT_WAKEUP; 1369 } 1370 1371 /* All uCode command responses, including Tx command responses, 1372 * Rx "responses" (frame-received notification), and other 1373 * notifications from uCode come through here*/ 1374 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1375 CSR_INT_BIT_RX_PERIODIC)) { 1376 IWL_DEBUG_ISR(priv, "Rx interrupt\n"); 1377 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1378 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1379 iwl_write32(priv, CSR_FH_INT_STATUS, 1380 CSR49_FH_INT_RX_MASK); 1381 } 1382 if (inta & CSR_INT_BIT_RX_PERIODIC) { 1383 handled |= CSR_INT_BIT_RX_PERIODIC; 1384 iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1385 } 1386 /* Sending RX interrupt require many steps to be done in the 1387 * the device: 1388 * 1- write interrupt to 
current index in ICT table. 1389 * 2- dma RX frame. 1390 * 3- update RX shared data to indicate last write index. 1391 * 4- send interrupt. 1392 * This could lead to RX race, driver could receive RX interrupt 1393 * but the shared data changes does not reflect this; 1394 * periodic interrupt will detect any dangling Rx activity. 1395 */ 1396 1397 /* Disable periodic interrupt; we use it as just a one-shot. */ 1398 iwl_write8(priv, CSR_INT_PERIODIC_REG, 1399 CSR_INT_PERIODIC_DIS); 1400 iwl_rx_handle(priv); 1401 1402 /* 1403 * Enable periodic interrupt in 8 msec only if we received 1404 * real RX interrupt (instead of just periodic int), to catch 1405 * any dangling Rx interrupt. If it was just the periodic 1406 * interrupt, there was no dangling Rx activity, and no need 1407 * to extend the periodic interrupt; one-shot is enough. 1408 */ 1409 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1410 iwl_write8(priv, CSR_INT_PERIODIC_REG, 1411 CSR_INT_PERIODIC_ENA); 1412 1413 priv->isr_stats.rx++; 1414 } 1415 1416 /* This "Tx" DMA channel is used only for loading uCode */ 1417 if (inta & CSR_INT_BIT_FH_TX) { 1418 iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK); 1419 IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); 1420 priv->isr_stats.tx++; 1421 handled |= CSR_INT_BIT_FH_TX; 1422 /* Wake up uCode load routine, now that load is complete */ 1423 priv->ucode_write_complete = 1; 1424 wake_up_interruptible(&priv->wait_command_queue); 1425 } 1426 1427 if (inta & ~handled) { 1428 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1429 priv->isr_stats.unhandled++; 1430 } 1431 1432 if (inta & ~(priv->inta_mask)) { 1433 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", 1434 inta & ~priv->inta_mask); 1435 } 1436 1437 /* Re-enable all interrupts */ 1438 /* only Re-enable if diabled by irq */ 1439 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1440 iwl_enable_interrupts(priv); 1441 /* Re-enable RF_KILL if it occurred */ 1442 else if (handled & 
CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv);
}

/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
/* BA timeout delta above which the low-ACK-ratio check triggers */
#define BA_TIMEOUT_CNT (5)
/* BA timeout delta at (or above) which health is declared bad */
#define BA_TIMEOUT_MAX (16)

/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * All deltas are computed against the snapshot kept in
 * priv->_agn.statistics, i.e. they cover the interval since the previous
 * statistics notification. Returns false (bad health) only when, with
 * aggregation active and ACKs expected, the ACK ratio fell below
 * ACK_CNT_RATIO, no ACK at all was received, and the aggregated BA
 * timeout delta reached BA_TIMEOUT_MAX; the caller is then expected to
 * reload firmware and bring the system back to normal operation state.
 */
bool iwl_good_ack_health(struct iwl_priv *priv,
				struct iwl_rx_packet *pkt)
{
	bool rc = true;
	int actual_ack_cnt_delta, expected_ack_cnt_delta;
	int ba_timeout_delta;

	/* ACKs seen / expected / BA timeouts since the last notification */
	actual_ack_cnt_delta =
		le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
		le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
	expected_ack_cnt_delta =
		le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
		le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
	ba_timeout_delta =
		le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
		le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
	/* only judge health while aggregation is in use and ACKs were
	 * expected; expected_ack_cnt_delta > 0 also guards the division
	 * below against a zero denominator */
	if ((priv->_agn.agg_tids_count > 0) &&
	    (expected_ack_cnt_delta > 0) &&
	    (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
		< ACK_CNT_RATIO) &&
	    (ba_timeout_delta > BA_TIMEOUT_CNT)) {
		IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
				" expected_ack_cnt = %d\n",
				actual_ack_cnt_delta, expected_ack_cnt_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
				priv->_agn.delta_statistics.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta = %d\n",
				priv->_agn.delta_statistics.tx.
				ack_or_ba_timeout_collision);
#endif
		IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
				ba_timeout_delta);
		/* declare bad health only when no ACK at all came back and
		 * the BA timeout delta reached the hard ceiling */
		if (!actual_ack_cnt_delta &&
		    (ba_timeout_delta >= BA_TIMEOUT_MAX))
			rc = false;
	}
	return rc;
}


/*****************************************************************************
 *
 * sysfs attributes
 *
 *****************************************************************************/

#ifdef CONFIG_IWLWIFI_DEBUG

/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
 * used for controlling the debug level.
 *
 * See the level definitions in iwl for details.
 *
 * The debug_level being managed using sysfs below is a per device debug
 * level that is used instead of the global debug level if it (the per
 * device debug level) is set.
 */

/* sysfs read: report the effective debug level as a hex bitmask */
static ssize_t show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
}

/* sysfs write: set the per-device debug level (hex or decimal input) */
static ssize_t store_debug_level(struct device *d,
				struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
	else {
		priv->debug_level = val;
		if (iwl_alloc_traffic_mem(priv))
			IWL_ERR(priv,
				"Not enough memory to generate traffic log\n");
	}
	/* NOTE(review): the whole input is reported as consumed even when
	 * strict_strtoul() failed above — the parse error is only logged */
	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
			show_debug_level, store_debug_level);


#endif /* CONFIG_IWLWIFI_DEBUG */


/* sysfs read: current chip temperature; -EAGAIN until uCode is alive */
static ssize_t show_temperature(struct device *d,
				struct device_attribute *attr, char
*buf) 1560{ 1561 struct iwl_priv *priv = dev_get_drvdata(d); 1562 1563 if (!iwl_is_alive(priv)) 1564 return -EAGAIN; 1565 1566 return sprintf(buf, "%d\n", priv->temperature); 1567} 1568 1569static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 1570 1571static ssize_t show_tx_power(struct device *d, 1572 struct device_attribute *attr, char *buf) 1573{ 1574 struct iwl_priv *priv = dev_get_drvdata(d); 1575 1576 if (!iwl_is_ready_rf(priv)) 1577 return sprintf(buf, "off\n"); 1578 else 1579 return sprintf(buf, "%d\n", priv->tx_power_user_lmt); 1580} 1581 1582static ssize_t store_tx_power(struct device *d, 1583 struct device_attribute *attr, 1584 const char *buf, size_t count) 1585{ 1586 struct iwl_priv *priv = dev_get_drvdata(d); 1587 unsigned long val; 1588 int ret; 1589 1590 ret = strict_strtoul(buf, 10, &val); 1591 if (ret) 1592 IWL_INFO(priv, "%s is not in decimal form.\n", buf); 1593 else { 1594 ret = iwl_set_tx_power(priv, val, false); 1595 if (ret) 1596 IWL_ERR(priv, "failed setting tx power (0x%d).\n", 1597 ret); 1598 else 1599 ret = count; 1600 } 1601 return ret; 1602} 1603 1604static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 1605 1606static struct attribute *iwl_sysfs_entries[] = { 1607 &dev_attr_temperature.attr, 1608 &dev_attr_tx_power.attr, 1609#ifdef CONFIG_IWLWIFI_DEBUG 1610 &dev_attr_debug_level.attr, 1611#endif 1612 NULL 1613}; 1614 1615static struct attribute_group iwl_attribute_group = { 1616 .name = NULL, /* put in device directory */ 1617 .attrs = iwl_sysfs_entries, 1618}; 1619 1620/****************************************************************************** 1621 * 1622 * uCode download functions 1623 * 1624 ******************************************************************************/ 1625 1626static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) 1627{ 1628 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 1629 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 1630 iwl_free_fw_desc(priv->pci_dev, 
			 &priv->ucode_data_backup);
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}

static void iwl_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

/* capabilities advertised by a TLV-format uCode file (see the
 * IWL_UCODE_TLV_PROBE_MAX_LEN / _PHY_CALIBRATION_SIZE handling below) */
struct iwlagn_ucode_capabilities {
	u32 max_probe_length;
	u32 standard_phy_calibration_size;
};

static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
static int iwl_mac_setup_register(struct iwl_priv *priv,
				  struct iwlagn_ucode_capabilities *capa);

/*
 * iwl_request_firmware - kick off an async firmware file request
 *
 * On the first call tries the highest supported API version
 * (ucode_api_max); each subsequent call steps fw_index down one, so the
 * completion callback can retry with older firmware. Returns -ENOENT once
 * fw_index drops below ucode_api_min. Completion is delivered
 * asynchronously to iwl_ucode_callback().
 */
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
	const char *name_pre = priv->cfg->fw_name_pre;

	if (first)
		priv->fw_index = priv->cfg->ucode_api_max;
	else
		priv->fw_index--;

	if (priv->fw_index < priv->cfg->ucode_api_min) {
		IWL_ERR(priv, "no suitable firmware found!\n");
		return -ENOENT;
	}

	/* NOTE(review): unbounded sprintf — assumes firmware_name is sized
	 * for the longest possible "<prefix><index>.ucode"; confirm */
	sprintf(priv->firmware_name, "%s%d%s",
		name_pre, priv->fw_index, ".ucode");

	IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
		       priv->firmware_name);

	return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
				       &priv->pci_dev->dev, GFP_KERNEL, priv,
				       iwl_ucode_callback);
}

/* parsed firmware image: pointers into the raw file for each section,
 * their sizes, the build number, and the event/error log locations */
struct iwlagn_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;

	u32 build;

	u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
};

/* Parse a pre-TLV (v1/v2 header) firmware image into @pieces */
static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
				       const struct firmware *ucode_raw,
				       struct iwlagn_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
const u8 *src; 1693 1694 priv->ucode_ver = le32_to_cpu(ucode->ver); 1695 api_ver = IWL_UCODE_API(priv->ucode_ver); 1696 1697 switch (api_ver) { 1698 default: 1699 /* 1700 * 4965 doesn't revision the firmware file format 1701 * along with the API version, it always uses v1 1702 * file format. 1703 */ 1704 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != 1705 CSR_HW_REV_TYPE_4965) { 1706 hdr_size = 28; 1707 if (ucode_raw->size < hdr_size) { 1708 IWL_ERR(priv, "File size too small!\n"); 1709 return -EINVAL; 1710 } 1711 pieces->build = le32_to_cpu(ucode->u.v2.build); 1712 pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size); 1713 pieces->data_size = le32_to_cpu(ucode->u.v2.data_size); 1714 pieces->init_size = le32_to_cpu(ucode->u.v2.init_size); 1715 pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size); 1716 pieces->boot_size = le32_to_cpu(ucode->u.v2.boot_size); 1717 src = ucode->u.v2.data; 1718 break; 1719 } 1720 /* fall through for 4965 */ 1721 case 0: 1722 case 1: 1723 case 2: 1724 hdr_size = 24; 1725 if (ucode_raw->size < hdr_size) { 1726 IWL_ERR(priv, "File size too small!\n"); 1727 return -EINVAL; 1728 } 1729 pieces->build = 0; 1730 pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size); 1731 pieces->data_size = le32_to_cpu(ucode->u.v1.data_size); 1732 pieces->init_size = le32_to_cpu(ucode->u.v1.init_size); 1733 pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size); 1734 pieces->boot_size = le32_to_cpu(ucode->u.v1.boot_size); 1735 src = ucode->u.v1.data; 1736 break; 1737 } 1738 1739 /* Verify size of file vs. 
image size info in file's header */ 1740 if (ucode_raw->size != hdr_size + pieces->inst_size + 1741 pieces->data_size + pieces->init_size + 1742 pieces->init_data_size + pieces->boot_size) { 1743 1744 IWL_ERR(priv, 1745 "uCode file size %d does not match expected size\n", 1746 (int)ucode_raw->size); 1747 return -EINVAL; 1748 } 1749 1750 pieces->inst = src; 1751 src += pieces->inst_size; 1752 pieces->data = src; 1753 src += pieces->data_size; 1754 pieces->init = src; 1755 src += pieces->init_size; 1756 pieces->init_data = src; 1757 src += pieces->init_data_size; 1758 pieces->boot = src; 1759 src += pieces->boot_size; 1760 1761 return 0; 1762} 1763 1764static int iwlagn_wanted_ucode_alternative = 1; 1765 1766static int iwlagn_load_firmware(struct iwl_priv *priv, 1767 const struct firmware *ucode_raw, 1768 struct iwlagn_firmware_pieces *pieces, 1769 struct iwlagn_ucode_capabilities *capa) 1770{ 1771 struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data; 1772 struct iwl_ucode_tlv *tlv; 1773 size_t len = ucode_raw->size; 1774 const u8 *data; 1775 int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp; 1776 u64 alternatives; 1777 u32 tlv_len; 1778 enum iwl_ucode_tlv_type tlv_type; 1779 const u8 *tlv_data; 1780 1781 if (len < sizeof(*ucode)) { 1782 IWL_ERR(priv, "uCode has invalid length: %zd\n", len); 1783 return -EINVAL; 1784 } 1785 1786 if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) { 1787 IWL_ERR(priv, "invalid uCode magic: 0X%x\n", 1788 le32_to_cpu(ucode->magic)); 1789 return -EINVAL; 1790 } 1791 1792 /* 1793 * Check which alternatives are present, and "downgrade" 1794 * when the chosen alternative is not present, warning 1795 * the user when that happens. Some files may not have 1796 * any alternatives, so don't warn in that case. 
	 */
	alternatives = le64_to_cpu(ucode->alternatives);
	tmp = wanted_alternative;
	/* alternatives is a 64-bit bitmap, so 63 is the highest index */
	if (wanted_alternative > 63)
		wanted_alternative = 63;
	/* step down to the nearest available alternative at or below the
	 * requested one; 0 is always reachable and always valid */
	while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
		wanted_alternative--;
	if (wanted_alternative && wanted_alternative != tmp)
		IWL_WARN(priv,
			 "uCode alternative %d not available, choosing %d\n",
			 tmp, wanted_alternative);

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	pieces->build = le32_to_cpu(ucode->build);
	data = ucode->data;

	len -= sizeof(*ucode);

	/* walk the TLV records: each is a fixed header followed by tlv_len
	 * payload bytes, padded out to a 4-byte boundary */
	while (len >= sizeof(*tlv)) {
		u16 tlv_alt;

		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le16_to_cpu(tlv->type);
		tlv_alt = le16_to_cpu(tlv->alternative);
		tlv_data = tlv->data;

		/* payload must fit within the remainder of the file */
		if (len < tlv_len) {
			IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		/*
		 * Alternative 0 is always valid.
		 *
		 * Skip alternative TLVs that are not selected.
1838 */ 1839 if (tlv_alt != 0 && tlv_alt != wanted_alternative) 1840 continue; 1841 1842 switch (tlv_type) { 1843 case IWL_UCODE_TLV_INST: 1844 pieces->inst = tlv_data; 1845 pieces->inst_size = tlv_len; 1846 break; 1847 case IWL_UCODE_TLV_DATA: 1848 pieces->data = tlv_data; 1849 pieces->data_size = tlv_len; 1850 break; 1851 case IWL_UCODE_TLV_INIT: 1852 pieces->init = tlv_data; 1853 pieces->init_size = tlv_len; 1854 break; 1855 case IWL_UCODE_TLV_INIT_DATA: 1856 pieces->init_data = tlv_data; 1857 pieces->init_data_size = tlv_len; 1858 break; 1859 case IWL_UCODE_TLV_BOOT: 1860 pieces->boot = tlv_data; 1861 pieces->boot_size = tlv_len; 1862 break; 1863 case IWL_UCODE_TLV_PROBE_MAX_LEN: 1864 if (tlv_len != sizeof(u32)) 1865 goto invalid_tlv_len; 1866 capa->max_probe_length = 1867 le32_to_cpup((__le32 *)tlv_data); 1868 break; 1869 case IWL_UCODE_TLV_INIT_EVTLOG_PTR: 1870 if (tlv_len != sizeof(u32)) 1871 goto invalid_tlv_len; 1872 pieces->init_evtlog_ptr = 1873 le32_to_cpup((__le32 *)tlv_data); 1874 break; 1875 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: 1876 if (tlv_len != sizeof(u32)) 1877 goto invalid_tlv_len; 1878 pieces->init_evtlog_size = 1879 le32_to_cpup((__le32 *)tlv_data); 1880 break; 1881 case IWL_UCODE_TLV_INIT_ERRLOG_PTR: 1882 if (tlv_len != sizeof(u32)) 1883 goto invalid_tlv_len; 1884 pieces->init_errlog_ptr = 1885 le32_to_cpup((__le32 *)tlv_data); 1886 break; 1887 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: 1888 if (tlv_len != sizeof(u32)) 1889 goto invalid_tlv_len; 1890 pieces->inst_evtlog_ptr = 1891 le32_to_cpup((__le32 *)tlv_data); 1892 break; 1893 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: 1894 if (tlv_len != sizeof(u32)) 1895 goto invalid_tlv_len; 1896 pieces->inst_evtlog_size = 1897 le32_to_cpup((__le32 *)tlv_data); 1898 break; 1899 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: 1900 if (tlv_len != sizeof(u32)) 1901 goto invalid_tlv_len; 1902 pieces->inst_errlog_ptr = 1903 le32_to_cpup((__le32 *)tlv_data); 1904 break; 1905 case IWL_UCODE_TLV_ENHANCE_SENS_TBL: 1906 if (tlv_len) 
1907 goto invalid_tlv_len; 1908 priv->enhance_sensitivity_table = true; 1909 break; 1910 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: 1911 if (tlv_len != sizeof(u32)) 1912 goto invalid_tlv_len; 1913 capa->standard_phy_calibration_size = 1914 le32_to_cpup((__le32 *)tlv_data); 1915 break; 1916 default: 1917 IWL_WARN(priv, "unknown TLV: %d\n", tlv_type); 1918 break; 1919 } 1920 } 1921 1922 if (len) { 1923 IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len); 1924 iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len); 1925 return -EINVAL; 1926 } 1927 1928 return 0; 1929 1930 invalid_tlv_len: 1931 IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len); 1932 iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len); 1933 1934 return -EINVAL; 1935} 1936 1937/** 1938 * iwl_ucode_callback - callback when firmware was loaded 1939 * 1940 * If loaded successfully, copies the firmware into buffers 1941 * for the card to fetch (via DMA). 1942 */ 1943static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) 1944{ 1945 struct iwl_priv *priv = context; 1946 struct iwl_ucode_header *ucode; 1947 int err; 1948 struct iwlagn_firmware_pieces pieces; 1949 const unsigned int api_max = priv->cfg->ucode_api_max; 1950 const unsigned int api_min = priv->cfg->ucode_api_min; 1951 u32 api_ver; 1952 char buildstr[25]; 1953 u32 build; 1954 struct iwlagn_ucode_capabilities ucode_capa = { 1955 .max_probe_length = 200, 1956 .standard_phy_calibration_size = 1957 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE, 1958 }; 1959 1960 memset(&pieces, 0, sizeof(pieces)); 1961 1962 if (!ucode_raw) { 1963 IWL_ERR(priv, "request for firmware file '%s' failed.\n", 1964 priv->firmware_name); 1965 goto try_again; 1966 } 1967 1968 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", 1969 priv->firmware_name, ucode_raw->size); 1970 1971 /* Make sure that we got at least the API version number */ 1972 if (ucode_raw->size < 4) { 1973 IWL_ERR(priv, "File size way too small!\n"); 1974 goto 
try_again; 1975 } 1976 1977 /* Data from ucode file: header followed by uCode images */ 1978 ucode = (struct iwl_ucode_header *)ucode_raw->data; 1979 1980 if (ucode->ver) 1981 err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces); 1982 else 1983 err = iwlagn_load_firmware(priv, ucode_raw, &pieces, 1984 &ucode_capa); 1985 1986 if (err) 1987 goto try_again; 1988 1989 api_ver = IWL_UCODE_API(priv->ucode_ver); 1990 build = pieces.build; 1991 1992 /* 1993 * api_ver should match the api version forming part of the 1994 * firmware filename ... but we don't check for that and only rely 1995 * on the API version read from firmware header from here on forward 1996 */ 1997 if (api_ver < api_min || api_ver > api_max) { 1998 IWL_ERR(priv, "Driver unable to support your firmware API. " 1999 "Driver supports v%u, firmware is v%u.\n", 2000 api_max, api_ver); 2001 goto try_again; 2002 } 2003 2004 if (api_ver != api_max) 2005 IWL_ERR(priv, "Firmware has old API version. Expected v%u, " 2006 "got v%u. New firmware can be obtained " 2007 "from http://www.intellinuxwireless.org.\n", 2008 api_max, api_ver); 2009 2010 if (build) 2011 sprintf(buildstr, " build %u", build); 2012 else 2013 buildstr[0] = '\0'; 2014 2015 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n", 2016 IWL_UCODE_MAJOR(priv->ucode_ver), 2017 IWL_UCODE_MINOR(priv->ucode_ver), 2018 IWL_UCODE_API(priv->ucode_ver), 2019 IWL_UCODE_SERIAL(priv->ucode_ver), 2020 buildstr); 2021 2022 snprintf(priv->hw->wiphy->fw_version, 2023 sizeof(priv->hw->wiphy->fw_version), 2024 "%u.%u.%u.%u%s", 2025 IWL_UCODE_MAJOR(priv->ucode_ver), 2026 IWL_UCODE_MINOR(priv->ucode_ver), 2027 IWL_UCODE_API(priv->ucode_ver), 2028 IWL_UCODE_SERIAL(priv->ucode_ver), 2029 buildstr); 2030 2031 /* 2032 * For any of the failures below (before allocating pci memory) 2033 * we will try to load a version with a smaller API -- maybe the 2034 * user just got a corrupted version of the latest API. 
2035 */ 2036 2037 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", 2038 priv->ucode_ver); 2039 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n", 2040 pieces.inst_size); 2041 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n", 2042 pieces.data_size); 2043 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n", 2044 pieces.init_size); 2045 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n", 2046 pieces.init_data_size); 2047 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n", 2048 pieces.boot_size); 2049 2050 /* Verify that uCode images will fit in card's SRAM */ 2051 if (pieces.inst_size > priv->hw_params.max_inst_size) { 2052 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n", 2053 pieces.inst_size); 2054 goto try_again; 2055 } 2056 2057 if (pieces.data_size > priv->hw_params.max_data_size) { 2058 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n", 2059 pieces.data_size); 2060 goto try_again; 2061 } 2062 2063 if (pieces.init_size > priv->hw_params.max_inst_size) { 2064 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n", 2065 pieces.init_size); 2066 goto try_again; 2067 } 2068 2069 if (pieces.init_data_size > priv->hw_params.max_data_size) { 2070 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n", 2071 pieces.init_data_size); 2072 goto try_again; 2073 } 2074 2075 if (pieces.boot_size > priv->hw_params.max_bsm_size) { 2076 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n", 2077 pieces.boot_size); 2078 goto try_again; 2079 } 2080 2081 /* Allocate ucode buffers for card's bus-master loading ... 
*/ 2082 2083 /* Runtime instructions and 2 copies of data: 2084 * 1) unmodified from disk 2085 * 2) backup cache for save/restore during power-downs */ 2086 priv->ucode_code.len = pieces.inst_size; 2087 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); 2088 2089 priv->ucode_data.len = pieces.data_size; 2090 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); 2091 2092 priv->ucode_data_backup.len = pieces.data_size; 2093 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 2094 2095 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || 2096 !priv->ucode_data_backup.v_addr) 2097 goto err_pci_alloc; 2098 2099 /* Initialization instructions and data */ 2100 if (pieces.init_size && pieces.init_data_size) { 2101 priv->ucode_init.len = pieces.init_size; 2102 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); 2103 2104 priv->ucode_init_data.len = pieces.init_data_size; 2105 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); 2106 2107 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) 2108 goto err_pci_alloc; 2109 } 2110 2111 /* Bootstrap (instructions only, no data) */ 2112 if (pieces.boot_size) { 2113 priv->ucode_boot.len = pieces.boot_size; 2114 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); 2115 2116 if (!priv->ucode_boot.v_addr) 2117 goto err_pci_alloc; 2118 } 2119 2120 /* Now that we can no longer fail, copy information */ 2121 2122 /* 2123 * The (size - 16) / 12 formula is based on the information recorded 2124 * for each event, which is of mode 1 (including timestamp) for all 2125 * new microcodes that include this information. 
2126 */ 2127 priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr; 2128 if (pieces.init_evtlog_size) 2129 priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12; 2130 else 2131 priv->_agn.init_evtlog_size = priv->cfg->max_event_log_size; 2132 priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr; 2133 priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr; 2134 if (pieces.inst_evtlog_size) 2135 priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; 2136 else 2137 priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size; 2138 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr; 2139 2140 /* Copy images into buffers for card's bus-master reads ... */ 2141 2142 /* Runtime instructions (first block of data in file) */ 2143 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", 2144 pieces.inst_size); 2145 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size); 2146 2147 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", 2148 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); 2149 2150 /* 2151 * Runtime data 2152 * NOTE: Copy into backup buffer will be done in iwl_up() 2153 */ 2154 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", 2155 pieces.data_size); 2156 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size); 2157 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size); 2158 2159 /* Initialization instructions */ 2160 if (pieces.init_size) { 2161 IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n", 2162 pieces.init_size); 2163 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size); 2164 } 2165 2166 /* Initialization data */ 2167 if (pieces.init_data_size) { 2168 IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n", 2169 pieces.init_data_size); 2170 memcpy(priv->ucode_init_data.v_addr, pieces.init_data, 2171 pieces.init_data_size); 2172 } 2173 2174 /* Bootstrap instructions */ 2175 IWL_DEBUG_INFO(priv, "Copying (but 
not loading) boot instr len %Zd\n", 2176 pieces.boot_size); 2177 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); 2178 2179 /* 2180 * figure out the offset of chain noise reset and gain commands 2181 * base on the size of standard phy calibration commands table size 2182 */ 2183 if (ucode_capa.standard_phy_calibration_size > 2184 IWL_MAX_PHY_CALIBRATE_TBL_SIZE) 2185 ucode_capa.standard_phy_calibration_size = 2186 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; 2187 2188 priv->_agn.phy_calib_chain_noise_reset_cmd = 2189 ucode_capa.standard_phy_calibration_size; 2190 priv->_agn.phy_calib_chain_noise_gain_cmd = 2191 ucode_capa.standard_phy_calibration_size + 1; 2192 2193 /************************************************** 2194 * This is still part of probe() in a sense... 2195 * 2196 * 9. Setup and register with mac80211 and debugfs 2197 **************************************************/ 2198 err = iwl_mac_setup_register(priv, &ucode_capa); 2199 if (err) 2200 goto out_unbind; 2201 2202 err = iwl_dbgfs_register(priv, DRV_NAME); 2203 if (err) 2204 IWL_ERR(priv, "failed to create debugfs files. 
Ignoring error: %d\n", err); 2205 2206 err = sysfs_create_group(&priv->pci_dev->dev.kobj, 2207 &iwl_attribute_group); 2208 if (err) { 2209 IWL_ERR(priv, "failed to create sysfs device attributes\n"); 2210 goto out_unbind; 2211 } 2212 2213 /* We have our copies now, allow OS release its copies */ 2214 release_firmware(ucode_raw); 2215 complete(&priv->_agn.firmware_loading_complete); 2216 return; 2217 2218 try_again: 2219 /* try next, if any */ 2220 if (iwl_request_firmware(priv, false)) 2221 goto out_unbind; 2222 release_firmware(ucode_raw); 2223 return; 2224 2225 err_pci_alloc: 2226 IWL_ERR(priv, "failed to allocate pci memory\n"); 2227 iwl_dealloc_ucode_pci(priv); 2228 out_unbind: 2229 complete(&priv->_agn.firmware_loading_complete); 2230 device_release_driver(&priv->pci_dev->dev); 2231 release_firmware(ucode_raw); 2232} 2233 2234static const char *desc_lookup_text[] = { 2235 "OK", 2236 "FAIL", 2237 "BAD_PARAM", 2238 "BAD_CHECKSUM", 2239 "NMI_INTERRUPT_WDG", 2240 "SYSASSERT", 2241 "FATAL_ERROR", 2242 "BAD_COMMAND", 2243 "HW_ERROR_TUNE_LOCK", 2244 "HW_ERROR_TEMPERATURE", 2245 "ILLEGAL_CHAN_FREQ", 2246 "VCC_NOT_STABLE", 2247 "FH_ERROR", 2248 "NMI_INTERRUPT_HOST", 2249 "NMI_INTERRUPT_ACTION_PT", 2250 "NMI_INTERRUPT_UNKNOWN", 2251 "UCODE_VERSION_MISMATCH", 2252 "HW_ERROR_ABS_LOCK", 2253 "HW_ERROR_CAL_LOCK_FAIL", 2254 "NMI_INTERRUPT_INST_ACTION_PT", 2255 "NMI_INTERRUPT_DATA_ACTION_PT", 2256 "NMI_TRM_HW_ER", 2257 "NMI_INTERRUPT_TRM", 2258 "NMI_INTERRUPT_BREAK_POINT" 2259 "DEBUG_0", 2260 "DEBUG_1", 2261 "DEBUG_2", 2262 "DEBUG_3", 2263}; 2264 2265static struct { char *name; u8 num; } advanced_lookup[] = { 2266 { "NMI_INTERRUPT_WDG", 0x34 }, 2267 { "SYSASSERT", 0x35 }, 2268 { "UCODE_VERSION_MISMATCH", 0x37 }, 2269 { "BAD_COMMAND", 0x38 }, 2270 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 2271 { "FATAL_ERROR", 0x3D }, 2272 { "NMI_TRM_HW_ERR", 0x46 }, 2273 { "NMI_INTERRUPT_TRM", 0x4C }, 2274 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 2275 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 
2276 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 2277 { "NMI_INTERRUPT_HOST", 0x66 }, 2278 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 2279 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 2280 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 2281 { "ADVANCED_SYSASSERT", 0 }, 2282}; 2283 2284static const char *desc_lookup(u32 num) 2285{ 2286 int i; 2287 int max = ARRAY_SIZE(desc_lookup_text); 2288 2289 if (num < max) 2290 return desc_lookup_text[num]; 2291 2292 max = ARRAY_SIZE(advanced_lookup) - 1; 2293 for (i = 0; i < max; i++) { 2294 if (advanced_lookup[i].num == num) 2295 break;; 2296 } 2297 return advanced_lookup[i].name; 2298} 2299 2300#define ERROR_START_OFFSET (1 * sizeof(u32)) 2301#define ERROR_ELEM_SIZE (7 * sizeof(u32)) 2302 2303void iwl_dump_nic_error_log(struct iwl_priv *priv) 2304{ 2305 u32 data2, line; 2306 u32 desc, time, count, base, data1; 2307 u32 blink1, blink2, ilink1, ilink2; 2308 u32 pc, hcmd; 2309 2310 if (priv->ucode_type == UCODE_INIT) { 2311 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); 2312 if (!base) 2313 base = priv->_agn.init_errlog_ptr; 2314 } else { 2315 base = le32_to_cpu(priv->card_alive.error_event_table_ptr); 2316 if (!base) 2317 base = priv->_agn.inst_errlog_ptr; 2318 } 2319 2320 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 2321 IWL_ERR(priv, 2322 "Not valid error log pointer 0x%08X for %s uCode\n", 2323 base, (priv->ucode_type == UCODE_INIT) ? 
"Init" : "RT"); 2324 return; 2325 } 2326 2327 count = iwl_read_targ_mem(priv, base); 2328 2329 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 2330 IWL_ERR(priv, "Start IWL Error Log Dump:\n"); 2331 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", 2332 priv->status, count); 2333 } 2334 2335 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); 2336 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32)); 2337 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); 2338 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); 2339 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); 2340 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32)); 2341 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32)); 2342 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); 2343 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 2344 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 2345 hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32)); 2346 2347 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, 2348 blink1, blink2, ilink1, ilink2); 2349 2350 IWL_ERR(priv, "Desc Time " 2351 "data1 data2 line\n"); 2352 IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", 2353 desc_lookup(desc), desc, time, data1, data2, line); 2354 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); 2355 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", 2356 pc, blink1, blink2, ilink1, ilink2, hcmd); 2357} 2358 2359#define EVENT_START_OFFSET (4 * sizeof(u32)) 2360 2361/** 2362 * iwl_print_event_log - Dump error event log to syslog 2363 * 2364 */ 2365static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, 2366 u32 num_events, u32 mode, 2367 int pos, char **buf, size_t bufsz) 2368{ 2369 u32 i; 2370 u32 base; /* SRAM byte address of event log header */ 2371 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ 2372 u32 ptr; /* SRAM byte address of log data */ 2373 u32 ev, time, data; /* event log data */ 2374 unsigned long 
reg_flags; 2375 2376 if (num_events == 0) 2377 return pos; 2378 2379 if (priv->ucode_type == UCODE_INIT) { 2380 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 2381 if (!base) 2382 base = priv->_agn.init_evtlog_ptr; 2383 } else { 2384 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 2385 if (!base) 2386 base = priv->_agn.inst_evtlog_ptr; 2387 } 2388 2389 if (mode == 0) 2390 event_size = 2 * sizeof(u32); 2391 else 2392 event_size = 3 * sizeof(u32); 2393 2394 ptr = base + EVENT_START_OFFSET + (start_idx * event_size); 2395 2396 /* Make sure device is powered up for SRAM reads */ 2397 spin_lock_irqsave(&priv->reg_lock, reg_flags); 2398 iwl_grab_nic_access(priv); 2399 2400 /* Set starting address; reads will auto-increment */ 2401 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); 2402 rmb(); 2403 2404 /* "time" is actually "data" for mode 0 (no timestamp). 2405 * place event id # at far right for easier visual parsing. */ 2406 for (i = 0; i < num_events; i++) { 2407 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2408 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2409 if (mode == 0) { 2410 /* data, ev */ 2411 if (bufsz) { 2412 pos += scnprintf(*buf + pos, bufsz - pos, 2413 "EVT_LOG:0x%08x:%04u\n", 2414 time, ev); 2415 } else { 2416 trace_iwlwifi_dev_ucode_event(priv, 0, 2417 time, ev); 2418 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", 2419 time, ev); 2420 } 2421 } else { 2422 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2423 if (bufsz) { 2424 pos += scnprintf(*buf + pos, bufsz - pos, 2425 "EVT_LOGT:%010u:0x%08x:%04u\n", 2426 time, data, ev); 2427 } else { 2428 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", 2429 time, data, ev); 2430 trace_iwlwifi_dev_ucode_event(priv, time, 2431 data, ev); 2432 } 2433 } 2434 } 2435 2436 /* Allow device to power down */ 2437 iwl_release_nic_access(priv); 2438 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 2439 return pos; 2440} 2441 2442/** 2443 * iwl_print_last_event_logs - Dump the newest # 
of event log to syslog
 *
 * Prints the @size newest entries of a log of @capacity entries whose
 * next write index is @next_entry, handling the case where the uCode
 * has wrapped (@num_wraps) back to the top of the circular log.
 * Returns the updated buffer position (see iwl_print_event_log()).
 */
static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
				    u32 num_wraps, u32 next_entry,
				    u32 size, u32 mode,
				    int pos, char **buf, size_t bufsz)
{
	/*
	 * display the newest DEFAULT_LOG_ENTRIES entries
	 * i.e the entries just before the next one that uCode would fill.
	 */
	if (num_wraps) {
		if (next_entry < size) {
			/* window straddles the wrap point: print the tail
			 * of the log, then the start */
			pos = iwl_print_event_log(priv,
						capacity - (size - next_entry),
						size - next_entry, mode,
						pos, buf, bufsz);
			pos = iwl_print_event_log(priv, 0,
						  next_entry, mode,
						  pos, buf, bufsz);
		} else
			pos = iwl_print_event_log(priv, next_entry - size,
						  size, mode, pos, buf, bufsz);
	} else {
		if (next_entry < size) {
			pos = iwl_print_event_log(priv, 0, next_entry,
						  mode, pos, buf, bufsz);
		} else {
			pos = iwl_print_event_log(priv, next_entry - size,
						  size, mode, pos, buf, bufsz);
		}
	}
	return pos;
}

#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)

/*
 * iwl_dump_nic_event_log - dump the device's event log
 *
 * With @display (debug builds) a buffer is allocated and returned via
 * *@buf for the caller to consume and free; otherwise entries go to the
 * kernel log.  @full_log requests the whole log instead of the newest
 * DEFAULT_DUMP_EVENT_LOG_ENTRIES entries.  Returns the number of bytes
 * written into *@buf (0 when logging to syslog), or a negative errno.
 */
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
			    char **buf, bool display)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	u32 logsize;
	int pos = 0;
	size_t bufsz = 0;

	/* select init vs runtime event table for the loaded uCode image */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
		logsize = priv->_agn.init_evtlog_size;
		if (!base)
			base = priv->_agn.init_evtlog_ptr;
	} else {
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
		logsize = priv->_agn.inst_evtlog_size;
		if (!base)
			base = priv->_agn.inst_evtlog_ptr;
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_read_targ_mem(priv, base);
	mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
	num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
	next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));

	/* clamp values read from the device against the known log size */
	if (capacity > logsize) {
		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
			capacity, logsize);
		capacity = logsize;
	}

	if (next_entry > logsize) {
		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
			next_entry, logsize);
		next_entry = logsize;
	}

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		/* 48 bytes per formatted entry line (see iwl_print_event_log) */
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl_print_event_log(priv, next_entry,
						capacity - next_entry, mode,
						pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl_print_event_log(priv, 0,
					  next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
						next_entry, size, mode,
						pos, buf, bufsz);
#else
	pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}

/**
 * iwl_alive_start - called after REPLY_ALIVE notification received
 *                   from protocol/runtime uCode (initialization uCode's
 *                   Alive gets handled by iwl_init_alive_start()).
 */
static void iwl_alive_start(struct iwl_priv *priv)
{
	int ret = 0;

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded.
	 */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
		goto restart;
	}

	ret = priv->cfg->ops->lib->alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	if (priv->cfg->ops->lib->recover_from_tx_stall) {
		/* Enable timer to monitor the driver queues */
		mod_timer(&priv->monitor_recover,
			jiffies +
			msecs_to_jiffies(priv->cfg->monitor_recover_period));
	}

	if (iwl_is_rfkill(priv))
		return;

	ieee80211_wake_queues(priv->hw);

	priv->active_rate = IWL_RATES_MASK;

	/* Configure Tx antenna selection based on H/W config */
	if (priv->cfg->ops->hcmd->set_tx_ant)
		priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);

	if (iwl_is_associated(priv)) {
		struct iwl_rxon_cmd *active_rxon =
				(struct iwl_rxon_cmd *)&priv->active_rxon;
		/* apply any changes in staging */
		priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		iwl_connection_init_rx_config(priv, NULL);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv);
	}

	/* Configure Bluetooth device coexistence support */
	priv->cfg->ops->hcmd->send_bt_config(priv);

	iwl_reset_run_time_calib(priv);

	/* Configure the adapter for unassociated operation */
	iwlcore_commit_rxon(priv);

	/* At this point, the NIC is initialized and operational */
	iwl_rf_kill_ct_config(priv);

	iwl_leds_init(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
	set_bit(STATUS_READY, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

	iwl_power_update_mode(priv, true);
	IWL_DEBUG_INFO(priv, "Updated power mode\n");


	return;

 restart:
	queue_work(priv->workqueue, &priv->restart);
}

static void iwl_cancel_deferred_work(struct iwl_priv *priv);

/*
 * __iwl_down - take the NIC all the way down
 *
 * Stops queues, disables interrupts, halts DMA and puts the device into
 * low power state.  Callers serialize on priv->mutex (see iwl_down()).
 * All status bits except RF-kill/geo/exit-pending are cleared.
 */
static void __iwl_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	/* set EXIT_PENDING temporarily so concurrent work backs off */
	if (!exit_pending)
		set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_clear_ucode_stations(priv);
	iwl_dealloc_bcast_station(priv);
	iwl_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_interruptible_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!iwl_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	iwlagn_txq_ctx_stop(priv);
	iwlagn_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	priv->cfg->ops->lib->apm_ops.stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);
	priv->ibss_beacon = NULL;

	/* clear out any free frames */
	iwl_clear_free_frames(priv);
}

/* iwl_down - mutex-holding wrapper around __iwl_down() */
static void iwl_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl_down(priv);
	mutex_unlock(&priv->mutex);

	iwl_cancel_deferred_work(priv);
}

#define HW_READY_TIMEOUT (50)

/*
 * iwl_set_hw_ready - request NIC_READY and poll for the hardware to
 * acknowledge it; records the result in priv->hw_ready.
 */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret = 0;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				HW_READY_TIMEOUT);
	if (ret != -ETIMEDOUT)
		priv->hw_ready = true;
	else
		priv->hw_ready = false;

	IWL_DEBUG_INFO(priv, "hardware %s\n",
		(priv->hw_ready == 1) ?
"ready" : "not ready"); 2797 return ret; 2798} 2799 2800static int iwl_prepare_card_hw(struct iwl_priv *priv) 2801{ 2802 int ret = 0; 2803 2804 IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n"); 2805 2806 ret = iwl_set_hw_ready(priv); 2807 if (priv->hw_ready) 2808 return ret; 2809 2810 /* If HW is not ready, prepare the conditions to check again */ 2811 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 2812 CSR_HW_IF_CONFIG_REG_PREPARE); 2813 2814 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, 2815 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 2816 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); 2817 2818 /* HW should be ready by now, check again. */ 2819 if (ret != -ETIMEDOUT) 2820 iwl_set_hw_ready(priv); 2821 2822 return ret; 2823} 2824 2825#define MAX_HW_RESTARTS 5 2826 2827static int __iwl_up(struct iwl_priv *priv) 2828{ 2829 int i; 2830 int ret; 2831 2832 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2833 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); 2834 return -EIO; 2835 } 2836 2837 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 2838 IWL_ERR(priv, "ucode not available for device bringup\n"); 2839 return -EIO; 2840 } 2841 2842 ret = iwl_alloc_bcast_station(priv, true); 2843 if (ret) 2844 return ret; 2845 2846 iwl_prepare_card_hw(priv); 2847 2848 if (!priv->hw_ready) { 2849 IWL_WARN(priv, "Exit HW not ready\n"); 2850 return -EIO; 2851 } 2852 2853 /* If platform's RF_KILL switch is NOT set to KILL */ 2854 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2855 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2856 else 2857 set_bit(STATUS_RF_KILL_HW, &priv->status); 2858 2859 if (iwl_is_rfkill(priv)) { 2860 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); 2861 2862 iwl_enable_interrupts(priv); 2863 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); 2864 return 0; 2865 } 2866 2867 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2868 2869 ret = iwlagn_hw_nic_init(priv); 2870 if (ret) { 2871 IWL_ERR(priv, "Unable 
to init nic\n"); 2872 return ret; 2873 } 2874 2875 /* make sure rfkill handshake bits are cleared */ 2876 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2877 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 2878 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2879 2880 /* clear (again), then enable host interrupts */ 2881 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2882 iwl_enable_interrupts(priv); 2883 2884 /* really make sure rfkill handshake bits are cleared */ 2885 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2886 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2887 2888 /* Copy original ucode data image from disk into backup cache. 2889 * This will be used to initialize the on-board processor's 2890 * data SRAM for a clean start when the runtime program first loads. */ 2891 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, 2892 priv->ucode_data.len); 2893 2894 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2895 2896 /* load bootstrap state machine, 2897 * load bootstrap program into processor's memory, 2898 * prepare to load the "initialize" uCode */ 2899 ret = priv->cfg->ops->lib->load_ucode(priv); 2900 2901 if (ret) { 2902 IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", 2903 ret); 2904 continue; 2905 } 2906 2907 /* start card; "initialize" will load runtime ucode */ 2908 iwl_nic_start(priv); 2909 2910 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); 2911 2912 return 0; 2913 } 2914 2915 set_bit(STATUS_EXIT_PENDING, &priv->status); 2916 __iwl_down(priv); 2917 clear_bit(STATUS_EXIT_PENDING, &priv->status); 2918 2919 /* tried to restart and config the device for as long as our 2920 * patience could withstand */ 2921 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); 2922 return -EIO; 2923} 2924 2925 2926/***************************************************************************** 2927 * 2928 * Workqueue callbacks 2929 * 2930 
 *****************************************************************************/

/* Workqueue callback: forward the init-uCode Alive to the HW-specific
 * handler under priv->mutex. */
static void iwl_bg_init_alive_start(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, init_alive_start.work);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	priv->cfg->ops->lib->init_alive_start(priv);
	mutex_unlock(&priv->mutex);
}

/* Workqueue callback: handle the runtime-uCode Alive notification. */
static void iwl_bg_alive_start(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, alive_start.work);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* enable dram interrupt */
	iwl_reset_ict(priv);

	mutex_lock(&priv->mutex);
	iwl_alive_start(priv);
	mutex_unlock(&priv->mutex);
}

/* Workqueue callback: run chain-noise and sensitivity calibrations on
 * the most recent statistics, unless exiting or scanning. */
static void iwl_bg_run_time_calib_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
			run_time_calib_work);

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (priv->start_calib) {
		/* devices reporting BT statistics use a different
		 * statistics structure */
		if (priv->cfg->bt_statistics) {
			iwl_chain_noise_calibration(priv,
					(void *)&priv->_agn.statistics_bt);
			iwl_sensitivity_calibration(priv,
					(void *)&priv->_agn.statistics_bt);
		} else {
			iwl_chain_noise_calibration(priv,
					(void *)&priv->_agn.statistics);
			iwl_sensitivity_calibration(priv,
					(void *)&priv->_agn.statistics);
		}
	}

	mutex_unlock(&priv->mutex);
}

/* Workqueue callback: restart the adapter, either via mac80211
 * (firmware error path) or by a direct down/up cycle. */
static void iwl_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		mutex_lock(&priv->mutex);
		priv->vif = NULL;
		priv->is_open = 0;
		mutex_unlock(&priv->mutex);
		iwl_down(priv);
		/* let mac80211 drive the reconfiguration */
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl_down(priv);

		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			return;

		mutex_lock(&priv->mutex);
		__iwl_up(priv);
		mutex_unlock(&priv->mutex);
	}
}

/* Workqueue callback: refill the RX queue with fresh buffers. */
static void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, rx_replenish);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwlagn_rx_replenish(priv);
	mutex_unlock(&priv->mutex);
}

#define IWL_DELAY_NEXT_SCAN (HZ*2)

/*
 * iwl_post_associate - program the device after mac80211 association
 *
 * Commits the staging RXON with the association id and preamble/slot
 * flags for @vif, (re)starts beaconing in IBSS mode, and kicks off the
 * runtime calibrations.  Not valid in AP mode.
 */
void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct ieee80211_conf *conf = NULL;
	int ret = 0;

	if (!vif || !priv->is_open)
		return;

	if (vif->type == NL80211_IFTYPE_AP) {
		IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
		return;
	}

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	iwl_scan_cancel_timeout(priv, 200);

	conf = ieee80211_get_hw_conf(priv->hw);

	/* drop out of associated state before reprogramming */
	priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwlcore_commit_rxon(priv);

	iwl_setup_rxon_timing(priv, vif);
	ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
				sizeof(priv->rxon_timing), &priv->rxon_timing);
	if (ret)
		IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
			"Attempting to continue.\n");

	priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;

	iwl_set_rxon_ht(priv, &priv->current_ht_config);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);

	priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);

	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
			vif->bss_conf.aid, vif->bss_conf.beacon_int);

	if (vif->bss_conf.use_short_preamble)
		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* short slot time is only meaningful on the 2.4 GHz band */
	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
		if (vif->bss_conf.use_short_slot)
			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	iwlcore_commit_rxon(priv);

	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
			vif->bss_conf.aid, priv->active_rxon.bssid_addr);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_ADHOC:
		iwl_send_beacon_cmd(priv);
		break;
	default:
		IWL_ERR(priv, "%s Should not be called in %d mode\n",
			__func__, vif->type);
		break;
	}

	/* the chain noise calibration will enabled PM upon completion
	 * If chain noise has already been run, then we need to enable
	 * power management here */
	if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
		iwl_power_update_mode(priv, false);

	/* Enable Rx differential gain and sensitivity calibrations */
	iwl_chain_noise_reset(priv);
	priv->start_calib = 1;

}

/*****************************************************************************
 *
 * mac80211 entry point functions
 *
 *****************************************************************************/

#define UCODE_READY_TIMEOUT	(4 * HZ)

/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 */
/* Advertise driver capabilities to mac80211 and register the hw.
 * @capa: capabilities parsed from the loaded ucode image. */
static int iwl_mac_setup_register(struct iwl_priv *priv,
				  struct iwlagn_ucode_capabilities *capa)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	hw->rate_control_algorithm = "iwl-agn-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_SPECTRUM_MGMT;

	/* Only advertise power-save on hardware where it works. */
	if (!priv->cfg->broken_powersave)
		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;

	/* SMPS support is tied to the 11n capability bit in the SKU. */
	if (priv->cfg->sku & IWL_SKU_N)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	/* Per-station / per-vif private areas allocated by mac80211. */
	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* Advertise only the bands for which the EEPROM has channels. */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}


/*
 * mac80211 start callback: bring the device up and wait for the
 * runtime ucode to report ALIVE before declaring the interface open.
 */
static int iwl_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl_up(priv);
	mutex_unlock(&priv->mutex);

	if (ret)
		return ret;

	/* Under rfkill the ucode cannot start; still report open. */
	if (iwl_is_rfkill(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully.
	 */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Timed out: double-check READY before giving up.
		 * NOTE(review): a negative return (interrupted wait,
		 * -ERESTARTSYS) is not handled here - confirm intent. */
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	iwl_led_start(priv);

out:
	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}

/*
 * mac80211 stop callback: cancel scanning, take the device down and
 * re-arm the rfkill interrupt so state changes are still reported
 * while the interface is closed.
 */
static void iwl_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	if (iwl_is_ready_rf(priv) || test_bit(STATUS_SCAN_HW, &priv->status)) {
		/* stop mac, cancel any scan request and clear
		 * RXON_FILTER_ASSOC_MSK BIT
		 */
		mutex_lock(&priv->mutex);
		iwl_scan_cancel_timeout(priv, 100);
		mutex_unlock(&priv->mutex);
	}

	iwl_down(priv);

	flush_workqueue(priv->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_rfkill_int(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}

/*
 * mac80211 tx callback: hand the frame to the AGN TX path. On failure
 * the skb is freed here; mac80211 considers it consumed either way.
 */
static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwlagn_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
	return NETDEV_TX_OK;
}

/*
 * iwl_config_ap - program RXON/timing for AP operation and (re)send
 * the beacon template. The RXON sequence runs only while the device
 * is not yet associated (i.e. at AP bring-up).
 */
void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	int ret = 0;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!iwl_is_associated(priv)) {

		/* RXON - unassoc (to set timing command) */
		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwlcore_commit_rxon(priv);

		/* RXON Timing */
		iwl_setup_rxon_timing(priv, vif);
		ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
				sizeof(priv->rxon_timing), &priv->rxon_timing);
		if (ret)
			IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
					"Attempting to continue.\n");

		/* AP has all antennas */
		priv->chain_noise_data.active_chains =
			priv->hw_params.valid_rx_ant;
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv);

		/* AID 0: we are the AP, not an associated station. */
		priv->staging_rxon.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			priv->staging_rxon.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			priv->staging_rxon.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short slot time applies on the 2.4 GHz band only. */
		if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				priv->staging_rxon.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				priv->staging_rxon.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwlcore_commit_rxon(priv);
	}
	/* The beacon template is (re)sent in every case. */
	iwl_send_beacon_cmd(priv);

}

/*
 * mac80211 callback: push a freshly derived TKIP phase-1 key to the
 * device so hardware decryption keeps working after IV32 rolls over.
 */
static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_key_conf *keyconf,
				    struct ieee80211_sta *sta,
				    u32 iv32, u16 *phase1key)
{

	struct iwl_priv *priv = hw->priv;
	IWL_DEBUG_MAC80211(priv, "enter\n");

	iwl_update_tkip_key(priv, keyconf, sta,
			iv32, phase1key);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}

/*
 * mac80211 set_key callback: install or remove a hardware crypto key.
 * Legacy (group-key-only) WEP uses a dedicated uCode command path.
 */
static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* Module parameter forces software crypto: refuse HW keys. */
	if (priv->cfg->mod_params->sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* A NULL sta means a group key -> use the broadcast station. */
	sta_id = iwl_sta_id_or_broadcast(priv, sta);
	if (sta_id == IWL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&priv->mutex);
	iwl_scan_cancel_timeout(priv, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) {
		if (cmd == SET_KEY)
			is_default_wep_key = !priv->key_mapping_key;
		else
			is_default_wep_key =
					(key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = iwl_set_default_wep_key(priv, key);
		else
			ret = iwl_set_dynamic_key(priv, key, sta_id);

		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = iwl_remove_default_wep_key(priv, key);
		else
			ret = iwl_remove_dynamic_key(priv, key, sta_id);

		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}

/*
 * mac80211 ampdu_action callback: start/stop RX and TX aggregation
 * sessions, keep the active-TID count up to date, and toggle RTS/CTS
 * protection with the aggregation state when the config asks for it.
 * Requires an 11n-capable SKU.
 */
static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				enum ieee80211_ampdu_mlme_action action,
				struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if
 (!(priv->cfg->sku & IWL_SKU_N))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
		/* During driver teardown report success so mac80211 can
		 * finish its own cleanup. */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
		if (ret == 0) {
			priv->_agn.agg_tids_count++;
			IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
				     priv->_agn.agg_tids_count);
		}
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
		if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
			priv->_agn.agg_tids_count--;
			IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
				     priv->_agn.agg_tids_count);
		}
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		if (priv->cfg->use_rts_for_aggregation) {
			struct iwl_station_priv *sta_priv =
				(void *) sta->drv_priv;
			/*
			 * switch off RTS/CTS if it was previously enabled
			 */

			sta_priv->lq_sta.lq.general_params.flags &=
				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
			iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
					CMD_ASYNC, false);
		}
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		if (priv->cfg->use_rts_for_aggregation) {
			struct iwl_station_priv *sta_priv =
				(void *) sta->drv_priv;

			/*
			 * switch to RTS/CTS if it is the prefer protection
			 * method for HT traffic
			 */

			sta_priv->lq_sta.lq.general_params.flags |=
				LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
			iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
					CMD_ASYNC, false);
		}
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);

	return ret;
}

/*
 * mac80211 sta_notify callback: track per-station power-save state so
 * TX toward a sleeping client can be blocked until it wakes again.
 */
static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       enum sta_notify_cmd cmd,
			       struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	int sta_id;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		WARN_ON(!sta_priv->client);
		sta_priv->asleep = true;
		/* Frames already queued for it: keep the station blocked
		 * until they have drained. */
		if (atomic_read(&sta_priv->pending_frames) > 0)
			ieee80211_sta_block_awake(hw, sta, true);
		break;
	case STA_NOTIFY_AWAKE:
		WARN_ON(!sta_priv->client);
		if (!sta_priv->asleep)
			break;
		sta_priv->asleep = false;
		sta_id = iwl_sta_id(sta);
		if (sta_id != IWL_INVALID_STATION)
			iwl_sta_modify_ps_wake(priv, sta_id);
		break;
	default:
		break;
	}
}

/*
 * mac80211 sta_add callback: create the uCode station table entry for
 * @sta and initialize rate scaling for it.
 */
static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	/* On a station interface the peer being added is our AP, hence
	 * "is_ap" is true when the *vif* type is STATION. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);
	if (vif->type == NL80211_IFTYPE_AP)
		sta_priv->client = true;

	ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
				     &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ?
 */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}

/*
 * mac80211 channel_switch callback: program the device to follow the
 * AP's announced channel switch. Bails out (and reports the switch as
 * failed) when in rfkill, scanning, unassociated, or when another
 * switch is already in progress.
 */
static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	u16 ch;
	unsigned long flags = 0;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (iwl_is_rfkill(priv))
		goto out_exit;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		goto out_exit;

	if (!iwl_is_associated(priv))
		goto out_exit;

	/* channel switch in progress */
	if (priv->switch_rxon.switch_in_progress == true)
		goto out_exit;

	mutex_lock(&priv->mutex);
	if (priv->cfg->ops->lib->set_channel_switch) {

		ch = ieee80211_frequency_to_channel(
			ch_switch->channel->center_freq);
		if (le16_to_cpu(priv->active_rxon.channel) != ch) {
			ch_info = iwl_get_channel_info(priv,
						       conf->channel->band,
						       ch);
			if (!is_channel_valid(ch_info)) {
				IWL_DEBUG_MAC80211(priv, "invalid channel\n");
				goto out;
			}
			spin_lock_irqsave(&priv->lock, flags);

			priv->current_ht_config.smps = conf->smps_mode;

			/* Configure HT40 channels */
			ht_conf->is_ht = conf_is_ht(conf);
			if (ht_conf->is_ht) {
				if (conf_is_ht40_minus(conf)) {
					ht_conf->extension_chan_offset =
						IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ht_conf->is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ht_conf->extension_chan_offset =
						IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ht_conf->is_40mhz = true;
				} else {
					ht_conf->extension_chan_offset =
						IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ht_conf->is_40mhz = false;
				}
			} else
				ht_conf->is_40mhz = false;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
				priv->staging_rxon.flags = 0;

			iwl_set_rxon_channel(priv, conf->channel);
			iwl_set_rxon_ht(priv, ht_conf);
			iwl_set_flags_for_band(priv, conf->channel->band,
					       priv->vif);
			spin_unlock_irqrestore(&priv->lock, flags);

			iwl_set_rate(priv);
			/*
			 * at this point, staging_rxon has the
			 * configuration for channel switch
			 */
			if (priv->cfg->ops->lib->set_channel_switch(priv,
								    ch_switch))
				priv->switch_rxon.switch_in_progress = false;
		}
	}
out:
	mutex_unlock(&priv->mutex);
out_exit:
	/* Tell mac80211 the switch failed unless one is now pending. */
	if (!priv->switch_rxon.switch_in_progress)
		ieee80211_chswitch_done(priv->vif, false);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}

/*
 * mac80211 configure_filter callback: translate mac80211 FIF_* filter
 * flags into RXON filter bits and commit the result to the device.
 */
static void iwlagn_configure_filter(struct ieee80211_hw *hw,
				    unsigned int changed_flags,
				    unsigned int *total_flags,
				    u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Collect each requested flag into the set bits (filter_or) or the
 * cleared bits (filter_nand). */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	priv->staging_rxon.filter_flags &= ~filter_nand;
	priv->staging_rxon.filter_flags |= filter_or;

	iwlcore_commit_rxon(priv);

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}

/*
 * mac80211 flush callback: optionally issue a TX-FIFO flush command
 * (when @drop is set) and then wait until all TX queues are empty.
 */
static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
{
	struct iwl_priv *priv = hw->priv;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* do not support "flush" */
	if (!priv->cfg->ops->lib->txfifo_flush)
		goto done;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
		goto done;
	}
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
		goto done;
	}

	/*
	 * mac80211 will not push any more frames for transmit
	 * until the flush is completed
	 */
	if (drop) {
		IWL_DEBUG_MAC80211(priv, "send flush command\n");
		if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
			IWL_ERR(priv, "flush request fail\n");
			goto done;
		}
	}
	IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
	iwlagn_wait_tx_queue_empty(priv);
done:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}

/*****************************************************************************
 *
 * driver setup and teardown
 *
 *****************************************************************************/

/*
 * Register the deferred work items, timers and the IRQ tasklet used by
 * the driver. Called once from probe, before interrupts are enabled.
 */
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
	INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
	INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);

	iwl_setup_scan_deferred_work(priv);

	/* Let the HW-specific layer register its own work items. */
	if (priv->cfg->ops->lib->setup_deferred_work)
		priv->cfg->ops->lib->setup_deferred_work(priv);

	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl_bg_statistics_periodic;

	init_timer(&priv->ucode_trace);
	priv->ucode_trace.data = (unsigned long)priv;
	priv->ucode_trace.function = iwl_bg_ucode_trace;

	/* TX-stall watchdog only when the HW layer supplies a recovery
	 * handler. */
	if (priv->cfg->ops->lib->recover_from_tx_stall) {
		init_timer(&priv->monitor_recover);
		priv->monitor_recover.data = (unsigned long)priv;
		priv->monitor_recover.function =
			priv->cfg->ops->lib->recover_from_tx_stall;
	}

	if (!priv->cfg->use_isr_legacy)
		tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
			iwl_irq_tasklet, (unsigned long)priv);
	else
		tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
			iwl_irq_tasklet_legacy, (unsigned long)priv);
}

/*
 * Undo iwl_setup_deferred_work(): cancel pending work and kill timers.
 * The workqueue itself is destroyed separately by the caller.
 */
static void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
	if (priv->cfg->ops->lib->cancel_deferred_work)
		priv->cfg->ops->lib->cancel_deferred_work(priv);

	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->scan_check);
	cancel_work_sync(&priv->start_internal_scan);
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->run_time_calib_work);
	cancel_work_sync(&priv->beacon_update);
	del_timer_sync(&priv->statistics_periodic);
	del_timer_sync(&priv->ucode_trace);
	if
 (priv->cfg->ops->lib->recover_from_tx_stall)
		del_timer_sync(&priv->monitor_recover);
}

/*
 * Fill the mac80211 legacy rate table from the driver's iwl_rates[]
 * table. bitrate is in 100 kb/s units (ieee value * 5); hw_value is
 * the rate index so rate scaling can work on indexes.
 */
static void iwl_init_hw_rates(struct iwl_priv *priv,
			      struct ieee80211_rate *rates)
{
	int i;

	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
		rates[i].bitrate = iwl_rates[i].ieee * 5;
		rates[i].hw_value = i; /* Rate scaling will work on indexes */
		rates[i].hw_value_short = i;
		rates[i].flags = 0;
		if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
			/*
			 * If CCK != 1M then set short preamble rate flag.
			 */
			rates[i].flags |=
				(iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
					0 : IEEE80211_RATE_SHORT_PREAMBLE;
		}
	}
}

/*
 * One-time initialization of driver software state: locks, mutexes,
 * defaults, force-reset timing, channel map and geo/rate tables.
 * Returns 0 on success or a negative errno.
 */
static int iwl_init_drv(struct iwl_priv *priv)
{
	int ret;

	priv->ibss_beacon = NULL;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);
	mutex_init(&priv->sync_cmd_mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
	priv->_agn.agg_tids_count = 0;

	/* initialize force reset */
	priv->force_reset[IWL_RF_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_RF_RESET;
	priv->force_reset[IWL_FW_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);

	iwl_init_scan_params(priv);

	/* Set the tx_power_user_lmt to the lowest power level
	 * this value will get overwritten by channel max power avg
	 * from eeprom */
	priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;

	ret =
 iwl_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwlcore_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_free_channel_map(priv);
err:
	return ret;
}

/* Release everything allocated by iwl_init_drv() plus the scan cmd. */
static void iwl_uninit_drv(struct iwl_priv *priv)
{
	iwl_calib_free_results(priv);
	iwlcore_free_geos(priv);
	iwl_free_channel_map(priv);
	kfree(priv->scan_cmd);
}

/* mac80211 callback table. Not const: probe may clear .hw_scan when
 * hardware scanning is disabled via module parameter. */
static struct ieee80211_ops iwl_hw_ops = {
	.tx = iwl_mac_tx,
	.start = iwl_mac_start,
	.stop = iwl_mac_stop,
	.add_interface = iwl_mac_add_interface,
	.remove_interface = iwl_mac_remove_interface,
	.config = iwl_mac_config,
	.configure_filter = iwlagn_configure_filter,
	.set_key = iwl_mac_set_key,
	.update_tkip_key = iwl_mac_update_tkip_key,
	.conf_tx = iwl_mac_conf_tx,
	.reset_tsf = iwl_mac_reset_tsf,
	.bss_info_changed = iwl_bss_info_changed,
	.ampdu_action = iwl_mac_ampdu_action,
	.hw_scan = iwl_mac_hw_scan,
	.sta_notify = iwl_mac_sta_notify,
	.sta_add = iwlagn_mac_sta_add,
	.sta_remove = iwl_mac_sta_remove,
	.channel_switch = iwl_mac_channel_switch,
	.flush = iwl_mac_flush,
};

/*
 * PCI probe: allocate hw/priv, bring up the PCI bus, map the device,
 * read the EEPROM, set up services and interrupts, then kick off the
 * asynchronous firmware load (probe completes in its callback).
 */
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd, num_mac;

	/************************
	 * 1. Allocating HW data
	 ************************/

	/* Disabling hardware scan means that mac80211 will perform scans
	 * "the hard way", rather than using device's scan.
	 */
	if (cfg->mod_params->disable_hw_scan) {
		if (iwl_debug_level & IWL_DL_INFO)
			dev_printk(KERN_DEBUG, &(pdev->dev),
				   "Disabling hw_scan\n");
		iwl_hw_ops.hw_scan = NULL;
	}

	hw = iwl_alloc_all(cfg, &iwl_hw_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	priv = hw->priv;
	/* At this point both hw and priv are allocated. */

	SET_IEEE80211_DEV(hw, &pdev->dev);

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
	priv->cfg = cfg;
	priv->pci_dev = pdev;
	priv->inta_mask = CSR_INI_SET_MASK;

	/* Traffic log is best effort: probe continues without it. */
	if (iwl_alloc_traffic_mem(priv))
		IWL_ERR(priv, "Not enough memory to generate traffic log\n");

	/**************************
	 * 2. Initializing PCI bus
	 **************************/
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Prefer 36-bit DMA masks; fall back to 32-bit. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			IWL_WARN(priv, "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, priv);


	/***********************
	 * 3.
 Read REV register
	 ***********************/
	priv->hw_base = pci_iomap(pdev, 0, 0);
	if (!priv->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
		(unsigned long long) pci_resource_len(pdev, 0));
	IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	iwl_hw_detect(priv);
	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
		priv->cfg->name, priv->hw_rev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	iwl_prepare_card_hw(priv);
	if (!priv->hw_ready) {
		IWL_WARN(priv, "Failed, HW not ready\n");
		/* NOTE(review): 'err' is still 0 on this path, so probe
		 * appears to return success - confirm and set an error
		 * code (e.g. -EIO) if unintended. */
		goto out_iounmap;
	}

	/*****************
	 * 4.
 Read EEPROM
	 *****************/
	/* Read the EEPROM */
	err = iwl_eeprom_init(priv);
	if (err) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = iwl_eeprom_check_version(priv);
	if (err)
		goto out_free_eeprom;

	/* extract MAC Address */
	iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
	priv->hw->wiphy->addresses = priv->addresses;
	priv->hw->wiphy->n_addresses = 1;
	num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
	if (num_mac > 1) {
		/* Second address = first address + 1 in the last octet. */
		memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
		       ETH_ALEN);
		priv->addresses[1].addr[5]++;
		priv->hw->wiphy->n_addresses++;
	}

	/************************
	 * 5. Setup HW constants
	 ************************/
	if (iwl_set_hw_params(priv)) {
		IWL_ERR(priv, "failed to set hw parameters\n");
		/* NOTE(review): 'err' is not set on this failure path -
		 * confirm whether a negative errno should be assigned. */
		goto out_free_eeprom;
	}

	/*******************
	 * 6. Setup priv
	 *******************/

	err = iwl_init_drv(priv);
	if (err)
		goto out_free_eeprom;
	/* At this point both hw and priv are initialized. */

	/********************
	 * 7. Setup services
	 ********************/
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	pci_enable_msi(priv->pci_dev);

	iwl_alloc_isr_ict(priv);
	err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
			  IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
		goto out_disable_msi;
	}

	iwl_setup_deferred_work(priv);
	iwl_setup_rx_handlers(priv);

	/*********************************************
	 * 8.
 Enable interrupts and read RFKILL state
	 *********************************************/

	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
	}

	iwl_enable_rfkill_int(priv);

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
		test_bit(STATUS_RF_KILL_HW, &priv->status));

	iwl_power_initialize(priv);
	iwl_tt_initialize(priv);

	init_completion(&priv->_agn.firmware_loading_complete);

	/* Firmware loads asynchronously; probe finishes in its callback. */
	err = iwl_request_firmware(priv, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

	/* Error unwinding, in reverse order of acquisition. */
 out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	free_irq(priv->pci_dev->irq, priv);
	iwl_free_isr_ict(priv);
 out_disable_msi:
	pci_disable_msi(priv->pci_dev);
	iwl_uninit_drv(priv);
 out_free_eeprom:
	iwl_eeprom_free(priv);
 out_iounmap:
	pci_iounmap(pdev, priv->hw_base);
 out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
 out_pci_disable_device:
	pci_disable_device(pdev);
 out_ieee80211_free_hw:
	iwl_free_traffic_mem(priv);
	ieee80211_free_hw(priv->hw);
 out:
	return err;
}

/*
 * PCI remove: tear everything down in roughly the reverse order of
 * probe. Waits for the asynchronous firmware load to finish first so
 * teardown never races the loading callback.
 */
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	wait_for_completion(&priv->_agn.firmware_loading_complete);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_dbgfs_unregister(priv);
	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);

	/* ieee80211_unregister_hw call wil cause iwl_mac_stop to
	 * to be called and iwl_down since we are removing the device
	 * we need to set STATUS_EXIT_PENDING bit.
	 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl_down(), but there are paths to
	 * run iwl_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	priv->cfg->ops->lib->apm_ops.stop(priv);

	iwl_tt_exit(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_synchronize_irq(priv);

	iwl_dealloc_ucode_pci(priv);

	/* RX queue may never have been allocated on early probe failure. */
	if (priv->rxq.bd)
		iwlagn_rx_queue_free(priv, &priv->rxq);
	iwlagn_hw_txq_ctx_free(priv);

	iwl_eeprom_free(priv);


	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now...
*/ 4211 destroy_workqueue(priv->workqueue); 4212 priv->workqueue = NULL; 4213 iwl_free_traffic_mem(priv); 4214 4215 free_irq(priv->pci_dev->irq, priv); 4216 pci_disable_msi(priv->pci_dev); 4217 pci_iounmap(pdev, priv->hw_base); 4218 pci_release_regions(pdev); 4219 pci_disable_device(pdev); 4220 pci_set_drvdata(pdev, NULL); 4221 4222 iwl_uninit_drv(priv); 4223 4224 iwl_free_isr_ict(priv); 4225 4226 if (priv->ibss_beacon) 4227 dev_kfree_skb(priv->ibss_beacon); 4228 4229 ieee80211_free_hw(priv->hw); 4230} 4231 4232 4233/***************************************************************************** 4234 * 4235 * driver and module entry point 4236 * 4237 *****************************************************************************/ 4238 4239/* Hardware specific file defines the PCI IDs table for that hardware module */ 4240static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { 4241#ifdef CONFIG_IWL4965 4242 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 4243 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 4244#endif /* CONFIG_IWL4965 */ 4245#ifdef CONFIG_IWL5000 4246/* 5100 Series WiFi */ 4247 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ 4248 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ 4249 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ 4250 {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ 4251 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ 4252 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ 4253 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ 4254 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ 4255 {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ 4256 {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ 4257 {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ 4258 {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini 
Card */
	{IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */

/* 5300 Series WiFi */
	{IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */

/* 5350 Series WiFi/WiMax */
	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */

/* 5150 Series Wifi/WiMax */
	{IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */

	{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
	{IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
	{IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */

/* 6x00 Series */
	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
	{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
	{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
	{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
	{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
	{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
	{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
	{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
	{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
	{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},

/* 6x00 Series Gen2a */
	{IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
	
{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},

/* 6x00 Series Gen2b */
	{IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
	{IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
	{IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
	{IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
	{IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
	{IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
	{IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
	{IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
	{IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
	/* Duplicate {0x0090, 0x5211} and {0x0090, 0x5216} entries removed:
	 * both already appear above with identical cfg; PCI matching takes
	 * the first hit, so the repeats were dead entries. */
	{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
	{IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)},
	{IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)},
	{IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)},
	
{IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)},

/* 6x50 WiFi/WiMax Series */
	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
	{IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
	{IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},

/* 6x50 WiFi/WiMax Series Gen2 */
	{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6050g2_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0885, 0x1306, iwl6050g2_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6050g2_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0885, 0x1326, iwl6050g2_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6050g2_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0886, 0x1316, iwl6050g2_bgn_cfg)},

/* 1000 Series WiFi */
	{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
	{IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
	{IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
	{IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
	{IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
	{IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
	{IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
#endif /* CONFIG_IWL5000 */

	/* Terminating all-zero entry required by the PCI core */
	{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);

/* PCI driver glue: probe/remove plus (optional) legacy power-management
 * hooks.  NOTE(review): these are the legacy .suspend/.resume callbacks,
 * not dev_pm_ops — fine for this kernel vintage, worth revisiting on
 * newer kernels. */
static struct pci_driver iwl_driver = {
	.name = DRV_NAME,
	.id_table = iwl_hw_card_ids,
	.probe = iwl_pci_probe,
	.remove = __devexit_p(iwl_pci_remove),
#ifdef CONFIG_PM
	.suspend = iwl_pci_suspend,
	.resume = iwl_pci_resume,
#endif
};

/* Module init: register the rate-control algorithm, then the PCI driver */
static int __init iwl_init(void)
{

	int ret;
	
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
	pr_info(DRV_COPYRIGHT "\n");

	/* The rate-scaling algorithm must be registered with mac80211
	 * before any device can probe. */
	ret = iwlagn_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = pci_register_driver(&iwl_driver);
	if (ret) {
		pr_err("Unable to initialize PCI module\n");
		goto error_register;
	}

	return ret;

error_register:
	/* Undo the rate-control registration on PCI registration failure */
	iwlagn_rate_control_unregister();
	return ret;
}

/* Module exit: unregister in reverse order of iwl_init() */
static void __exit iwl_exit(void)
{
	pci_unregister_driver(&iwl_driver);
	iwlagn_rate_control_unregister();
}

module_exit(iwl_exit);
module_init(iwl_init);

/* Module parameters.  The "*50" variants are deprecated aliases kept for
 * backward compatibility; each pair binds the very same variable. */
#ifdef CONFIG_IWLWIFI_DEBUG
module_param_named(debug50, iwl_debug_level, uint, S_IRUGO);
MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)");
module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* NOTE(review): swcrypto50 binds sw_crypto as `bool` while swcrypto
 * binds the same field as `int` — the two type arguments disagree, and
 * module_param's type must match the variable's declared type.  Confirm
 * the declaration of iwlagn_mod_params.sw_crypto and make both match. */
module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
MODULE_PARM_DESC(swcrypto50,
		 "using crypto in software (default 0 [hardware]) (deprecated)");
module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num50,
		   iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num50,
		 "number of hw queues in 50xx series (deprecated)");
module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
/* Deprecated 50XX alias and current name share one variable, as above */
module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K50,
		 "enable 8K amsdu size in 50XX series (deprecated)");
module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart50,
		 "restart firmware in case of error (deprecated)");
module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
module_param_named(
	disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");

/* Select a non-default ucode alternative from a multi-alternative
 * firmware image file */
module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
		   S_IRUGO);
MODULE_PARM_DESC(ucode_alternative,
		 "specify ucode alternative to use from ucode file");