/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we need at least 3 bytes of headroom for moving
	 * the frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data in the tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);
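
	/*
	 * At this point the buffer layout is:
	 *   [head_size headroom][frame_size data][tail_size tailroom]
	 * The headroom absorbs the alignment shift (and, with hardware
	 * crypto, the reinserted IV/EIV data), while the tailroom leaves
	 * room for the ICV.
	 */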

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);
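
	/*
	 * The original frame now sits header_align bytes into the grown
	 * buffer; the moves below produce [header][l2pad][payload], with
	 * both the header and the payload starting on a 4-byte boundary.
	 */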

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;
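
		/*
		 * Note: hwrate->bitrate is in 100 kbit/s units, so
		 * GET_DURATION converts the frame length to airtime in
		 * microseconds (e.g. a 100-byte frame at 2 Mbps, bitrate
		 * 20, takes 100 * 8 * 10 / 20 = 400us). The bitrate == 110
		 * check above matches 11 Mbps, the only CCK rate for which
		 * 802.11b defines the Length Extension bit.
		 */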

		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from the queue.
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for frames that have been
	 * injected through a monitor interface. The latter is needed for
	 * testing a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;
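
	/*
	 * IFS summary: IFS_BACKOFF lets the hardware run a full
	 * contention-window backoff before the first frame of an
	 * exchange, while IFS_SIFS keeps later fragments and frames
	 * following RTS/CTS tightly spaced as one atomic exchange.
	 */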

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked that the entry
	 * was ours. When the hardware disagrees, there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
		rt2x00queue_map_txskb(rt2x00dev, entry->skb);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, which means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
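
/*
 * TX path overview: rt2x00queue_write_tx_frame() below first builds the
 * descriptor in software, copies the frame data into place and maps it
 * for DMA, and only then writes the descriptor and kicks the queue, so
 * the hardware never sees a half-initialized entry.
 */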

static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS and CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information has been retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);
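
	/*
	 * Worked example (hypothetical numbers, assuming L2PAD_SIZE pads
	 * the header length up to the next 4-byte multiple): a QoS data
	 * frame with a 26-byte header gets 2 bytes of L2 padding, so the
	 * payload starts at offset 28, which is 4-byte aligned.
	 */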

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(entry, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send the beacon to the hardware and enable beacon generation.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
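
/*
 * The index array behaves as a ring: Q_INDEX marks where the next frame
 * is written, Q_INDEX_DONE where the next completed frame is reaped, and
 * queue->length is the distance between them.
 */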

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
		queue->last_index = jiffies;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
		queue->last_index_done = jiffies;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	queue->last_index = jiffies;
	queue->last_index_done = jiffies;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
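
/*
 * rt2x00queue_initialize() below allocates the entry arrays for every
 * queue (RX, all TX queues, beacon and, when supported, ATIM) and then
 * preallocates the RX skbs; any failure unwinds through
 * rt2x00queue_uninitialize().
 */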

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}