/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00
	Abstract: rt2x00 queue datastructures and routines
 */

#ifndef RT2X00QUEUE_H
#define RT2X00QUEUE_H

#include <linux/prefetch.h>

/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes,
 * for USB devices this restriction does not apply, but the value of
 * 2432 makes sense since it is big enough to contain the maximum fragment
 * size according to the ieee802.11 specs.
 * The aggregation size depends on support from the driver, but should
 * be something around 3840 bytes.
 */
#define DATA_FRAME_SIZE		2432
#define MGMT_FRAME_SIZE		256
#define AGGREGATION_SIZE	3840

/**
 * DOC: Number of entries per queue
 *
 * Under normal load without fragmentation, 12 entries are sufficient
 * without the queue being filled up to the maximum. When using fragmentation
 * and the queue threshold code, we need to add some additional margins to
 * make sure the queue will never (or only under extreme load) fill up
 * completely.
 * Since we don't use preallocated DMA, having a large number of queue entries
 * will have minimal impact on the memory requirements for the queue.
 */
#define RX_ENTRIES	24
#define TX_ENTRIES	24
#define BEACON_ENTRIES	1
#define ATIM_ENTRIES	8

/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_VO: AC VO queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_BE = 0,
	QID_AC_BK = 1,
	QID_AC_VI = 2,
	QID_AC_VO = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	QID_BEACON,
	QID_ATIM,
};

/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
 *	mac80211 but was stripped for processing by the driver.
 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
 *	don't try to pass it back.
 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
 *	skb, instead of in the desc field.
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX = 1 << 0,
	SKBDESC_DMA_MAPPED_TX = 1 << 1,
	SKBDESC_IV_STRIPPED = 1 << 2,
	SKBDESC_NOT_MAC80211 = 1 << 3,
	SKBDESC_DESC_IN_SKB = 1 << 4,
};

/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, this means that
 * this structure should not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
	u8 flags;

	u8 desc_len;
	u8 tx_rate_idx;
	u8 tx_rate_flags;

	void *desc;

	__le32 iv[2];

	dma_addr_t skb_dma;

	struct queue_entry *entry;
};

/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 */
static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
{
	/* The descriptor is stored in mac80211's per-skb driver_data area;
	 * the BUILD_BUG_ON guarantees it actually fits in there. */
	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}

/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_SIGNAL_MCS: Signal field contains the mcs value.
 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP = BIT(0),
	RXDONE_SIGNAL_BITRATE = BIT(1),
	RXDONE_SIGNAL_MCS = BIT(2),
	RXDONE_MY_BSS = BIT(3),
	RXDONE_CRYPTO_IV = BIT(4),
	RXDONE_CRYPTO_ICV = BIT(5),
	RXDONE_L2PAD = BIT(6),
};

/**
 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
 * from &rxdone_entry_desc to a signal value type.
 */
#define RXDONE_SIGNAL_MASK \
	( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )

/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See @enum rate_modulation).
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
	u64 timestamp;
	int signal;
	int rssi;
	int size;
	int flags;
	int dev_flags;
	u16 rate_mode;
	u8 cipher;
	u8 cipher_status;

	__le32 iv[2];
	__le32 icv;
};

/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * Every txdone report has to contain the basic result of the
 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
 * in conjunction with &TXDONE_FAILURE.
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully send
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries
 * @TXDONE_FAILURE: Frame was not successfully send
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FALLBACK,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
};

/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};

/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_GENERATE_SEQ,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
	ENTRY_TXD_ENCRYPT,
	ENTRY_TXD_ENCRYPT_PAIRWISE,
	ENTRY_TXD_ENCRYPT_IV,
	ENTRY_TXD_ENCRYPT_MMIC,
	ENTRY_TXD_HT_AMPDU,
	ENTRY_TXD_HT_BW_40,
	ENTRY_TXD_HT_SHORT_GI,
};

/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @queue: Queue identification (See &enum data_queue_qid).
 * @length: Length of the entire frame.
 * @header_length: Length of 802.11 header.
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @mcs: MCS.
 * @stbc: STBC.
 * @ba_size: BA size.
 * @rate_mode: Rate mode (See @enum rate_modulation).
 * @mpdu_density: MPDU density.
 * @retry_limit: Max number of retries.
 * @aifs: AIFS value.
 * @ifs: IFS value.
 * @txop: IFS value for 11n capable chips.
 * @cw_min: cwmin value.
 * @cw_max: cwmax value.
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 * @iv_len: Length of IV data.
 */
struct txentry_desc {
	unsigned long flags;

	enum data_queue_qid queue;

	u16 length;
	u16 header_length;

	u16 length_high;
	u16 length_low;
	u16 signal;
	u16 service;

	u16 mcs;
	u16 stbc;
	u16 ba_size;
	u16 rate_mode;
	u16 mpdu_density;

	short retry_limit;
	short aifs;
	short ifs;
	short txop;
	short cw_min;
	short cw_max;

	enum cipher cipher;
	u16 key_idx;
	u16 iv_offset;
	u16 iv_len;
};

/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
 *	encryption or decryption. The entry should only be touched after
 *	the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_OWNER_DEVICE_CRYPTO,
	ENTRY_DATA_PENDING,
};

/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 */
struct queue_entry {
	unsigned long flags;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	void *priv_data;
};

/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_CRYPTO: Index pointer to the next entry which encryption/decryption
 *	will be completed by the hardware next.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DONE,
	Q_INDEX_CRYPTO,
	Q_INDEX_MAX,
};

/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to main &struct rt2x00_dev where this queue belongs to.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @lock: Spinlock to protect index handling. Whenever @index, @index_done or
 *	@index_crypt needs to be changed this lock should be grabbed to prevent
 *	index corruption due to concurrency.
 * @last_index: Timestamp (jiffies) of the last Q_INDEX update, compared
 *	with @last_index_done by rt2x00queue_timeout() to detect stalls.
 * @last_index_done: Timestamp (jiffies) of the last Q_INDEX_DONE update.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @txop: maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;

	spinlock_t lock;
	unsigned long last_index;
	unsigned long last_index_done;
	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned short txop;
	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned short desc_size;

	unsigned short usb_endpoint;
	unsigned short usb_maxpacket;
};

/**
 * struct data_queue_desc: Data queue description
 *
 * The information in this structure is used by drivers
 * to inform rt2x00lib about the creation of the data queue.
 *
 * @entry_num: Maximum number of entries for a queue.
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @priv_size: Size of per-queue_entry private data.
 */
struct data_queue_desc {
	unsigned short entry_num;
	unsigned short data_size;
	unsigned short desc_size;
	unsigned short priv_size;
};

/**
 * queue_end - Return pointer to the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 */
#define queue_end(__dev) \
	&(__dev)->rx[(__dev)->data_queues]

/**
 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 */
#define tx_queue_end(__dev) \
	&(__dev)->tx[(__dev)->ops->tx_queues]

/**
 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 * @__queue: Current queue for which we need the next queue
 *
 * Using the current queue address we take the address directly
 * after the queue to take the next queue. Note that this macro
 * should be used carefully since it does not protect against
 * moving past the end of the list. (See macros &queue_end and
 * &tx_queue_end for determining the end of the queue).
 */
#define queue_next(__queue) \
	&(__queue)[1]

/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 */
#define queue_loop(__entry, __start, __end)			\
	for ((__entry) = (__start);				\
	     prefetch(queue_next(__entry)), (__entry) != (__end);\
	     (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))

/**
 * rt2x00queue_empty - Check if the queue is empty.
 * @queue: Queue to check if empty.
 */
static inline int rt2x00queue_empty(struct data_queue *queue)
{
	return queue->length == 0;
}

/**
 * rt2x00queue_full - Check if the queue is full.
 * @queue: Queue to check if full.
 */
static inline int rt2x00queue_full(struct data_queue *queue)
{
	return queue->length == queue->limit;
}

/**
 * rt2x00queue_available - Check the number of available entries in queue.
 * @queue: Queue to check.
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;
}

/**
 * rt2x00queue_threshold - Check if the queue is below threshold
 * @queue: Queue to check.
 */
static inline int rt2x00queue_threshold(struct data_queue *queue)
{
	return rt2x00queue_available(queue) < queue->threshold;
}

/**
 * rt2x00queue_timeout - Check if a timeout occurred for this queue
 * @queue: Queue to check.
 */
static inline int rt2x00queue_timeout(struct data_queue *queue)
{
	/* The queue is considered stalled when the last index update
	 * happened more than 100ms (HZ / 10) after the last completion. */
	return time_after(queue->last_index, queue->last_index_done + (HZ / 10));
}

/**
 * _rt2x00_desc_read - Read a word from the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
{
	*value = desc[word];
}

/**
 * rt2x00_desc_read - Read a word from the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
{
	__le32 tmp;
	_rt2x00_desc_read(desc, word, &tmp);
	*value = le32_to_cpu(tmp);
}

/**
 * _rt2x00_desc_write - write a word to the hardware descriptor without
 * performing byte ordering conversion (@value must already be little endian).
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
{
	desc[word] = value;
}

/**
 * rt2x00_desc_write - write a word to the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
{
	_rt2x00_desc_write(desc, word, cpu_to_le32(value));
}

#endif /* RT2X00QUEUE_H */