/*
 * Intel Wireless WiMAX Connection 2400m
 * Generic (non-bus specific) TX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Rewritten to use a single FIFO to lower the memory allocation
 *    pressure and optimize cache hits when copying to the queue, as
 *    well as splitting out bus-specific code.
 *
 *
 * Implements data transmission to the device; this is done through a
 * software FIFO, as data/control frames can be coalesced (while the
 * device is reading the previous TX transaction, others accumulate).
 *
 * A FIFO is used because at the end it is resource-cheaper than trying
 * to implement scatter/gather over USB. As well, most traffic is going
 * to be download (vs upload).
 *
 * The format for sending/receiving data to/from the i2400m is
 * described in detail in rx.c:PROTOCOL FORMAT. In here we implement
 * the transmission of that. This is split between a bus-independent
 * part that just prepares everything and a bus-specific part that
 * does the actual transmission over the bus to the device (in the
 * bus-specific driver).
 *
 *
 * The general format of a device-host transaction is MSG-HDR, PLD1,
 * PLD2...PLDN, PL1, PL2,...PLN, PADDING.
 *
 * Because we need to send the payload descriptors first and then the
 * payloads, and because it is kind of expensive to do scatterlists in
 * USB (one URB per node), it becomes cheaper to append all the data
 * to a FIFO (copying to a FIFO potentially in cache is cheaper).
 *
 * Then the bus-specific code takes the parts of that FIFO that are
 * written and passes them to the device.
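 *
 * For illustration only, a hedged sketch of how a transaction with
 * two payloads could lay out in that FIFO; the struct names are
 * those of i2400m.h, the concrete sizes are assumptions following
 * the rules described below:
 *
 *   struct i2400m_msg_hdr hdr;     barker/size, sequence/offset,
 *                                  num_pls... (16 bytes)
 *   struct i2400m_pld pld[2];      one 32-bit descriptor per payload,
 *                                  plus unused descriptor slots so
 *                                  that 16 + 4*N is a multiple of 16
 *   u8 payload0[ALIGN(len0, 16)];  each payload padded to 16 bytes
 *   u8 payload1[ALIGN(len1, 16)];
 *   u8 padding[];                  whole message padded up to
 *                                  i2400m->bus_tx_block_size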
 *
 * So the concepts to keep in mind here are:
 *
 * We use a FIFO to queue the data in a linear buffer. We first append
 * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
 * go appending payloads until we run out of space or of payload
 * descriptors. Then we append padding to make the whole transaction a
 * multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
 *
 * - A TX message: a combination of a message header, payload
 *   descriptors and payloads.
 *
 *     Open: it is marked as active (i2400m->tx_msg is valid) and we
 *       can keep adding payloads to it.
 *
 *     Closed: we are not appending more payloads to this TX message
 *       (exhausted space in the queue, too many payloads or
 *       whichever). We have appended padding so the whole message
 *       length is aligned to i2400m->bus_tx_block_size (as set by the
 *       bus/transport layer).
 *
 * - Most of the time we keep a TX message open to which we append
 *   payloads.
 *
 * - If we are going to append and there is no more space (we are at
 *   the end of the FIFO), we close the message, mark the rest of the
 *   FIFO space unusable (skip_tail), create a new message at the
 *   beginning of the FIFO (if there is space) and append the payload
 *   there.
 *
 *   This is because we need to give linear TX messages to the bus
 *   engine. So rather than writing a message into the space remaining
 *   before the tail, we skip that space and continue at the head of
 *   the buffer.
 *
 * - We overload one of the fields in the message header to use it as
 *   'size' of the TX message, so we can iterate over them. It also
 *   contains a flag that indicates if we have to skip it or not.
 *   When we send the buffer, we update that to its real on-the-wire
 *   value.
 *
 * - The MSG-HDR PLD1...PLDN stuff has to be a size multiple of 16.
 *
 *   It follows that if MSG-HDR says we have N payload descriptors,
 *   the whole header + descriptors is 16 + 4*N; for that to be a
 *   multiple of 16, N can be 4, 8, 12, ... (32, 48, 64, 80...
 *   bytes).
 *
 *   So if we have only 1 payload, we have to submit a header that in
 *   all truth has space for 4.
 *
 *   The implication is that we reserve space for I2400M_TX_PLD_MAX
 *   descriptors upfront; but if we fill up only (eg) 2, the used part
 *   of the header becomes 32 bytes only. So the TX engine has to
 *   shift those 32 bytes of msg header and payload descriptors up so
 *   that the payloads start right after them, and it has to know
 *   about that (a worked example follows this list).
 *
 *   It is cheaper to move the header up than the whole payloads down.
 *
 *   We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'.
 *
 * - Each payload has to be size-padded to 16 bytes; before appending
 *   it, we just do it.
 *
 * - The whole message has to be padded to i2400m->bus_tx_block_size;
 *   we do this at close time. Thus, when reserving space for the
 *   payload, we always make sure there is also free space for this
 *   padding that sooner or later will happen.
 *
 * When we append to a message, we tell the bus-specific code to kick
 * in TXs. It will TX (in parallel) until the buffer is exhausted --
 * hence the locking we do. The TX code will only send one TX message
 * at a time (which, remember, might contain more than one payload).
 * Of course, when the bus-specific driver attempts to TX a message
 * that is still open, it gets closed first.
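 *
 * As a worked example of that header-relocation arithmetic (struct
 * sizes from i2400m.h, the payload count assumed): with 2 payloads
 * the used part of the header is 16 + 2*4 = 24 bytes, which ALIGN()s
 * up to 32. Having reserved I2400M_TX_PLD_SIZE bytes for it,
 * i2400m_tx_close() then does, in essence:
 *
 *   hdr_size = ALIGN(sizeof(*tx_msg) + 2 * sizeof(tx_msg->pld[0]), 16);
 *   tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;  (256 - 32 = 224)
 *   memmove((void *) tx_msg + tx_msg->offset, tx_msg, hdr_size);
 *
 * so the 32 live header bytes end up right where the payloads begin.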
 *
 * Gee, this is messy; well, a picture. In the example below we have a
 * partially full FIFO, with a closed message ready to be delivered
 * (with a moved message header to make sure it is size-aligned to
 * 16), TAIL room that was unusable (and thus is marked with a message
 * header that says 'skip this') and at the head of the buffer, an
 * incomplete message with a couple of payloads.
 *
 * N   ___________________________________________________
 *    |                                                   |
 *    |                 TAIL room                         |
 *    |                                                   |
 *    |  msg_hdr to skip (size |= 0x80000000)             |
 *    |---------------------------------------------------|-------
 *    |                                                   |  /|\
 *    |                                                   |   |
 *    |             TX message padding                    |   |
 *    |                                                   |   |
 *    |                                                   |   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
 *    |                                                   |   |
 *    |             payload 1                             |   |
 *    |                                                   |  N * tx_block_size
 *    |                                                   |   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
 *    |                                                   |   |
 *    |             payload 0                             |   |
 *    |                                                   |   |
 *    |                                                   |   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
 *    |  padding 3                 /|\                    |   |  /|\
 *    |  padding 2                  |                     |   |   |
 *    |  pld 1             32 bytes (2 * 16)              |   |   |
 *    |  pld 0                      |                     |   |   |
 *    |  moved msg_hdr             \|/                    |  \|/  |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - -  |
 *    |                                                   |       |
 *    |             unused                                |   _PLD_SIZE
 *    |                                                   |       |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|       |
 *    |  msg_hdr (size X)   [this message is closed]      |      \|/
 *    |===================================================|========== <=== OUT
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |                 Free room                         |
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |===================================================|========== <=== IN
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |             payload 1                             |
 *    |                                                   |
 *    |                                                   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|
 *    |                                                   |
 *    |             payload 0                             |
 *    |                                                   |
 *    |                                                   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|
 *    |  pld 11                    /|\                    |
 *    |  ...                        |                     |
 *    |  pld 1             64 bytes (4 * 16)              |
 *    |  pld 0                      |                     |
 *    |  msg_hdr (size X)          \|/  [message is open] |
 * 0   ---------------------------------------------------
 *
 *
 * ROADMAP
 *
 * i2400m_tx_setup()           Called by i2400m_setup()
 * i2400m_tx_release()         Called by i2400m_release()
 *
 * i2400m_tx()                 Called to send data or control frames
 *   i2400m_tx_fifo_push()     Allocates append-space in the FIFO
 *   i2400m_tx_new()           Opens a new message in the FIFO
 *   i2400m_tx_fits()          Checks if a new payload fits in the message
 *   i2400m_tx_close()         Closes an open message in the FIFO
 *   i2400m_tx_skip_tail()     Marks unusable FIFO tail space
 *   i2400m->bus_tx_kick()
 *
 * Now i2400m->bus_tx_kick() is the bus-specific driver backend
 * implementation; that would do:
 *
 * i2400m->bus_tx_kick()
 *   i2400m_tx_msg_get()       Gets first message ready to go
 *   ...sends it...
 *   i2400m_tx_msg_sent()      Ack the message is sent; repeat from
 *                             _tx_msg_get() until it returns NULL
 *                             (FIFO empty).
 */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include "i2400m.h"


#define D_SUBMODULE tx
#include "debug-levels.h"

enum {
	/**
	 * TX Buffer size
	 *
	 * Doc says maximum transaction is 16KiB. If we had 16KiB en
	 * route and 16KiB being queued, it boils down to needing
	 * 32KiB.
	 * 32KiB is insufficient for 1400 MTU, hence increasing
	 * the TX buffer size to 64KiB.
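	 *
	 * A quick check with assumed traffic numbers: an MTU-sized
	 * payload of ~1400 bytes pads up to 1408, so a 16KiB message
	 * carries at most ~11 of them; 64KiB thus holds the two
	 * maximum-size messages the BUILD_BUG_ON() in
	 * i2400m_tx_setup() requires (one in flight, one being
	 * built), with working room to spare.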
	 */
	I2400M_TX_BUF_SIZE = 65536,
	/**
	 * Message header and payload descriptors have to be 16
	 * aligned (16 + 4 * N = 16 * M). If we take that average sent
	 * packets are MTU size (~1400-~1500) it follows that we could
	 * fit at most 10-11 payloads in one transaction. To meet the
	 * alignment requirement, that means we need to leave space
	 * for 12 (64 bytes). To simplify, we leave space for that. If
	 * at the end there are less, we pad up to the nearest
	 * multiple of 16.
	 */
	/*
	 * According to Intel Wimax i3200, i5x50 and i6x50 specification
	 * documents, the maximum number of payloads per message can be
	 * up to 60. Increasing the number of payloads to 60 per message
	 * helps to accommodate smaller payloads in a single transaction.
	 */
	I2400M_TX_PLD_MAX = 60,
	I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
			     + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
	I2400M_TX_SKIP = 0x80000000,
	/*
	 * According to Intel Wimax i3200, i5x50 and i6x50 specification
	 * documents, the maximum size of each message can be up to 16KiB.
	 */
	I2400M_TX_MSG_SIZE = 16384,
};

#define TAIL_FULL ((void *)~(unsigned long)NULL)

/*
 * Calculate how much tail room is available
 *
 * Note the trick here. This path is ONLY called for Case A (see
 * i2400m_tx_fifo_push() below), where we have:
 *
 *       Case A
 * N  ___________
 *   | tail room |
 *   |           |
 *   |<-  IN   ->|
 *   |           |
 *   |   data    |
 *   |           |
 *   |<-  OUT  ->|
 *   |           |
 *   | head room |
 * 0  -----------
 *
 * When calculating the tail_room, tx_in might get to be zero if
 * i2400m->tx_in is right at the end of the buffer (really full
 * buffer) if there is no head room. In this case, tail_room would be
 * I2400M_TX_BUF_SIZE, although it is actually zero. Hence the final
 * mod (%) operation. However, when doing this kind of optimization,
 * i2400m->tx_in being zero would fail, so we treat it as a special
 * case.
 */
static inline
size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
{
	size_t tail_room;
	size_t tx_in;

	if (unlikely(i2400m->tx_in == 0))
		return I2400M_TX_BUF_SIZE;
	tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
	tail_room = I2400M_TX_BUF_SIZE - tx_in;
	tail_room %= I2400M_TX_BUF_SIZE;
	return tail_room;
}
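
/*
 * A numeric check of the special casing above: with
 * I2400M_TX_BUF_SIZE = 65536 and i2400m->tx_in = 65536 (the buffer
 * wrapped exactly to the top), tx_in % 65536 == 0, so 65536 - 0 =
 * 65536 and the final mod yields tail_room = 0 -- correct. Only
 * i2400m->tx_in == 0 (nothing ever queued, the whole buffer free)
 * must report the full size, hence the explicit early return.
 */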


/*
 * Allocate @size bytes in the TX FIFO, return a pointer to it
 *
 * @i2400m: device descriptor
 * @size: size of the buffer we need to allocate
 * @padding: ensure that there is at least this many bytes of free
 *     contiguous space in the FIFO. This is needed because later on
 *     we might need to add padding.
 * @try_head: specify either to allocate head room or tail room space
 *     in the TX FIFO. This boolean is required to avoid a system hang
 *     due to an infinite loop caused by i2400m_tx_fifo_push().
 *     The caller must always try to allocate tail room space first by
 *     calling this routine with try_head = 0. In case there is not
 *     enough tail room space but there is enough head room space
 *     (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
 *     room space by calling this routine again with try_head = 1.
 *
 * Returns:
 *
 *     Pointer to the allocated space. NULL if there is no
 *     space. TAIL_FULL if there is no space at the tail but there is
 *     at the head (Case B below).
 *
 * These are the two basic cases we need to keep an eye for -- it is
 * much better explained in linux/kernel/kfifo.c, but this code
 * basically does the same. No rocket science here.
 *
 *       Case A               Case B
 * N  ___________         ___________
 *   | tail room |       |   data    |
 *   |           |       |           |
 *   |<-  IN   ->|       |<-  OUT  ->|
 *   |           |       |           |
 *   |   data    |       |   room    |
 *   |           |       |           |
 *   |<-  OUT  ->|       |<-  IN   ->|
 *   |           |       |           |
 *   | head room |       |   data    |
 * 0  -----------         -----------
 *
 * We allocate only *contiguous* space.
 *
 * We can allocate only from 'room'. In Case B, it is simple; in Case
 * A, we only try from the tail room; if it is not enough, we just
 * fail and return TAIL_FULL and let the caller figure out if it wants
 * to skip the tail room and try to allocate from the head.
 *
 * There is a corner case, wherein i2400m_tx_new() can get into
 * an infinite loop calling i2400m_tx_fifo_push().
 * In certain situations, tx_in would have reached the top of the TX
 * FIFO and __i2400m_tx_tail_room() returns 0, as described below:
 *
 * N  ___________  tail room is zero
 *   |<-  IN   ->|
 *   |           |
 *   |           |
 *   |           |
 *   |   data    |
 *   |<-  OUT  ->|
 *   |           |
 *   |           |
 *   | head room |
 * 0  -----------
 *
 * During such a time, where tail room is zero in the TX FIFO and if
 * there is a request to add a payload to the TX FIFO, which calls:
 * i2400m_tx()
 *         ->calls i2400m_tx_close()
 *         ->calls i2400m_tx_skip_tail()
 *         goto try_new;
 *         ->calls i2400m_tx_new()
 *                    |----> [try_head:]
 *     infinite loop  |      ->calls i2400m_tx_fifo_push()
 *                    |            if (tail_room < needed)
 *                    |               if (head_room >= needed)
 *                    |                  return TAIL_FULL;
 *                    |<----  goto try_head;
 *
 * i2400m_tx() calls i2400m_tx_close() to close the message, since there
 * is no tail room to accommodate the payload, and calls
 * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
 * i2400m_tx_new() to allocate space for a new message header, calling
 * i2400m_tx_fifo_push(), which returns TAIL_FULL, since there is no
 * tail space to accommodate the message header, but there is enough
 * head space. i2400m_tx_new() then keeps retrying by calling
 * i2400m_tx_fifo_push(), ending up in a loop causing a system freeze.
 *
 * This corner case is avoided by using a try_head boolean,
 * as an argument to i2400m_tx_fifo_push().
 *
 * Note:
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 *
 *     The indexes keep increasing and we reset them to zero when we
 *     pop data off the queue
 */
static
void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
			  size_t padding, bool try_head)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t room, tail_room, needed_size;
	void *ptr;

	needed_size = size + padding;
	room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
	if (room < needed_size) {	/* this takes care of Case B */
		d_printf(2, dev, "fifo push %zu/%zu: no space\n",
			 size, padding);
		return NULL;
	}
	/* Is there space at the tail? */
	tail_room = __i2400m_tx_tail_room(i2400m);
	if (!try_head && tail_room < needed_size) {
		/*
		 * If the tail room space is not enough to push the message
		 * into the TX FIFO, then there are two possibilities:
		 * 1. There is enough head room space to accommodate
		 *    this message in the TX FIFO.
		 * 2. There is not enough space in the head room and
		 *    in the tail room of the TX FIFO to accommodate
		 *    the message.
		 * In case (1), return TAIL_FULL so that the caller
		 * can figure out if it wants to push the message
		 * into the head room space.
		 * In case (2), return NULL, indicating that the TX FIFO
		 * cannot accommodate the message.
		 */
		if (room - tail_room >= needed_size) {
			d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
				 size, padding);
			return TAIL_FULL;	/* There might be head space */
		} else {
			d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
				 size, padding);
			return NULL;	/* There is no space */
		}
	}
	ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
	d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
		 i2400m->tx_in % I2400M_TX_BUF_SIZE);
	i2400m->tx_in += size;
	return ptr;
}
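
/*
 * For illustration, a hedged sketch of the two-step calling
 * convention described above; it mirrors what i2400m_tx_new() and
 * i2400m_tx() below actually do (error handling trimmed):
 *
 *     ptr = i2400m_tx_fifo_push(i2400m, size, padding, 0);
 *     if (ptr == TAIL_FULL) {
 *             i2400m_tx_skip_tail(i2400m);   <-- waste the unusable tail
 *             ptr = i2400m_tx_fifo_push(i2400m, size, padding, 1);
 *     }
 *     if (ptr == NULL)
 *             ...no space at all, fail with -ENOSPC...
 */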


/*
 * Mark the tail of the FIFO buffer as 'to-skip'
 *
 * We should never hit the BUG_ON() because all the sizes we push to
 * the FIFO are padded to be a multiple of 16 -- the size of *msg
 * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
 * header).
 *
 * Tail room can get to be zero if a message was opened when there was
 * space only for a header. _tx_close() will mark it as to-skip (as it
 * will have no payloads) and there will be no more space to flush, so
 * nothing has to be done here. This is probably cheaper than ensuring
 * in _tx_new() that there is some space for payloads...as we could
 * always possibly hit the same problem if the payload wouldn't fit.
 *
 * Note:
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 *
 *     This path is only taken for Case A FIFO situations [see
 *     i2400m_tx_fifo_push()]
 */
static
void i2400m_tx_skip_tail(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
	size_t tail_room = __i2400m_tx_tail_room(i2400m);
	struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
	if (unlikely(tail_room == 0))
		return;
	BUG_ON(tail_room < sizeof(*msg));
	msg->size = tail_room | I2400M_TX_SKIP;
	d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
		 tail_room, tx_in);
	i2400m->tx_in += tail_room;
}
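
/*
 * For reference, the consumer side: i2400m_tx_msg_get() below
 * recognizes such a marker and hops over the dead tail space with
 * (sketch of the actual code):
 *
 *     if (tx_msg->size & I2400M_TX_SKIP)
 *             i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
 */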


/*
 * Check if a skb will fit in the TX queue's current active TX
 * message (if there are still descriptors left unused).
 *
 * Returns:
 *     0 if the message won't fit, 1 if it will.
 *
 * Note:
 *
 *     Assumes a TX message is active (i2400m->tx_msg).
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 */
static
unsigned i2400m_tx_fits(struct i2400m *i2400m)
{
	struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
	return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
}


/*
 * Start a new TX message header in the queue.
 *
 * Reserve memory from the base FIFO engine and then just initialize
 * the message header.
 *
 * We allocate the biggest TX message header we might need (one that'd
 * fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be
 * 'ironed out' and the unneeded parts removed.
 *
 * NOTE:
 *
 *     Assumes that the previous message is CLOSED (eg: either
 *     there was none or 'i2400m_tx_close()' was called on it).
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 */
static
void i2400m_tx_new(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg;
	bool try_head = false;
	BUG_ON(i2400m->tx_msg != NULL);
	/*
	 * In certain situations, the TX queue might have enough space
	 * to accommodate the new message header (I2400M_TX_PLD_SIZE),
	 * but might not have enough space to accommodate the payloads.
	 * Adding bus_tx_room_min padding while allocating a new TX
	 * message increases the possibility of including at least one
	 * payload of size <= bus_tx_room_min.
	 */
try_head:
	tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
				     i2400m->bus_tx_room_min, try_head);
	if (tx_msg == NULL)
		goto out;
	else if (tx_msg == TAIL_FULL) {
		i2400m_tx_skip_tail(i2400m);
		d_printf(2, dev, "new TX message: tail full, trying head\n");
		try_head = true;
		goto try_head;
	}
	memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
	tx_msg->size = I2400M_TX_PLD_SIZE;
out:
	i2400m->tx_msg = tx_msg;
	d_printf(2, dev, "new TX message: %p @%zu\n",
		 tx_msg, (void *) tx_msg - i2400m->tx_buf);
}


/*
 * Finalize the current TX message header
 *
 * Sets the message header to be at the proper location depending on
 * how many descriptors we have (check documentation at the file's
 * header for more info on that).
 *
 * Appends padding bytes to make sure the whole TX message (counting
 * from the 'relocated' message header) is aligned to
 * tx_block_size. We assume the _append() code has left enough space
 * in the FIFO for that. If there are no payloads, just pass, as it
 * won't be transferred.
 *
 * The amount of padding bytes depends on how many payloads are in the
 * TX message, as the "msg header and payload descriptors" will be
 * shifted up in the buffer.
 */
static
void i2400m_tx_close(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
	struct i2400m_msg_hdr *tx_msg_moved;
	size_t aligned_size, padding, hdr_size;
	void *pad_buf;
	unsigned num_pls;

	if (tx_msg->size & I2400M_TX_SKIP)	/* a skipper? nothing to do */
		goto out;
	num_pls = le16_to_cpu(tx_msg->num_pls);
	/* We can get this situation when a new message was started
	 * and there was no space to add payloads before hitting the
	 * tail (and taking padding into consideration). */
	if (num_pls == 0) {
		tx_msg->size |= I2400M_TX_SKIP;
		goto out;
	}
	/* Relocate the message header
	 *
	 * Find the current header size, align it to 16 and if we need
	 * to move it so the tail is next to the payloads, move it and
	 * set the offset.
	 *
	 * If it moved, this header is good only for transmission; the
	 * original one (it is kept if we moved) is still used to
	 * figure out where the next TX message starts (and where the
	 * offset to the moved header is).
	 */
	hdr_size = sizeof(*tx_msg)
		+ le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
	hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
	tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
	memmove(tx_msg_moved, tx_msg, hdr_size);
	tx_msg_moved->size -= tx_msg->offset;
	/*
	 * Now figure out how much we have to add to the (moved!)
	 * message so the size is a multiple of i2400m->bus_tx_block_size.
	 */
	aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
	padding = aligned_size - tx_msg_moved->size;
	if (padding > 0) {
		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, false);
		if (unlikely(WARN_ON(pad_buf == NULL
				     || pad_buf == TAIL_FULL))) {
			/* This should not happen -- append should verify
			 * there is always space left at least to append
			 * tx_block_size */
			dev_err(dev,
				"SW BUG! Possible data leakage from memory the "
				"device should not read for padding - "
				"size %lu aligned_size %zu tx_buf %p in "
				"%zu out %zu\n",
				(unsigned long) tx_msg_moved->size,
				aligned_size, i2400m->tx_buf, i2400m->tx_in,
				i2400m->tx_out);
		} else
			memset(pad_buf, 0xad, padding);
	}
	tx_msg_moved->padding = cpu_to_le16(padding);
	tx_msg_moved->size += padding;
	if (tx_msg != tx_msg_moved)
		tx_msg->size += padding;
out:
	i2400m->tx_msg = NULL;
}
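
/*
 * A worked example of the close-time padding above (sizes assumed,
 * not from the spec): with i2400m->bus_tx_block_size = 256 and a
 * moved message of 332 bytes, aligned_size = ALIGN(332, 256) = 512,
 * so 180 bytes of 0xad filler get pushed and both the moved and the
 * original header's sizes grow by 180.
 */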


/**
 * i2400m_tx - send the data in a buffer to the device
 *
 * @i2400m: device descriptor
 *
 * @buf: pointer to the buffer to transmit
 *
 * @buf_len: buffer size
 *
 * @pl_type: type of the payload we are sending.
 *
 * Returns:
 *     0 if ok, < 0 errno code on error (-ENOSPC if there is no more
 *     room for the message in the queue; -ESHUTDOWN if the TX
 *     infrastructure has been torn down).
 *
 * Appends the buffer to the TX FIFO and notifies the bus-specific
 * part of the driver that there is new data ready to transmit.
 * Once this function returns, the buffer has been copied, so it can
 * be reused.
 *
 * The steps followed to append are explained in detail in the file
 * header.
 *
 * Whenever we write to a message, we increase msg->size, so it
 * reflects exactly how big the message is. This is needed so that if
 * we concatenate two messages before they can be sent, the code that
 * sends the messages can find the boundaries (and it will replace the
 * size with the real barker before sending).
 *
 * Note:
 *
 *     Cold and warm reset payloads need to be sent as a single
 *     payload, so we handle that.
 */
int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
	      enum i2400m_pt pl_type)
{
	int result = -ENOSPC;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;
	size_t padded_len;
	void *ptr;
	bool try_head = false;
	unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
		|| pl_type == I2400M_PT_RESET_COLD;

	d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
		  i2400m, buf, buf_len, pl_type);
	padded_len = ALIGN(buf_len, I2400M_PL_ALIGN);
	d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
	/* If there is no current TX message, create one; if the
	 * current one is out of payload slots or we have a singleton,
	 * close it and start a new one */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	/* If tx_buf is NULL, the device is shut down */
	if (i2400m->tx_buf == NULL) {
		result = -ESHUTDOWN;
		goto error_tx_new;
	}
try_new:
	if (unlikely(i2400m->tx_msg == NULL))
		i2400m_tx_new(i2400m);
	else if (unlikely(!i2400m_tx_fits(i2400m)
			  || (is_singleton && i2400m->tx_msg->num_pls != 0))) {
		d_printf(2, dev, "closing TX message (fits %u singleton "
			 "%u num_pls %u)\n", i2400m_tx_fits(i2400m),
			 is_singleton, i2400m->tx_msg->num_pls);
		i2400m_tx_close(i2400m);
		i2400m_tx_new(i2400m);
	}
	if (i2400m->tx_msg == NULL)
		goto error_tx_new;
	/*
	 * Check if this skb will fit in the TX queue's current active
	 * TX message. The total message size must not exceed the
	 * maximum size of each message, I2400M_TX_MSG_SIZE. If it
	 * exceeds it, close the current message and push this skb
	 * into the new message.
	 */
	if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
		d_printf(2, dev, "TX: message too big, going new\n");
		i2400m_tx_close(i2400m);
		i2400m_tx_new(i2400m);
	}
	if (i2400m->tx_msg == NULL)
		goto error_tx_new;
	/* So we have a current message header; now append space for
	 * the message -- if there is not enough, try the head */
	ptr = i2400m_tx_fifo_push(i2400m, padded_len,
				  i2400m->bus_tx_block_size, try_head);
	if (ptr == TAIL_FULL) {	/* Tail is full, try head */
		d_printf(2, dev, "pl append: tail full\n");
		i2400m_tx_close(i2400m);
		i2400m_tx_skip_tail(i2400m);
		try_head = true;
		goto try_new;
	} else if (ptr == NULL) {	/* All full */
		result = -ENOSPC;
		d_printf(2, dev, "pl append: all full\n");
	} else {	/* Got space, copy it, set padding */
		struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
		unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
		memcpy(ptr, buf, buf_len);
		memset(ptr + buf_len, 0xad, padded_len - buf_len);
		i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
		d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx)\n",
			 le32_to_cpu(tx_msg->pld[num_pls].val),
			 pl_type, buf_len);
		tx_msg->num_pls = cpu_to_le16(num_pls + 1);
		tx_msg->size += padded_len;
		d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
			 padded_len, tx_msg->size, num_pls + 1);
		d_printf(2, dev,
			 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
			 (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
			 num_pls + 1, ptr - i2400m->tx_buf, buf_len,
			 padded_len);
		result = 0;
		if (is_singleton)
			i2400m_tx_close(i2400m);
	}
error_tx_new:
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	/* kick in most cases, except when the TX subsys is down, as
	 * it might free space */
	if (likely(result != -ESHUTDOWN))
		i2400m->bus_tx_kick(i2400m);
	d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
		i2400m, buf, buf_len, pl_type, result);
	return result;
}
EXPORT_SYMBOL_GPL(i2400m_tx);
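
/*
 * For illustration, a hedged sketch of a call site (modeled on how
 * the network stack glue could hand over a frame; the skb handling
 * around it is elided):
 *
 *     result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
 *     if (result == -ENOSPC)
 *             ...FIFO is full: drop the frame, or stop the queue
 *                and retry later...
 */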


/**
 * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
 *
 * @i2400m: device descriptor
 * @bus_size: where to place the size of the TX message
 *
 * Called by the bus-specific driver to get the first TX message in
 * the FIFO that is ready for transmission.
 *
 * It sets the state in @i2400m to indicate the bus-specific driver is
 * transferring that message (i2400m->tx_msg_size).
 *
 * Once the transfer is completed, call i2400m_tx_msg_sent().
 *
 * Notes:
 *
 *     The size of the TX message to be transmitted might be smaller than
 *     that of the TX message in the FIFO (in case the header was
 *     shorter). Hence, we copy it in @bus_size, for the bus layer to
 *     use. We keep the message's size in i2400m->tx_msg_size so that
 *     when the bus is done transferring we know how much to advance
 *     the FIFO.
 *
 *     We collect statistics here as all the data is available and we
 *     assume it is going to work [see i2400m_tx_msg_sent()].
 */
struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
					 size_t *bus_size)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
	unsigned long flags, pls;

	d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	tx_msg_moved = NULL;
	if (i2400m->tx_buf == NULL)
		goto out_unlock;
skip:
	tx_msg_moved = NULL;
	if (i2400m->tx_in == i2400m->tx_out) {	/* Empty FIFO? */
		i2400m->tx_in = 0;
		i2400m->tx_out = 0;
		d_printf(2, dev, "TX: FIFO empty: resetting\n");
		goto out_unlock;
	}
	tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
	if (tx_msg->size & I2400M_TX_SKIP) {	/* skip? */
		d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
			 i2400m->tx_out % I2400M_TX_BUF_SIZE,
			 (size_t) tx_msg->size & ~I2400M_TX_SKIP);
		i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
		goto skip;
	}

	if (tx_msg->num_pls == 0) {	/* No payloads? */
		if (tx_msg == i2400m->tx_msg) {	/* open, we are done */
			d_printf(2, dev,
				 "TX: FIFO empty: open msg w/o payloads @%zu\n",
				 (void *) tx_msg - i2400m->tx_buf);
			tx_msg = NULL;
			goto out_unlock;
		} else {	/* closed, skip it */
			d_printf(2, dev,
				 "TX: skip msg w/o payloads @%zu (%zu b)\n",
				 (void *) tx_msg - i2400m->tx_buf,
				 (size_t) tx_msg->size);
			i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
			goto skip;
		}
	}
	if (tx_msg == i2400m->tx_msg)	/* open msg? */
		i2400m_tx_close(i2400m);

	/* Now we have a valid TX message (with payloads) to TX */
	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
	i2400m->tx_msg_size = tx_msg->size;
	*bus_size = tx_msg_moved->size;
	d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
		 "size %zu bus_size %zu\n",
		 current->pid, (void *) tx_msg - i2400m->tx_buf,
		 (size_t) tx_msg->offset, (size_t) tx_msg->size,
		 (size_t) tx_msg_moved->size);
	tx_msg_moved->barker = cpu_to_le32(I2400M_H2D_PREVIEW_BARKER);
	tx_msg_moved->sequence = cpu_to_le32(i2400m->tx_sequence++);

	pls = le16_to_cpu(tx_msg_moved->num_pls);
	i2400m->tx_pl_num += pls;	/* Update stats */
	if (pls > i2400m->tx_pl_max)
		i2400m->tx_pl_max = pls;
	if (pls < i2400m->tx_pl_min)
		i2400m->tx_pl_min = pls;
	i2400m->tx_num++;
	i2400m->tx_size_acc += *bus_size;
	if (*bus_size < i2400m->tx_size_min)
		i2400m->tx_size_min = *bus_size;
	if (*bus_size > i2400m->tx_size_max)
		i2400m->tx_size_max = *bus_size;
out_unlock:
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	d_fnend(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
		i2400m, bus_size, *bus_size, tx_msg_moved);
	return tx_msg_moved;
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);
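
/*
 * For illustration, a hedged sketch of the consuming side this pair
 * of calls enables, modeled on the ROADMAP in the file header;
 * i2400m_mybus_send() is a made-up placeholder for whatever
 * primitive the actual bus-specific driver uses to move the bytes:
 *
 *     static void i2400m_mybus_tx_kick(struct i2400m *i2400m)
 *     {
 *             struct i2400m_msg_hdr *tx_msg;
 *             size_t tx_msg_size;
 *
 *             while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
 *                     i2400m_mybus_send(i2400m, tx_msg, tx_msg_size);
 *                     i2400m_tx_msg_sent(i2400m);
 *             }
 *     }
 */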


/**
 * i2400m_tx_msg_sent - indicate the transmission of a TX message
 *
 * @i2400m: device descriptor
 *
 * Called by the bus-specific driver when a message has been sent;
 * this pops it from the FIFO and, as there is now space, starts the
 * queue in case it was stopped.
 *
 * Should be called even if the message send failed and we are
 * dropping this TX message.
 */
void i2400m_tx_msg_sent(struct i2400m *i2400m)
{
	unsigned n;
	unsigned long flags;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (i2400m->tx_buf == NULL)
		goto out_unlock;
	i2400m->tx_out += i2400m->tx_msg_size;
	d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
	i2400m->tx_msg_size = 0;
	BUG_ON(i2400m->tx_out > i2400m->tx_in);
	/* level the FIFO markers off */
	n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
	i2400m->tx_out %= I2400M_TX_BUF_SIZE;
	i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
out_unlock:
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
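
/*
 * A numeric check of the marker leveling above (values assumed):
 * with I2400M_TX_BUF_SIZE = 65536, tx_out = 70000 and tx_in = 81000,
 * n = 70000 / 65536 = 1, so tx_out becomes 70000 % 65536 = 4464 and
 * tx_in becomes 81000 - 65536 = 15464; the distance tx_in - tx_out
 * (11000 bytes of queued data) is preserved.
 */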


/**
 * i2400m_tx_setup - Initialize the TX queue and infrastructure
 *
 * @i2400m: device descriptor
 *
 * Make sure we reset the TX sequence to zero, as when this function
 * is called, the firmware has been just restarted. Same rationale
 * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
 * the memory for the TX queue is reallocated.
 */
int i2400m_tx_setup(struct i2400m *i2400m)
{
	int result = 0;
	void *tx_buf;
	unsigned long flags;

	/* Do this here only once -- we can't do it in
	 * i2400m_hard_start_xmit() as we'd cause race conditions if
	 * the WS was scheduled on another CPU */
	INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);

	tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
	if (tx_buf == NULL) {
		result = -ENOMEM;
		goto error_kmalloc;
	}

	/*
	 * Fail the build if we can't fit at least two maximum size messages
	 * on the TX FIFO [one being delivered while one is constructed].
	 */
	BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	i2400m->tx_sequence = 0;
	i2400m->tx_in = 0;
	i2400m->tx_out = 0;
	i2400m->tx_msg_size = 0;
	i2400m->tx_msg = NULL;
	i2400m->tx_buf = tx_buf;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	/* Huh? the bus layer has to define this... */
	BUG_ON(i2400m->bus_tx_block_size == 0);
error_kmalloc:
	return result;
}


/**
 * i2400m_tx_release - Tear down the TX queue and infrastructure
 *
 * @i2400m: device descriptor
 */
void i2400m_tx_release(struct i2400m *i2400m)
{
	unsigned long flags;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	kfree(i2400m->tx_buf);
	i2400m->tx_buf = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
}