/*
 * Intel Wireless WiMAX Connection 2400m
 * Generic (non-bus specific) TX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Rewritten to use a single FIFO to lower the memory allocation
 *    pressure and optimize cache hits when copying to the queue, as
 *    well as splitting out bus-specific code.
 *
 *
 * Implements data transmission to the device; this is done through a
 * software FIFO, as data/control frames can be coalesced (while the
 * device is reading the previous TX transaction, others accumulate).
 *
 * A FIFO is used because in the end it is resource-cheaper than trying
 * to implement scatter/gather over USB. As well, most traffic is going
 * to be download (vs upload).
 *
 * The format for sending/receiving data to/from the i2400m is
 * described in detail in rx.c:PROTOCOL FORMAT. Here we implement
 * the transmission side of that. This is split between a bus-independent
 * part that just prepares everything and a bus-specific part that
 * does the actual transmission over the bus to the device (in the
 * bus-specific driver).
 *
 *
 * The general format of a device-host transaction is MSG-HDR, PLD1,
 * PLD2...PLDN, PL1, PL2,...PLN, PADDING.
 *
 * Because we need to send the payload descriptors and then the
 * payloads, and because it is kind of expensive to do scatterlists in
 * USB (one URB per node), it becomes cheaper to append all the data
 * to a FIFO (copying to a FIFO potentially in cache is cheaper).
 *
 * Then the bus-specific code takes the parts of that FIFO that are
 * written and passes them to the device.
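 *
 * As an illustration only (the authoritative field layout is in
 * rx.c:PROTOCOL FORMAT and in i2400m.h), a transaction carrying two
 * payloads of 100 and 20 bytes would be laid out roughly like:
 *
 *   MSG-HDR          16 bytes  (size/barker, offset/sequence, num_pls = 2)
 *   PLD0, PLD1        8 bytes  (one 4-byte descriptor per payload)
 *   descriptor pad    8 bytes  (header + descriptors aligned to 16: 32)
 *   PL0             112 bytes  (100 bytes of data, padded to 16)
 *   PL1              32 bytes  (20 bytes of data, padded to 16)
 *   PADDING                    (up to a i2400m->bus_tx_block_size multiple)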
 *
 * So the concepts to keep in mind here are:
 *
 * We use a FIFO to queue the data in a linear buffer. We first append
 * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
 * go appending payloads until we run out of space or of payload
 * descriptors. Then we append padding to make the whole transaction a
 * multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
 *
 * - A TX message: a combination of a message header, payload
 *   descriptors and payloads.
 *
 *     Open: it is marked as active (i2400m->tx_msg is valid) and we
 *       can keep adding payloads to it.
 *
 *     Closed: we are not appending more payloads to this TX message
 *       (exhausted space in the queue, too many payloads or
 *       whichever). We have appended padding so the whole message
 *       length is aligned to i2400m->bus_tx_block_size (as set by the
 *       bus/transport layer).
 *
 * - Most of the time we keep a TX message open to which we append
 *   payloads.
 *
 * - If we are going to append and there is no more space (we are at
 *   the end of the FIFO), we close the message, mark the rest of the
 *   FIFO space unusable (skip_tail), create a new message at the
 *   beginning of the FIFO (if there is space) and append the payload
 *   there.
 *
 *     This is because we need to give linear TX messages to the bus
 *     engine. So we never split a message between the tail and the
 *     head of the FIFO; we skip the remaining tail space and continue
 *     writing at the head of the buffer.
 *
 * - We overload one of the fields in the message header to use it as
 *   'size' of the TX message, so we can iterate over them. It also
 *   contains a flag that indicates if we have to skip it or not.
 *   When we send the buffer, we update that to its real on-the-wire
 *   value.
 *
 * - The MSG-HDR PLD1...PLDN part has to be a size multiple of 16.
 *
 *     It follows that if MSG-HDR says we have N payloads, the whole
 *     header + descriptors is 16 + 4*N; for that to be a multiple of
 *     16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80...
 *     bytes).
 *
 *     So if we have only 1 payload, we have to submit a header that in
 *     all truth has space for 4.
 *
 *     The implication is that we reserve space for the maximum number
 *     of descriptors (eg: 12, 64 bytes); but if we fill up only (eg)
 *     2, our header becomes 32 bytes only. So the TX engine has to
 *     shift those 32 bytes of msg header and 2 payload descriptors up
 *     so that right after them the payloads start, and the TX engine
 *     has to know about that.
 *
 *     It is cheaper to move the header up than to move the whole
 *     payloads down.
 *
 *     We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'.
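 *
 *     The relocation math that i2400m_tx_close() applies, sketched
 *     here with example values (two payloads):
 *
 *       num_pls = 2;
 *       hdr_size = sizeof(struct i2400m_msg_hdr)            // 16 bytes
 *                  + num_pls * sizeof(struct i2400m_pld);   // + 2 * 4 = 24
 *       hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);        // 24 -> 32
 *       tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;     // where it moved
 *       memmove((void *) tx_msg + tx_msg->offset, tx_msg, hdr_size);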
 *
 * - Each payload has to be size-padded to 16 bytes; before appending
 *   it, we just do it.
 *
 * - The whole message has to be padded to i2400m->bus_tx_block_size;
 *   we do this at close time. Thus, when reserving space for the
 *   payload, we always make sure there is also free space for this
 *   padding that sooner or later will happen.
 *
 * When we append a message, we tell the bus-specific code to kick in
 * TXs. It will TX (in parallel) until the buffer is exhausted--hence
 * the locking we do. The TX code will only send one TX message at a
 * time (which, remember, might contain more than one payload). Of
 * course, when the bus-specific driver attempts to TX a message that
 * is still open, it gets closed first.
 *
 * Gee, this is messy; well, a picture. In the example below we have a
 * partially full FIFO, with a closed message ready to be delivered
 * (with a moved message header to make sure it is size-aligned to
 * 16), TAIL room that was unusable (and thus is marked with a message
 * header that says 'skip this') and at the head of the buffer, an
 * incomplete message with a couple of payloads.
 *
 * N   ___________________________________________________
 *    |                                                   |
 *    |                 TAIL room                         |
 *    |                                                   |
 *    |   msg_hdr to skip (size |= 0x80000000)            |
 *    |---------------------------------------------------|-------
 *    |                                                   |  /|\
 *    |                                                   |   |
 *    |              TX message padding                   |   |
 *    |                                                   |   |
 *    |                                                   |   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
 *    |                                                   |   |
 *    |                  payload 1                        |   |
 *    |                                                   | N * tx_block_size
 *    |                                                   |   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
 *    |                                                   |   |
 *    |                  payload 0                        |   |
 *    |                                                   |   |
 *    |                                                   |   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
 *    |    padding 3                    /|\               |   |  /|\
 *    |    padding 2                     |                |   |   |
 *    |    pld 1                 32 bytes (2 * 16)        |   |   |
 *    |    pld 0                         |                |   |   |
 *    |    moved msg_hdr                \|/               |  \|/  |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - -  |
 *    |                                                   |       |
 *    |                   unused                          |  _PLD_SIZE
 *    |                                                   |       |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|       |
 *    |   msg_hdr (size X)  [this message is closed]      |      \|/
 *    |===================================================|========== <=== OUT
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |                  Free room                        |
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |===================================================|========== <=== IN
 *    |                                                   |
 *    |                                                   |
 *    |                                                   |
 *    |                  payload 1                        |
 *    |                                                   |
 *    |                                                   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|
 *    |                                                   |
 *    |                  payload 0                        |
 *    |                                                   |
 *    |                                                   |
 *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|
 *    |    pld 11                       /|\               |
 *    |    ...                           |                |
 *    |    pld 1                 64 bytes (4 * 16)        |
 *    |    pld 0                         |                |
 *    |    msg_hdr (size X)             \|/  [msg open]   |
 * 0   ---------------------------------------------------
 *
 *
 * ROADMAP
 *
 * i2400m_tx_setup()           Called by i2400m_setup()
 * i2400m_tx_release()         Called by i2400m_release()
 *
 * i2400m_tx()                 Called to send data or control frames
 *   i2400m_tx_fifo_push()     Allocates append-space in the FIFO
 *   i2400m_tx_new()           Opens a new message in the FIFO
 *   i2400m_tx_fits()          Checks if a new payload fits in the message
 *   i2400m_tx_close()         Closes an open message in the FIFO
 *   i2400m_tx_skip_tail()     Marks unusable FIFO tail space
 *   i2400m->bus_tx_kick()
 *
 * Now i2400m->bus_tx_kick() is the bus-specific driver backend
 * implementation; that would do:
 *
 * i2400m->bus_tx_kick()
 *   i2400m_tx_msg_get()       Gets first message ready to go
 *   ...sends it...
 *   i2400m_tx_msg_sent()      Ack the message is sent; repeat from
 *                             _tx_msg_get() until it returns NULL
 *                             (FIFO empty).
 */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "i2400m.h"


#define D_SUBMODULE tx
#include "debug-levels.h"
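
/*
 * For orientation, a bus-specific backend's kick routine would follow
 * the i2400m_tx_msg_get()/i2400m_tx_msg_sent() loop of the ROADMAP
 * above. The sketch below is a simplified, hypothetical synchronous
 * version (example_bus_tx_kick and example_bus_send are made-up
 * names); a real backend such as the USB one queues the transfer and
 * calls i2400m_tx_msg_sent() from its completion path instead:
 *
 *   void example_bus_tx_kick(struct i2400m *i2400m)
 *   {
 *           struct i2400m_msg_hdr *tx_msg;
 *           size_t tx_msg_size;
 *
 *           while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
 *                   example_bus_send(i2400m, tx_msg, tx_msg_size);
 *                   i2400m_tx_msg_sent(i2400m);   // advance the FIFO
 *           }
 *   }
 */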

enum {
        /**
         * TX Buffer size
         *
         * Doc says the maximum transaction is 16KiB. If we had 16KiB en
         * route and 16KiB being queued, it boils down to needing
         * 32KiB. 32KiB is insufficient for a 1400-byte MTU, hence we
         * increase the TX buffer size to 64KiB.
         */
        I2400M_TX_BUF_SIZE = 65536,
        /**
         * Message header and payload descriptors have to be 16-byte
         * aligned (16 + 4 * N = 16 * M). Given that the average sent
         * packet is MTU size (~1400-~1500 bytes), it follows that we
         * could fit at most 10-11 payloads in one transaction. To meet
         * the alignment requirement, that means we need to leave space
         * for 12 (64 bytes). To simplify, we leave space for that. If
         * at the end there are fewer, we pad up to the nearest
         * multiple of 16.
         */
        /*
         * According to the Intel WiMAX i3200, i5x50 and i6x50
         * specification documents, the maximum number of payloads per
         * message can be up to 60. Increasing the number of payloads to
         * 60 per message helps to accommodate smaller payloads in a
         * single transaction.
         */
        I2400M_TX_PLD_MAX = 60,
        I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
                + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
        I2400M_TX_SKIP = 0x80000000,
        /*
         * According to the Intel WiMAX i3200, i5x50 and i6x50
         * specification documents, the maximum size of each message can
         * be up to 16KiB.
         */
        I2400M_TX_MSG_SIZE = 16384,
};

#define TAIL_FULL ((void *)~(unsigned long)NULL)

/*
 * Calculate how much tail room is available
 *
 * Note the trick here. This path is ONLY called for Case A (see
 * i2400m_tx_fifo_push() below), where we have:
 *
 *                       Case A
 *           N  ___________
 *             | tail room |
 *             |           |
 *             |<-  IN   ->|
 *             |           |
 *             |   data    |
 *             |           |
 *             |<-  OUT  ->|
 *             |           |
 *             | head room |
 *           0  -----------
 *
 * When calculating the tail_room, (tx_in % I2400M_TX_BUF_SIZE) might
 * be zero if i2400m->tx_in is right at the end of the buffer (really
 * full buffer, no head room). In that case, tail_room would compute
 * as I2400M_TX_BUF_SIZE, although it is actually zero. Hence the
 * final mod (%) operation. However, that mod would also map the
 * legitimately-full tail room of a completely empty buffer
 * (i2400m->tx_in == 0) to zero, so we treat that as a special case.
 */
static inline
size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
{
        size_t tail_room;
        size_t tx_in;

        if (unlikely(i2400m->tx_in == 0))
                return I2400M_TX_BUF_SIZE;
        tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
        tail_room = I2400M_TX_BUF_SIZE - tx_in;
        tail_room %= I2400M_TX_BUF_SIZE;
        return tail_room;
}
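
/*
 * A couple of illustrative values for __i2400m_tx_tail_room(), with
 * the 64KiB I2400M_TX_BUF_SIZE defined above:
 *
 *   tx_in = 0      -> returns 65536 (empty buffer, special-cased)
 *   tx_in = 65536  -> 65536 % 65536 = 0; 65536 - 0 = 65536;
 *                     65536 % 65536 = 0 (buffer really full, no tail room)
 *   tx_in = 65000  -> 65536 - 65000 = 536 bytes of tail room
 */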

/*
 * Allocate @size bytes in the TX fifo, return a pointer to it
 *
 * @i2400m: device descriptor
 * @size: size of the buffer we need to allocate
 * @padding: ensure that there is at least this many bytes of free
 *     contiguous space in the fifo. This is needed because later on
 *     we might need to add padding.
 * @try_head: specify whether to allocate head room or tail room space
 *     in the TX FIFO. This boolean is required to avoid a system hang
 *     due to an infinite loop caused by i2400m_tx_fifo_push().
 *     The caller must always try to allocate tail room space first by
 *     calling this routine with try_head = 0. If there is not enough
 *     tail room space but there is enough head room space
 *     (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
 *     room space by calling this routine again with try_head = 1.
 *
 * Returns:
 *
 *     Pointer to the allocated space. NULL if there is no
 *     space. TAIL_FULL if there is no space at the tail but there is at
 *     the head (Case B below).
 *
 * These are the two basic cases we need to keep an eye on -- it is
 * much better explained in linux/kernel/kfifo.c, but this code
 * basically does the same. No rocket science here.
 *
 *          Case A               Case B
 *      N  ___________       ___________
 *        | tail room |     |   data    |
 *        |           |     |           |
 *        |<-  IN   ->|     |<-  OUT  ->|
 *        |           |     |           |
 *        |   data    |     |   room    |
 *        |           |     |           |
 *        |<-  OUT  ->|     |<-  IN   ->|
 *        |           |     |           |
 *        | head room |     |   data    |
 *      0  -----------       -----------
 *
 * We allocate only *contiguous* space.
 *
 * We can allocate only from 'room'. In Case B, it is simple; in Case
 * A, we only try from the tail room; if it is not enough, we just
 * fail and return TAIL_FULL and let the caller figure out if it wants
 * to skip the tail room and try to allocate from the head.
 *
 * There is a corner case wherein i2400m_tx_new() can get into
 * an infinite loop calling i2400m_tx_fifo_push().
 * In certain situations, tx_in would have reached the top of the TX
 * FIFO and __i2400m_tx_tail_room() returns 0, as described below:
 *
 *      N  ___________ tail room is zero
 *        |<-  IN   ->|
 *        |           |
 *        |           |
 *        |           |
 *        |   data    |
 *        |<-  OUT  ->|
 *        |           |
 *        |           |
 *        | head room |
 *      0  -----------
 *
 * During such a time, where tail room is zero in the TX FIFO, a
 * request to add a payload to the TX FIFO calls:
 *
 * i2400m_tx()
 *         ->calls i2400m_tx_close()
 *         ->calls i2400m_tx_skip_tail()
 *         goto try_new;
 *         ->calls i2400m_tx_new()
 *                    |----> [try_head:]
 *     infinite loop  |     ->calls i2400m_tx_fifo_push()
 *                    |        if (tail_room < needed)
 *                    |           if (head_room >= needed)
 *                    |               return TAIL_FULL;
 *                    |<----  goto try_head;
 *
 * i2400m_tx() calls i2400m_tx_close() to close the message, since there
 * is no tail room to accommodate the payload, and calls
 * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
 * i2400m_tx_new() to allocate space for a new message header, calling
 * i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail
 * space to accommodate the message header, but there is enough head
 * space. i2400m_tx_new() then keeps retrying by calling
 * i2400m_tx_fifo_push(), ending up in a loop causing a system freeze.
 *
 * This corner case is avoided by using a try_head boolean,
 * as an argument to i2400m_tx_fifo_push().
 *
 * Note:
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 *
 *     The indexes keep increasing and we reset them to zero when we
 *     pop data off the queue
 */
static
void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
                          size_t padding, bool try_head)
{
        struct device *dev = i2400m_dev(i2400m);
        size_t room, tail_room, needed_size;
        void *ptr;

        needed_size = size + padding;
        room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
        if (room < needed_size) {	/* this takes care of Case B */
                d_printf(2, dev, "fifo push %zu/%zu: no space\n",
                         size, padding);
                return NULL;
        }
        /* Is there space at the tail? */
        tail_room = __i2400m_tx_tail_room(i2400m);
        if (!try_head && tail_room < needed_size) {
                /*
                 * If the tail room space is not enough to push the
                 * message into the TX FIFO, then there are two
                 * possibilities:
                 * 1. There is enough head room space to accommodate
                 *    this message in the TX FIFO.
                 * 2. There is not enough space in the head room nor
                 *    in the tail room of the TX FIFO to accommodate
                 *    the message.
                 * In case (1), return TAIL_FULL so that the caller
                 * can figure out if it wants to push the message
                 * into the head room space.
                 * In case (2), return NULL, indicating that the TX
                 * FIFO cannot accommodate the message.
                 */
                if (room - tail_room >= needed_size) {
                        d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
                                 size, padding);
                        return TAIL_FULL;	/* There might be head space */
                } else {
                        d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
                                 size, padding);
                        return NULL;	/* There is no space */
                }
        }
        ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
        d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
                 i2400m->tx_in % I2400M_TX_BUF_SIZE);
        i2400m->tx_in += size;
        return ptr;
}


/*
 * Mark the tail of the FIFO buffer as 'to-skip'
 *
 * We should never hit the BUG_ON() because all the sizes we push to
 * the FIFO are padded to be a multiple of 16 -- the size of *msg
 * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
 * header).
 *
 * Tail room can get to be zero if a message was opened when there was
 * space only for a header. _tx_close() will mark it as to-skip (as it
 * will have no payloads) and there will be no more space to flush, so
 * nothing has to be done here. This is probably cheaper than ensuring
 * in _tx_new() that there is some space for payloads...as we could
 * always possibly hit the same problem if the payload wouldn't fit.
 *
 * Note:
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 *
 *     This path is only taken for Case A FIFO situations [see
 *     i2400m_tx_fifo_push()]
 */
static
void i2400m_tx_skip_tail(struct i2400m *i2400m)
{
        struct device *dev = i2400m_dev(i2400m);
        size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
        size_t tail_room = __i2400m_tx_tail_room(i2400m);
        struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
        if (unlikely(tail_room == 0))
                return;
        BUG_ON(tail_room < sizeof(*msg));
        msg->size = tail_room | I2400M_TX_SKIP;
        d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
                 tail_room, tx_in);
        i2400m->tx_in += tail_room;
}


/*
 * Check if a skb will fit in the TX queue's current active TX
 * message (if there are still descriptors left unused).
 *
 * Returns:
 *     0 if the message won't fit, 1 if it will.
 *
 * Note:
 *
 *     Assumes a TX message is active (i2400m->tx_msg).
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 */
static
unsigned i2400m_tx_fits(struct i2400m *i2400m)
{
        struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
        return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
}
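
/*
 * The canonical calling pattern for i2400m_tx_fifo_push(), as used by
 * i2400m_tx_new() and i2400m_tx() below; shown here in isolation as a
 * sketch of why try_head exists:
 *
 *   bool try_head = false;
 * retry:
 *   ptr = i2400m_tx_fifo_push(i2400m, size, padding, try_head);
 *   if (ptr == TAIL_FULL) {       // no tail room, but head room exists
 *           i2400m_tx_skip_tail(i2400m);
 *           try_head = true;      // next attempt goes for the head
 *           goto retry;           // ...instead of looping on the tail
 *   } else if (ptr == NULL) {
 *           // FIFO is full; fail the allocation
 *   }
 */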

/*
 * Start a new TX message header in the queue.
 *
 * Reserve memory from the base FIFO engine and then just initialize
 * the message header.
 *
 * We allocate the biggest TX message header we might need (one that'd
 * fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be
 * 'ironed out' and the unneeded parts removed.
 *
 * NOTE:
 *
 *     Assumes that the previous message is CLOSED (eg: either
 *     there was none or 'i2400m_tx_close()' was called on it).
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 */
static
void i2400m_tx_new(struct i2400m *i2400m)
{
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_msg_hdr *tx_msg;
        bool try_head = false;
        BUG_ON(i2400m->tx_msg != NULL);
        /*
         * In certain situations, the TX queue might have enough space
         * to accommodate the new message header (I2400M_TX_PLD_SIZE),
         * but might not have enough space to accommodate the payloads.
         * Adding bus_tx_room_min padding while allocating a new TX
         * message increases the possibility of including at least one
         * payload of size <= bus_tx_room_min.
         */
try_head:
        tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
                                     i2400m->bus_tx_room_min, try_head);
        if (tx_msg == NULL)
                goto out;
        else if (tx_msg == TAIL_FULL) {
                i2400m_tx_skip_tail(i2400m);
                d_printf(2, dev, "new TX message: tail full, trying head\n");
                try_head = true;
                goto try_head;
        }
        memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
        tx_msg->size = I2400M_TX_PLD_SIZE;
out:
        i2400m->tx_msg = tx_msg;
        d_printf(2, dev, "new TX message: %p @%zu\n",
                 tx_msg, (void *) tx_msg - i2400m->tx_buf);
}
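
/*
 * For example (illustrative numbers only): with the I2400M_TX_PLD_SIZE
 * of 256 bytes defined above (16-byte header + 60 descriptors) and a
 * hypothetical bus_tx_room_min of 4096, i2400m_tx_fifo_push() will
 * only open a message where 256 + 4096 contiguous bytes are free, so
 * at least one payload of up to bus_tx_room_min bytes is guaranteed to
 * fit right after the header.
 */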

/*
 * Finalize the current TX message header
 *
 * Sets the message header to be at the proper location depending on
 * how many descriptors we have (check documentation at the file's
 * header for more info on that).
 *
 * Appends padding bytes to make sure the whole TX message (counting
 * from the 'relocated' message header) is aligned to
 * tx_block_size. We assume the _append() code has left enough space
 * in the FIFO for that. If there are no payloads, just pass, as it
 * won't be transferred.
 *
 * The amount of padding bytes depends on how many payloads are in the
 * TX message, as the "msg header and payload descriptors" will be
 * shifted up in the buffer.
 */
static
void i2400m_tx_close(struct i2400m *i2400m)
{
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
        struct i2400m_msg_hdr *tx_msg_moved;
        size_t aligned_size, padding, hdr_size;
        void *pad_buf;
        unsigned num_pls;

        if (tx_msg->size & I2400M_TX_SKIP)	/* a skipper? nothing to do */
                goto out;
        num_pls = le16_to_cpu(tx_msg->num_pls);
        /* We can get this situation when a new message was started
         * and there was no space to add payloads before hitting the
         * tail (and taking padding into consideration). */
        if (num_pls == 0) {
                tx_msg->size |= I2400M_TX_SKIP;
                goto out;
        }
        /* Relocate the message header
         *
         * Find the current header size, align it to 16 and if we need
         * to move it so that its tail is next to the payloads, move it
         * and set the offset.
         *
         * If it moved, this header is good only for transmission; the
         * original one (which is kept if we moved) is still used to
         * figure out where the next TX message starts (and where the
         * offset to the moved header is).
         */
        hdr_size = sizeof(*tx_msg)
                + le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
        hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
        tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
        tx_msg_moved = (void *) tx_msg + tx_msg->offset;
        memmove(tx_msg_moved, tx_msg, hdr_size);
        tx_msg_moved->size -= tx_msg->offset;
        /*
         * Now figure out how much we have to add to the (moved!)
         * message so the size is a multiple of
         * i2400m->bus_tx_block_size.
         */
        aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
        padding = aligned_size - tx_msg_moved->size;
        if (padding > 0) {
                pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, false);
                if (unlikely(WARN_ON(pad_buf == NULL
                                     || pad_buf == TAIL_FULL))) {
                        /* This should not happen -- append should verify
                         * there is always space left at least to append
                         * tx_block_size */
                        dev_err(dev,
                                "SW BUG! Possible data leakage from memory the "
                                "device should not read for padding - "
                                "size %lu aligned_size %zu tx_buf %p in "
                                "%zu out %zu\n",
                                (unsigned long) tx_msg_moved->size,
                                aligned_size, i2400m->tx_buf, i2400m->tx_in,
                                i2400m->tx_out);
                } else
                        memset(pad_buf, 0xad, padding);
        }
        tx_msg_moved->padding = cpu_to_le16(padding);
        tx_msg_moved->size += padding;
        if (tx_msg != tx_msg_moved)
                tx_msg->size += padding;
out:
        i2400m->tx_msg = NULL;
}
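
/*
 * A worked example of the close-time padding above (illustrative
 * values; bus_tx_block_size is whatever the bus layer set): suppose
 * the moved message measures 360 bytes and bus_tx_block_size is 256.
 * Then:
 *
 *   aligned_size = ALIGN(360, 256) = 512
 *   padding      = 512 - 360       = 152
 *
 * so 152 bytes of 0xad filler are pushed and both sizes (moved and
 * original header) grow by 152, keeping the on-the-wire transaction a
 * multiple of the bus block size.
 */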

/**
 * i2400m_tx - send the data in a buffer to the device
 *
 * @i2400m: device descriptor
 *
 * @buf: pointer to the buffer to transmit
 *
 * @buf_len: buffer size
 *
 * @pl_type: type of the payload we are sending.
 *
 * Returns:
 *     0 if ok, < 0 errno code on error (-ENOSPC, if there is no more
 *     room for the message in the queue).
 *
 * Appends the buffer to the TX FIFO and notifies the bus-specific
 * part of the driver that there is new data ready to transmit.
 * Once this function returns, the buffer has been copied, so it can
 * be reused.
 *
 * The steps followed to append are explained in detail in the file
 * header.
 *
 * Whenever we write to a message, we increase msg->size, so it
 * reflects exactly how big the message is. This is needed so that if
 * we concatenate two messages before they can be sent, the code that
 * sends the messages can find the boundaries (and it will replace the
 * size with the real barker before sending).
 *
 * Note:
 *
 *     Cold and warm reset payloads need to be sent as a single
 *     payload, so we handle that.
 */
int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
              enum i2400m_pt pl_type)
{
        int result = -ENOSPC;
        struct device *dev = i2400m_dev(i2400m);
        unsigned long flags;
        size_t padded_len;
        void *ptr;
        bool try_head = false;
        unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
                || pl_type == I2400M_PT_RESET_COLD;

        d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
                  i2400m, buf, buf_len, pl_type);
        padded_len = ALIGN(buf_len, I2400M_PL_ALIGN);
        d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
        /* If there is no current TX message, create one; if the
         * current one is out of payload slots or we have a singleton,
         * close it and start a new one */
        spin_lock_irqsave(&i2400m->tx_lock, flags);
        /* If tx_buf is NULL, the device is in shutdown */
        if (i2400m->tx_buf == NULL) {
                result = -ESHUTDOWN;
                goto error_tx_new;
        }
try_new:
        if (unlikely(i2400m->tx_msg == NULL))
                i2400m_tx_new(i2400m);
        else if (unlikely(!i2400m_tx_fits(i2400m)
                          || (is_singleton && i2400m->tx_msg->num_pls != 0))) {
                d_printf(2, dev, "closing TX message (fits %u singleton "
                         "%u num_pls %u)\n", i2400m_tx_fits(i2400m),
                         is_singleton, i2400m->tx_msg->num_pls);
                i2400m_tx_close(i2400m);
                i2400m_tx_new(i2400m);
        }
        if (i2400m->tx_msg == NULL)
                goto error_tx_new;
        /*
         * Check if this skb will fit in the TX queue's current active
         * TX message. The total message size must not exceed the
         * maximum size of each message, I2400M_TX_MSG_SIZE. If it
         * would, close the current message and push this skb into a
         * new message.
         */
        if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
                d_printf(2, dev, "TX: message too big, going new\n");
                i2400m_tx_close(i2400m);
                i2400m_tx_new(i2400m);
        }
        if (i2400m->tx_msg == NULL)
                goto error_tx_new;
        /* So we have a current message header; now append space for
         * the message -- if there is not enough, try the head */
        ptr = i2400m_tx_fifo_push(i2400m, padded_len,
                                  i2400m->bus_tx_block_size, try_head);
        if (ptr == TAIL_FULL) {	/* Tail is full, try head */
                d_printf(2, dev, "pl append: tail full\n");
                i2400m_tx_close(i2400m);
                i2400m_tx_skip_tail(i2400m);
                try_head = true;
                goto try_new;
        } else if (ptr == NULL) {	/* All full */
                result = -ENOSPC;
                d_printf(2, dev, "pl append: all full\n");
        } else {	/* Got space, copy it, set padding */
                struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
                unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
                memcpy(ptr, buf, buf_len);
                memset(ptr + buf_len, 0xad, padded_len - buf_len);
                i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
                d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx)\n",
                         le32_to_cpu(tx_msg->pld[num_pls].val),
                         pl_type, buf_len);
                tx_msg->num_pls = cpu_to_le16(num_pls + 1);
                tx_msg->size += padded_len;
                d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
                         padded_len, tx_msg->size, num_pls + 1);
                d_printf(2, dev,
                         "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
                         (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
                         num_pls + 1, ptr - i2400m->tx_buf, buf_len, padded_len);
                result = 0;
                if (is_singleton)
                        i2400m_tx_close(i2400m);
        }
error_tx_new:
        spin_unlock_irqrestore(&i2400m->tx_lock, flags);
        /* kick in most cases, except when the TX subsys is down, as
         * it might free space */
        if (likely(result != -ESHUTDOWN))
                i2400m->bus_tx_kick(i2400m);
        d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
                i2400m, buf, buf_len, pl_type, result);
        return result;
}
EXPORT_SYMBOL_GPL(i2400m_tx);
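
/*
 * A minimal usage sketch for i2400m_tx() (hypothetical caller; @cmd,
 * @cmd_len and @dev are assumed to exist in the caller): queueing a
 * control message the driver has already serialized. The buffer can be
 * freed or reused as soon as the call returns, since i2400m_tx()
 * copies it into the FIFO:
 *
 *   result = i2400m_tx(i2400m, cmd, cmd_len, I2400M_PT_CTRL);
 *   if (result < 0)
 *           dev_err(dev, "cannot queue control message: %d\n", result);
 */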

/**
 * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
 *
 * @i2400m: device descriptor
 * @bus_size: where to place the size of the TX message
 *
 * Called by the bus-specific driver to get the first TX message at
 * the FIFO that is ready for transmission.
 *
 * It sets the state in @i2400m to indicate the bus-specific driver is
 * transferring that message (i2400m->tx_msg_size).
 *
 * Once the transfer is completed, call i2400m_tx_msg_sent().
 *
 * Notes:
 *
 *     The size of the TX message to be transmitted might be smaller than
 *     that of the TX message in the FIFO (in case the header was
 *     shorter). Hence, we copy it in @bus_size, for the bus layer to
 *     use. We keep the message's size in i2400m->tx_msg_size so that
 *     when the bus later is done transferring we know how much to
 *     advance the fifo.
 *
 *     We collect statistics here as all the data is available and we
 *     assume it is going to work [see i2400m_tx_msg_sent()].
 */
struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
                                         size_t *bus_size)
{
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
        unsigned long flags, pls;

        d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
        spin_lock_irqsave(&i2400m->tx_lock, flags);
        tx_msg_moved = NULL;
        if (i2400m->tx_buf == NULL)
                goto out_unlock;
skip:
        tx_msg_moved = NULL;
        if (i2400m->tx_in == i2400m->tx_out) {	/* Empty FIFO? */
                i2400m->tx_in = 0;
                i2400m->tx_out = 0;
                d_printf(2, dev, "TX: FIFO empty: resetting\n");
                goto out_unlock;
        }
        tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
        if (tx_msg->size & I2400M_TX_SKIP) {	/* skip? */
                d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
                         i2400m->tx_out % I2400M_TX_BUF_SIZE,
                         (size_t) tx_msg->size & ~I2400M_TX_SKIP);
                i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
                goto skip;
        }

        if (tx_msg->num_pls == 0) {	/* No payloads? */
                if (tx_msg == i2400m->tx_msg) {	/* open, we are done */
                        d_printf(2, dev,
                                 "TX: FIFO empty: open msg w/o payloads @%zu\n",
                                 (void *) tx_msg - i2400m->tx_buf);
                        tx_msg = NULL;
                        goto out_unlock;
                } else {	/* closed, skip it */
                        d_printf(2, dev,
                                 "TX: skip msg w/o payloads @%zu (%zu b)\n",
                                 (void *) tx_msg - i2400m->tx_buf,
                                 (size_t) tx_msg->size);
                        i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
                        goto skip;
                }
        }
        if (tx_msg == i2400m->tx_msg)	/* open msg? */
                i2400m_tx_close(i2400m);

        /* Now we have a valid TX message (with payloads) to TX */
        tx_msg_moved = (void *) tx_msg + tx_msg->offset;
        i2400m->tx_msg_size = tx_msg->size;
        *bus_size = tx_msg_moved->size;
        d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
                 "size %zu bus_size %zu\n",
                 current->pid, (void *) tx_msg - i2400m->tx_buf,
                 (size_t) tx_msg->offset, (size_t) tx_msg->size,
                 (size_t) tx_msg_moved->size);
        tx_msg_moved->barker = cpu_to_le32(I2400M_H2D_PREVIEW_BARKER);
        tx_msg_moved->sequence = cpu_to_le32(i2400m->tx_sequence++);

        pls = le16_to_cpu(tx_msg_moved->num_pls);
        i2400m->tx_pl_num += pls;	/* Update stats */
        if (pls > i2400m->tx_pl_max)
                i2400m->tx_pl_max = pls;
        if (pls < i2400m->tx_pl_min)
                i2400m->tx_pl_min = pls;
        i2400m->tx_num++;
        i2400m->tx_size_acc += *bus_size;
        if (*bus_size < i2400m->tx_size_min)
                i2400m->tx_size_min = *bus_size;
        if (*bus_size > i2400m->tx_size_max)
                i2400m->tx_size_max = *bus_size;
out_unlock:
        spin_unlock_irqrestore(&i2400m->tx_lock, flags);
        d_fnend(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
                i2400m, bus_size, *bus_size, tx_msg_moved);
        return tx_msg_moved;
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);


/**
 * i2400m_tx_msg_sent - indicate the transmission of a TX message
 *
 * @i2400m: device descriptor
 *
 * Called by the bus-specific driver when a message has been sent;
 * this pops it from the FIFO; and as there is space, starts the queue
 * in case it was stopped.
 *
 * Should be called even if the message send failed and we are
 * dropping this TX message.
 */
void i2400m_tx_msg_sent(struct i2400m *i2400m)
{
        unsigned n;
        unsigned long flags;
        struct device *dev = i2400m_dev(i2400m);

        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
        spin_lock_irqsave(&i2400m->tx_lock, flags);
        if (i2400m->tx_buf == NULL)
                goto out_unlock;
        i2400m->tx_out += i2400m->tx_msg_size;
        d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
        i2400m->tx_msg_size = 0;
        BUG_ON(i2400m->tx_out > i2400m->tx_in);
        /* level the FIFO markers off */
        n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
        i2400m->tx_out %= I2400M_TX_BUF_SIZE;
        i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
out_unlock:
        spin_unlock_irqrestore(&i2400m->tx_lock, flags);
        d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
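
/*
 * The marker leveling above in numbers (illustrative, with the 64KiB
 * buffer): if after a send tx_out ends up at 66000 and tx_in at 70000,
 * then n = 66000 / 65536 = 1, so tx_out becomes 66000 % 65536 = 464
 * and tx_in becomes 70000 - 65536 = 4464. Both markers drop by one
 * full buffer length while still pointing at the same FIFO positions.
 */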

/**
 * i2400m_tx_setup - Initialize the TX queue and infrastructure
 *
 * @i2400m: device descriptor
 *
 * Make sure we reset the TX sequence to zero, as when this function
 * is called, the firmware has just been restarted. Same rationale
 * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
 * the memory for the TX queue is reallocated.
 */
int i2400m_tx_setup(struct i2400m *i2400m)
{
        int result = 0;
        void *tx_buf;
        unsigned long flags;

        /* Do this here only once -- we can't do it in
         * i2400m_hard_start_xmit() as we'd cause race conditions if
         * the WS was scheduled on another CPU */
        INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);

        tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
        if (tx_buf == NULL) {
                result = -ENOMEM;
                goto error_kmalloc;
        }

        /*
         * Fail the build if we can't fit at least two maximum size
         * messages on the TX FIFO [one being delivered while one is
         * constructed].
         */
        BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
        spin_lock_irqsave(&i2400m->tx_lock, flags);
        i2400m->tx_sequence = 0;
        i2400m->tx_in = 0;
        i2400m->tx_out = 0;
        i2400m->tx_msg_size = 0;
        i2400m->tx_msg = NULL;
        i2400m->tx_buf = tx_buf;
        spin_unlock_irqrestore(&i2400m->tx_lock, flags);
        /* Huh? the bus layer has to define this... */
        BUG_ON(i2400m->bus_tx_block_size == 0);
error_kmalloc:
        return result;
}

/**
 * i2400m_tx_release - Tear down the TX queue and infrastructure
 *
 * @i2400m: device descriptor
 */
void i2400m_tx_release(struct i2400m *i2400m)
{
        unsigned long flags;
        spin_lock_irqsave(&i2400m->tx_lock, flags);
        kfree(i2400m->tx_buf);
        i2400m->tx_buf = NULL;
        spin_unlock_irqrestore(&i2400m->tx_lock, flags);
}