/*
 * Intel Wireless WiMAX Connection 2400m
 * Generic (non-bus specific) TX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Rewritten to use a single FIFO to lower the memory allocation
 *    pressure and optimize cache hits when copying to the queue, as
 *    well as splitting out bus-specific code.
 *
 *
 * Implements data transmission to the device; this is done through a
 * software FIFO, as data/control frames can be coalesced (while the
 * device is reading the previous TX transaction, others accumulate).
 *
 * A FIFO is used because in the end it is resource-cheaper than trying
 * to implement scatter/gather over USB. As well, most traffic is going
 * to be download (vs upload).
 *
 * The format for sending/receiving data to/from the i2400m is
 * described in detail in rx.c:PROTOCOL FORMAT. In here we implement
 * the transmission of that. This is split between a bus-independent
 * part that just prepares everything and a bus-specific part that
 * does the actual transmission over the bus to the device (in the
 * bus-specific driver).
 *
 *
 * The general format of a device-host transaction is MSG-HDR, PLD1,
 * PLD2...PLDN, PL1, PL2,...PLN, PADDING.
 *
 * Because we need to send the payload descriptors first and then the
 * payloads, and because it is kind of expensive to do scatterlists in
 * USB (one URB per node), it becomes cheaper to append all the data
 * to a FIFO (copying to a FIFO potentially in cache is cheaper).
 *
 * Then the bus-specific code takes the parts of that FIFO that are
 * written and passes them to the device.
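 *
 * As a hedged, illustrative example of such a transaction (sizes
 * follow the constants defined below; the 16-byte alignment rules are
 * detailed further down): two payloads of 100 and 50 bytes would be
 * laid out in the FIFO roughly as
 *
 *   MSG-HDR + 12 PLDs    64 bytes   (space reserved up front)
 *   payload 0           112 bytes   (100 padded to a multiple of 16)
 *   payload 1            64 bytes   (50 padded to a multiple of 16)
 *   padding                         (up to i2400m->bus_tx_block_size)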
 *
 * So the concepts to keep in mind here are:
 *
 * We use a FIFO to queue the data in a linear buffer. We first append
 * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
 * go appending payloads until we run out of space or of payload
 * descriptors. Then we append padding to make the whole transaction a
 * multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
 *
 * - A TX message: a combination of a message header, payload
 *   descriptors and payloads.
 *
 *   Open: it is marked as active (i2400m->tx_msg is valid) and we
 *   can keep adding payloads to it.
 *
 *   Closed: we are not appending more payloads to this TX message
 *   (exhausted space in the queue, too many payloads or
 *   whichever). We have appended padding so the whole message
 *   length is aligned to i2400m->bus_tx_block_size (as set by the
 *   bus/transport layer).
 *
 * - Most of the time we keep a TX message open to which we append
 *   payloads.
 *
 * - If we are going to append and there is no more space (we are at
 *   the end of the FIFO), we close the message, mark the rest of the
 *   FIFO space unusable (skip_tail), create a new message at the
 *   beginning of the FIFO (if there is space) and append the payload
 *   there.
 *
 *   This is because we need to give linear TX messages to the bus
 *   engine. So we don't split a message over the remaining FIFO space
 *   up to the tail; we skip it and continue at the head of the
 *   buffer.
 *
 * - We overload one of the fields in the message header to use it as
 *   'size' of the TX message, so we can iterate over them. It also
 *   contains a flag that indicates if we have to skip it or not.
 *   When we send the buffer, we update that to its real on-the-wire
 *   value.
 *
 * - The MSG-HDR PLD1...PLDN stuff has to be a size multiple of 16.
 *
 *   It follows that if MSG-HDR says we have N payloads, the whole
 *   header + descriptors is 16 + 4*N; for that to be a multiple of
 *   16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80...
 *   bytes).
 *
 *   So if we have only 1 payload, we have to submit a header that in
 *   all truth has space for 4.
 *
 *   The implication is that we reserve space for 12 (64 bytes); but
 *   if we fill up only (eg) 2, our header becomes 32 bytes only. So
 *   the TX engine has to shift those 32 bytes of msg header and
 *   payload descriptors up so that the payloads start right after
 *   them, and it has to know about that.
 *
 *   It is cheaper to move the header up than to move the whole
 *   payloads down.
 *
 *   We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'.
 *
 * - Each payload has to be size-padded to 16 bytes; before appending
 *   it, we just do it.
 *
 * - The whole message has to be padded to i2400m->bus_tx_block_size;
 *   we do this at close time. Thus, when reserving space for the
 *   payload, we always make sure there is also free space for this
 *   padding that sooner or later will happen.
 *
 * When we append a message, we tell the bus-specific code to kick in
 * TXs. It will TX (in parallel) until the buffer is exhausted--hence
 * the locking we do. The TX code will only send one TX message at a
 * time (which, remember, might contain more than one payload). Of
 * course, when the bus-specific driver attempts to TX a message that
 * is still open, it gets closed first.
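 *
 * A hedged worked example of the header math above: with 2 payloads
 * the header is 16 + 2*4 = 24 bytes, which aligned to 16 is 32; the
 * space reserved up front was for 12 payload descriptors (64 bytes),
 * so the header is moved up by
 *
 *   hdr_size = ALIGN(16 + 2 * 4, 16)           => 32
 *   offset   = I2400M_TX_PLD_SIZE - hdr_size   => 64 - 32 = 32
 *
 * bytes, so that the payloads start right after it.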
 *
 * Gee, this is messy; well, a picture. In the example below we have a
 * partially full FIFO, with a closed message ready to be delivered
 * (with a moved message header to make sure it is size-aligned to
 * 16), TAIL room that was unusable (and thus is marked with a message
 * header that says 'skip this') and at the head of the buffer, an
 * incomplete message with a couple of payloads.
 *
 * N  ___________________________________________________
 *   |                                                   |
 *   |                     TAIL room                     |
 *   |                                                   |
 *   |      msg_hdr to skip (size |= 0x80000000)         |
 *   |---------------------------------------------------|-------
 *   |                                                   |  /|\
 *   |                                                   |   |
 *   |                TX message padding                 |   |
 *   |                                                   |   |
 *   |                                                   |   |
 *   |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
 *   |                                                   |   |
 *   |                    payload 1                      |   |
 *   |                                                   | N * tx_block_size
 *   |                                                   |   |
 *   |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
 *   |                                                   |   |
 *   |                    payload 0                      |   |
 *   |                                                   |   |
 *   |                                                   |   |
 *   |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
 *   |  padding 3                /|\                     |   |   /|\
 *   |  padding 2                 |                      |   |    |
 *   |  pld 1             32 bytes (2 * 16)              |   |    |
 *   |  pld 0                     |                      |   |    |
 *   |  moved msg_hdr            \|/                     |  \|/   |
 *   |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - -   |
 *   |                                                   |    _PLD_SIZE
 *   |                     unused                        |        |
 *   |                                                   |        |
 *   |- - - - - - - - - - - - - - - - - - - - - - - - - -|        |
 *   |  msg_hdr (size X)      [this message is closed]   |       \|/
 *   |===================================================|========== <=== OUT
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |                    Free room                      |
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |===================================================|========== <=== IN
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |                                                   |
 *   |                    payload 1                      |
 *   |                                                   |
 *   |                                                   |
 *   |- - - - - - - - - - - - - - - - - - - - - - - - - -|
 *   |                                                   |
 *   |                    payload 0                      |
 *   |                                                   |
 *   |                                                   |
 *   |- - - - - - - - - - - - - - - - - - - - - - - - - -|
 *   |  pld 11                   /|\                     |
 *   |  ...                       |                      |
 *   |  pld 1             64 bytes (4 * 16)              |
 *   |  pld 0                     |                      |
 *   |  msg_hdr (size X)         \|/  [message is open]  |
 * 0  ---------------------------------------------------
 *
 *
 * ROADMAP
 *
 * i2400m_tx_setup()             Called by i2400m_setup()
 * i2400m_tx_release()           Called by i2400m_release()
 *
 * i2400m_tx()                   Called to send data or control frames
 *   i2400m_tx_fifo_push()       Allocates append-space in the FIFO
 *   i2400m_tx_new()             Opens a new message in the FIFO
 *   i2400m_tx_fits()            Checks if a new payload fits in the message
 *   i2400m_tx_close()           Closes an open message in the FIFO
 *   i2400m_tx_skip_tail()       Marks unusable FIFO tail space
 *   i2400m->bus_tx_kick()
 *
 * Now i2400m->bus_tx_kick() is the bus-specific driver backend
 * implementation; that would do:
 *
 * i2400m->bus_tx_kick()
 *   i2400m_tx_msg_get()         Gets first message ready to go
 *   ...sends it...
 *   i2400m_tx_msg_sent()        Ack the message is sent; repeat from
 *                               _tx_msg_get() until it returns NULL
 *                               (FIFO empty).
 */
#include <linux/netdevice.h>
#include "i2400m.h"


#define D_SUBMODULE tx
#include "debug-levels.h"

enum {
	/**
	 * TX Buffer size
	 *
	 * Doc says the maximum transaction is 16KiB. If we had 16KiB en
	 * route and 16KiB being queued, it boils down to needing
	 * 32KiB.
	 */
	I2400M_TX_BUF_SIZE = 32768,
	/**
	 * Message header and payload descriptors have to be 16
	 * aligned (16 + 4 * N = 16 * M).
	 * Given that the average sent
	 * packet is around MTU size (~1400-1500 bytes), it follows
	 * that at most 10-11 payloads fit in one transaction. To meet
	 * the alignment requirement, that means we need to leave
	 * space for 12 payload descriptors (64 bytes). To simplify,
	 * we always reserve that much; if at the end there are fewer
	 * payloads, we pad the header up to the nearest multiple of
	 * 16.
	 */
	I2400M_TX_PLD_MAX = 12,
	I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
			     + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
	I2400M_TX_SKIP = 0x80000000,
};

#define TAIL_FULL ((void *)~(unsigned long)NULL)

/*
 * Calculate how much tail room is available
 *
 * Note the trick here. This path is ONLY called for Case A (see
 * i2400m_tx_fifo_push() below), where we have:
 *
 *                 Case A
 *           N  ___________
 *             | tail room |
 *             |           |
 *             |<-  IN   ->|
 *             |           |
 *             |   data    |
 *             |           |
 *             |<-  OUT  ->|
 *             |           |
 *             | head room |
 *           0  -----------
 *
 * When calculating the tail_room, the local tx_in can come out as
 * zero if i2400m->tx_in is right at the end of the buffer (really
 * full buffer) and there is no head room; tail_room would then be
 * I2400M_TX_BUF_SIZE although it is actually zero. Hence the final
 * mod (%) operation. However, with that optimization alone,
 * i2400m->tx_in being zero (empty buffer) would wrongly yield zero
 * as well, so we treat it as a special case.
 */
static inline
size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
{
	size_t tail_room;
	size_t tx_in;

	if (unlikely(i2400m->tx_in == 0))
		return I2400M_TX_BUF_SIZE;
	tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
	tail_room = I2400M_TX_BUF_SIZE - tx_in;
	tail_room %= I2400M_TX_BUF_SIZE;
	return tail_room;
}
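

/*
 * A hedged worked example of the tail room math above (numbers
 * illustrative): with I2400M_TX_BUF_SIZE = 32768, tx_out = 28672 and
 * tx_in = 30720 (Case A below), tail_room = (32768 - 30720) % 32768
 * = 2048; a request needing more contiguous bytes than that cannot
 * be satisfied at the tail, so i2400m_tx_fifo_push() below would
 * return TAIL_FULL and let the caller retry at the head.
 */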


/*
 * Allocate @size bytes in the TX fifo, return a pointer to it
 *
 * @i2400m: device descriptor
 * @size: size of the buffer we need to allocate
 * @padding: ensure that there is at least this many bytes of free
 *     contiguous space in the fifo. This is needed because later on
 *     we might need to add padding.
 *
 * Returns:
 *
 *     Pointer to the allocated space. NULL if there is no
 *     space. TAIL_FULL if there is no space at the tail but there is
 *     at the head (Case B below).
 *
 * These are the two basic cases we need to keep an eye out for -- it
 * is much better explained in linux/kernel/kfifo.c, but this code
 * basically does the same. No rocket science here.
 *
 *       Case A                Case B
 * N  ___________          ___________
 *   | tail room |        |   data    |
 *   |           |        |           |
 *   |<-  IN   ->|        |<-  OUT  ->|
 *   |           |        |           |
 *   |   data    |        |   room    |
 *   |           |        |           |
 *   |<-  OUT  ->|        |<-  IN   ->|
 *   |           |        |           |
 *   | head room |        |   data    |
 * 0  -----------          -----------
 *
 * We allocate only *contiguous* space.
 *
 * We can allocate only from 'room'. In Case B, it is simple; in Case
 * A, we only try from the tail room; if it is not enough, we just
 * fail, return TAIL_FULL and let the caller figure out if it wants to
 * skip the tail room and try to allocate from the head.
 *
 * Note:
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 *
 *     The indexes keep increasing and we reset them to zero when we
 *     pop data off the queue
 */
static
void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t room, tail_room, needed_size;
	void *ptr;

	needed_size = size + padding;
	room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
	if (room < needed_size) {	/* this takes care of Case B */
		d_printf(2, dev, "fifo push %zu/%zu: no space\n",
			 size, padding);
		return NULL;
	}
	/* Is there space at the tail? */
	tail_room = __i2400m_tx_tail_room(i2400m);
	if (tail_room < needed_size) {
		if (i2400m->tx_out % I2400M_TX_BUF_SIZE
		    < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
			d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
				 size, padding);
			return TAIL_FULL;	/* There might be head space */
		} else {
			d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
				 size, padding);
			return NULL;	/* There is no space */
		}
	}
	ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
	d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
		 i2400m->tx_in % I2400M_TX_BUF_SIZE);
	i2400m->tx_in += size;
	return ptr;
}


/*
 * Mark the tail of the FIFO buffer as 'to-skip'
 *
 * We should never hit the BUG_ON() because all the sizes we push to
 * the FIFO are padded to be a multiple of 16 -- the size of *msg
 * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
 * header).
 *
 * Tail room can get to be zero if a message was opened when there was
 * space only for a header. _tx_close() will mark it as to-skip (as it
 * will have no payloads) and there will be no more space to flush, so
 * nothing has to be done here. This is probably cheaper than ensuring
 * in _tx_new() that there is some space for payloads...as we could
 * always possibly hit the same problem if the payload wouldn't fit.
 *
 * Note:
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 *
 *     This path is only taken for Case A FIFO situations [see
 *     i2400m_tx_fifo_push()]
 */
static
void i2400m_tx_skip_tail(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
	size_t tail_room = __i2400m_tx_tail_room(i2400m);
	struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
	if (unlikely(tail_room == 0))
		return;
	BUG_ON(tail_room < sizeof(*msg));
	msg->size = tail_room | I2400M_TX_SKIP;
	d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
		 tail_room, tx_in);
	i2400m->tx_in += tail_room;
}


/*
 * Check if a skb will fit in the TX queue's current active TX
 * message (if there are still descriptors left unused).
 *
 * Returns:
 *     0 if the message won't fit, 1 if it will.
 *
 * Note:
 *
 *     Assumes a TX message is active (i2400m->tx_msg).
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 */
static
unsigned i2400m_tx_fits(struct i2400m *i2400m)
{
	struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
	return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
}
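

/*
 * A hedged sketch (in comment form; the function names are the ones
 * from this file, the data variable is illustrative) of the calling
 * convention for i2400m_tx_fifo_push(), which both i2400m_tx_new()
 * below and i2400m_tx() follow -- the three possible outcomes have
 * to be handled explicitly:
 *
 *	ptr = i2400m_tx_fifo_push(i2400m, size, padding);
 *	if (ptr == NULL)
 *		// FIFO full: fail (or close and retry later)
 *	else if (ptr == TAIL_FULL) {
 *		i2400m_tx_skip_tail(i2400m);	// mark tail 'skip this'
 *		// ...and retry the allocation from the buffer's head
 *	} else
 *		memcpy(ptr, data, size);	// got contiguous space
 */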


/*
 * Start a new TX message header in the queue.
 *
 * Reserve memory from the base FIFO engine and then just initialize
 * the message header.
 *
 * We allocate the biggest TX message header we might need (one that'd
 * fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be
 * 'ironed out' and the unneeded parts removed.
 *
 * NOTE:
 *
 *     Assumes that the previous message is CLOSED (e.g.: either
 *     there was none or 'i2400m_tx_close()' was called on it).
 *
 *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
 */
static
void i2400m_tx_new(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg;
	BUG_ON(i2400m->tx_msg != NULL);
try_head:
	tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
	if (tx_msg == NULL)
		goto out;
	else if (tx_msg == TAIL_FULL) {
		i2400m_tx_skip_tail(i2400m);
		d_printf(2, dev, "new TX message: tail full, trying head\n");
		goto try_head;
	}
	memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
	tx_msg->size = I2400M_TX_PLD_SIZE;
out:
	i2400m->tx_msg = tx_msg;
	d_printf(2, dev, "new TX message: %p @%zu\n",
		 tx_msg, (void *) tx_msg - i2400m->tx_buf);
}
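

/*
 * A hedged worked example of the padding math in i2400m_tx_close()
 * below (block size illustrative): if the relocated message is 280
 * bytes long and i2400m->bus_tx_block_size is 16, the message grows
 * by ALIGN(280, 16) - 280 = 8 bytes of 0xad filler, so the bus layer
 * can transfer it in whole blocks.
 */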


/*
 * Finalize the current TX message header
 *
 * Sets the message header to be at the proper location depending on
 * how many descriptors we have (check documentation at the file's
 * header for more info on that).
 *
 * Appends padding bytes to make sure the whole TX message (counting
 * from the 'relocated' message header) is aligned to
 * bus_tx_block_size. We assume the _append() code has left enough
 * space in the FIFO for that. If there are no payloads, just pass,
 * as it won't be transferred.
 *
 * The amount of padding bytes depends on how many payloads are in the
 * TX message, as the "msg header and payload descriptors" will be
 * shifted up in the buffer.
 */
static
void i2400m_tx_close(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
	struct i2400m_msg_hdr *tx_msg_moved;
	size_t aligned_size, padding, hdr_size;
	void *pad_buf;
	unsigned num_pls;

	if (tx_msg->size & I2400M_TX_SKIP)	/* a skipper? nothing to do */
		goto out;
	num_pls = le16_to_cpu(tx_msg->num_pls);
	/* We can get this situation when a new message was started
	 * and there was no space to add payloads before hitting the
	 * tail (and taking padding into consideration). */
	if (num_pls == 0) {
		tx_msg->size |= I2400M_TX_SKIP;
		goto out;
	}
	/* Relocate the message header
	 *
	 * Find the current header size, align it to 16 and if we need
	 * to move it so the tail is next to the payloads, move it and
	 * set the offset.
	 *
	 * If it moved, this header is good only for transmission; the
	 * original one (which is kept if we moved) is still used to
	 * figure out where the next TX message starts (and where the
	 * offset to the moved header is).
	 */
	hdr_size = sizeof(*tx_msg)
		+ le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
	hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
	tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
	memmove(tx_msg_moved, tx_msg, hdr_size);
	tx_msg_moved->size -= tx_msg->offset;
	/*
	 * Now figure out how much we have to add to the (moved!)
	 * message so the size is a multiple of
	 * i2400m->bus_tx_block_size.
	 */
	aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
	padding = aligned_size - tx_msg_moved->size;
	if (padding > 0) {
		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
		if (unlikely(WARN_ON(pad_buf == NULL
				     || pad_buf == TAIL_FULL))) {
			/* This should not happen -- append should verify
			 * there is always space left at least to append
			 * tx_block_size */
			dev_err(dev,
				"SW BUG! Possible data leakage from memory the "
				"device should not read for padding - "
				"size %lu aligned_size %zu tx_buf %p in "
				"%zu out %zu\n",
				(unsigned long) tx_msg_moved->size,
				aligned_size, i2400m->tx_buf, i2400m->tx_in,
				i2400m->tx_out);
		} else
			memset(pad_buf, 0xad, padding);
	}
	tx_msg_moved->padding = cpu_to_le16(padding);
	tx_msg_moved->size += padding;
	if (tx_msg != tx_msg_moved)
		tx_msg->size += padding;
out:
	i2400m->tx_msg = NULL;
}
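

/*
 * A hedged usage sketch for i2400m_tx() below, roughly as a netdev
 * transmit path might call it (the skb and the queue handling are
 * illustrative, not code from this file):
 *
 *	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
 *	if (result == -ENOSPC)
 *		// FIFO full: stop the queue, retry after _tx_msg_sent()
 *	else
 *		dev_kfree_skb(skb);	// data was copied, skb can go
 */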


/**
 * i2400m_tx - send the data in a buffer to the device
 *
 * @i2400m: device descriptor
 *
 * @buf: pointer to the buffer to transmit
 *
 * @buf_len: buffer size
 *
 * @pl_type: type of the payload we are sending.
 *
 * Returns:
 *     0 if ok, < 0 errno code on error (-ENOSPC, if there is no more
 *     room for the message in the queue).
 *
 * Appends the buffer to the TX FIFO and notifies the bus-specific
 * part of the driver that there is new data ready to transmit.
 * Once this function returns, the buffer has been copied, so it can
 * be reused.
 *
 * The steps followed to append are explained in detail in the file
 * header.
 *
 * Whenever we write to a message, we increase msg->size, so it
 * reflects exactly how big the message is. This is needed so that if
 * we concatenate two messages before they can be sent, the code that
 * sends the messages can find the boundaries (and it will replace the
 * size with the real barker before sending).
 *
 * Note:
 *
 *     Cold and warm reset payloads need to be sent as a single
 *     payload, so we handle that.
 */
int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
	      enum i2400m_pt pl_type)
{
	int result = -ENOSPC;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;
	size_t padded_len;
	void *ptr;
	unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
		|| pl_type == I2400M_PT_RESET_COLD;

	d_fnstart(3, dev, "(i2400m %p buf %p [%zu bytes] pt %u)\n",
		  i2400m, buf, buf_len, pl_type);
	padded_len = ALIGN(buf_len, I2400M_PL_ALIGN);
	d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
	/* If there is no current TX message, create one; if the
	 * current one is out of payload slots or we have a singleton,
	 * close it and start a new one */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
try_new:
	if (unlikely(i2400m->tx_msg == NULL))
		i2400m_tx_new(i2400m);
	else if (unlikely(!i2400m_tx_fits(i2400m)
			  || (is_singleton && i2400m->tx_msg->num_pls != 0))) {
		d_printf(2, dev, "closing TX message (fits %u singleton "
			 "%u num_pls %u)\n", i2400m_tx_fits(i2400m),
			 is_singleton, i2400m->tx_msg->num_pls);
		i2400m_tx_close(i2400m);
		i2400m_tx_new(i2400m);
	}
	if (i2400m->tx_msg == NULL)
		goto error_tx_new;
	if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
		d_printf(2, dev, "TX: message too big, going new\n");
		i2400m_tx_close(i2400m);
		i2400m_tx_new(i2400m);
	}
	if (i2400m->tx_msg == NULL)
		goto error_tx_new;
	/* So we have a current message header; now append space for
	 * the message -- if there is not enough, try the head */
	ptr = i2400m_tx_fifo_push(i2400m, padded_len,
				  i2400m->bus_tx_block_size);
	if (ptr == TAIL_FULL) {	/* Tail is full, try head */
		d_printf(2, dev, "pl append: tail full\n");
		i2400m_tx_close(i2400m);
		i2400m_tx_skip_tail(i2400m);
		goto try_new;
	} else if (ptr == NULL) {	/* All full */
		result = -ENOSPC;
		d_printf(2, dev, "pl append: all full\n");
	} else {	/* Got space, copy it, set padding */
		struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
		unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
		memcpy(ptr, buf, buf_len);
		memset(ptr + buf_len, 0xad, padded_len - buf_len);
		i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
		d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx)\n",
			 le32_to_cpu(tx_msg->pld[num_pls].val),
			 pl_type, buf_len);
		tx_msg->num_pls = cpu_to_le16(num_pls + 1);
		tx_msg->size += padded_len;
		d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
			 padded_len, tx_msg->size, num_pls + 1);
		d_printf(2, dev,
			 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
			 (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
			 num_pls + 1, ptr - i2400m->tx_buf, buf_len, padded_len);
		result = 0;
		if (is_singleton)
			i2400m_tx_close(i2400m);
	}
error_tx_new:
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	i2400m->bus_tx_kick(i2400m);	/* always kick, might free up space */
	d_fnend(3, dev, "(i2400m %p buf %p [%zu bytes] pt %u) = %d\n",
		i2400m, buf, buf_len, pl_type, result);
	return result;
}
EXPORT_SYMBOL_GPL(i2400m_tx);
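

/*
 * A hedged sketch of the bus-specific TX loop from the ROADMAP in the
 * file header, as an i2400m->bus_tx_kick() backend might implement it
 * (the bus transfer primitive is illustrative, not part of this
 * file):
 *
 *	while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
 *		// ...transfer tx_msg_size bytes at tx_msg over the bus...
 *		i2400m_tx_msg_sent(i2400m);	// ack and advance the FIFO
 *	}
 */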


/**
 * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
 *
 * @i2400m: device descriptor
 * @bus_size: where to place the size of the TX message
 *
 * Called by the bus-specific driver to get the first TX message in
 * the FIFO that is ready for transmission.
 *
 * It sets the state in @i2400m to indicate the bus-specific driver is
 * transferring that message (i2400m->tx_msg_size).
 *
 * Once the transfer is completed, call i2400m_tx_msg_sent().
 *
 * Notes:
 *
 *     The size of the TX message to be transmitted might be smaller
 *     than that of the TX message in the FIFO (in case the header was
 *     shorter). Hence, we copy it in @bus_size, for the bus layer to
 *     use. We keep the message's size in i2400m->tx_msg_size so that
 *     when the bus later is done transferring we know how much to
 *     advance the fifo.
 *
 *     We collect statistics here as all the data is available and we
 *     assume it is going to work [see i2400m_tx_msg_sent()].
 */
struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
					 size_t *bus_size)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
	unsigned long flags, pls;

	d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
	spin_lock_irqsave(&i2400m->tx_lock, flags);
skip:
	tx_msg_moved = NULL;
	if (i2400m->tx_in == i2400m->tx_out) {	/* Empty FIFO? */
		i2400m->tx_in = 0;
		i2400m->tx_out = 0;
		d_printf(2, dev, "TX: FIFO empty: resetting\n");
		goto out_unlock;
	}
	tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
	if (tx_msg->size & I2400M_TX_SKIP) {	/* skip? */
		d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
			 i2400m->tx_out % I2400M_TX_BUF_SIZE,
			 (size_t) tx_msg->size & ~I2400M_TX_SKIP);
		i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
		goto skip;
	}

	if (tx_msg->num_pls == 0) {		/* No payloads? */
		if (tx_msg == i2400m->tx_msg) {	/* open, we are done */
			d_printf(2, dev,
				 "TX: FIFO empty: open msg w/o payloads @%zu\n",
				 (void *) tx_msg - i2400m->tx_buf);
			tx_msg = NULL;
			goto out_unlock;
		} else {			/* closed, skip it */
			d_printf(2, dev,
				 "TX: skip msg w/o payloads @%zu (%zu b)\n",
				 (void *) tx_msg - i2400m->tx_buf,
				 (size_t) tx_msg->size);
			i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
			goto skip;
		}
	}
	if (tx_msg == i2400m->tx_msg)		/* open msg? */
		i2400m_tx_close(i2400m);

	/* Now we have a valid TX message (with payloads) to TX */
	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
	i2400m->tx_msg_size = tx_msg->size;
	*bus_size = tx_msg_moved->size;
	d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
		 "size %zu bus_size %zu\n",
		 current->pid, (void *) tx_msg - i2400m->tx_buf,
		 (size_t) tx_msg->offset, (size_t) tx_msg->size,
		 (size_t) tx_msg_moved->size);
	tx_msg_moved->barker = cpu_to_le32(I2400M_H2D_PREVIEW_BARKER);
	tx_msg_moved->sequence = cpu_to_le32(i2400m->tx_sequence++);

	pls = le16_to_cpu(tx_msg_moved->num_pls);
	i2400m->tx_pl_num += pls;		/* Update stats */
	if (pls > i2400m->tx_pl_max)
		i2400m->tx_pl_max = pls;
	if (pls < i2400m->tx_pl_min)
		i2400m->tx_pl_min = pls;
	i2400m->tx_num++;
	i2400m->tx_size_acc += *bus_size;
	if (*bus_size < i2400m->tx_size_min)
		i2400m->tx_size_min = *bus_size;
	if (*bus_size > i2400m->tx_size_max)
		i2400m->tx_size_max = *bus_size;
out_unlock:
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	d_fnend(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
		i2400m, bus_size, *bus_size, tx_msg_moved);
	return tx_msg_moved;
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);


/**
 * i2400m_tx_msg_sent - indicate the transmission of a TX message
 *
 * @i2400m: device descriptor
 *
 * Called by the bus-specific driver when a message has been sent;
 * this pops it from the FIFO; and as that frees up space, the queue
 * can be restarted in case it was stopped.
 *
 * Should be called even if the message send failed and we are
 * dropping this TX message.
 */
void i2400m_tx_msg_sent(struct i2400m *i2400m)
{
	unsigned n;
	unsigned long flags;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	i2400m->tx_out += i2400m->tx_msg_size;
	d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
	i2400m->tx_msg_size = 0;
	BUG_ON(i2400m->tx_out > i2400m->tx_in);
	/* level the FIFO markers off, keeping their relative distance
	 * (e.g., with a 32768 byte buffer, out = 33000 / in = 34000
	 * becomes out = 232 / in = 1232) */
	n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
	i2400m->tx_out %= I2400M_TX_BUF_SIZE;
	i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);


/**
 * i2400m_tx_setup - Initialize the TX queue and infrastructure
 *
 * @i2400m: device descriptor
 *
 * Make sure we reset the TX sequence to zero, as when this function
 * is called, the firmware has just been restarted.
 */
int i2400m_tx_setup(struct i2400m *i2400m)
{
	int result;

	/* Do this here only once -- we can't do it in
	 * i2400m_hard_start_xmit() as we'd cause race conditions if
	 * the WS was scheduled on another CPU */
	INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);

	i2400m->tx_sequence = 0;
	i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
	if (i2400m->tx_buf == NULL)
		result = -ENOMEM;
	else
		result = 0;
	/* Huh? the bus layer has to define this... */
	BUG_ON(i2400m->bus_tx_block_size == 0);
	return result;
}


/**
 * i2400m_tx_release - Tear down the TX queue and infrastructure
 *
 * @i2400m: device descriptor
 */
void i2400m_tx_release(struct i2400m *i2400m)
{
	kfree(i2400m->tx_buf);
}