linux/drivers/tty/tty_buffer.c
/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>


#define MIN_TTYB_SIZE   256
#define TTYB_ALIGN_MASK 255

/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
#define TTYB_DEFAULT_MEM_LIMIT  65536

/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc() for the allocation
 * logic this must match.
 */

#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)

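/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages and a struct tty_buffer of roughly 32 bytes, (4096 - 32) / 2 = 2032,
 * which the ~0xFF mask rounds down to 1792. Each queued data byte is paired
 * with a flag byte, so 2 * 1792 bytes plus the header still fit in a single
 * page allocation.
 */
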
/**
 *      tty_buffer_lock_exclusive       -       gain exclusive access to buffer
 *      tty_buffer_unlock_exclusive     -       release exclusive access
 *
 *      @port: tty_port owning the flip buffer
 *
 *      Guarantees safe use of the line discipline's receive_buf() method by
 *      excluding the buffer work and any pending flush from using the flip
 *      buffer. Data can continue to be added concurrently to the flip buffer
 *      from the driver side.
 *
 *      On release, the buffer work is restarted if there is data in the
 *      flip buffer.
 */

void tty_buffer_lock_exclusive(struct tty_port *port)
{
        struct tty_bufhead *buf = &port->buf;

        atomic_inc(&buf->priority);
        mutex_lock(&buf->lock);
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);

void tty_buffer_unlock_exclusive(struct tty_port *port)
{
        struct tty_bufhead *buf = &port->buf;
        int restart;

        restart = buf->head->commit != buf->head->read;

        atomic_dec(&buf->priority);
        mutex_unlock(&buf->lock);
        if (restart)
                queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
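
/*
 * Illustrative sketch (not part of the original file): a caller that feeds
 * data to the line discipline directly, in the spirit of paste_selection(),
 * brackets that work with the exclusive buffer lock so the flip-buffer
 * worker cannot call receive_buf() concurrently. The helper below and its
 * arguments are hypothetical.
 */
static inline void example_direct_receive(struct tty_struct *tty,
                                          struct tty_ldisc *ld,
                                          unsigned char *data, int count)
{
        tty_buffer_lock_exclusive(tty->port);

        /* Safe: the buffer work and any pending flush are excluded here */
        tty_ldisc_receive_buf(ld, data, NULL, count);

        /* Restarts the buffer work if data was queued in the meantime */
        tty_buffer_unlock_exclusive(tty->port);
}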

/**
 *      tty_buffer_space_avail  -       return unused buffer space
 *      @port: tty_port owning the flip buffer
 *
 *      Returns the # of bytes which can be written by the driver without
 *      reaching the buffer limit.
 *
 *      Note: this does not guarantee that memory is available to write
 *      the returned # of bytes (use tty_prepare_flip_string_xxx() to
 *      pre-allocate if memory guarantee is required).
 */

int tty_buffer_space_avail(struct tty_port *port)
{
        int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
        return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
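
/*
 * Illustrative sketch (hypothetical driver code): use
 * tty_buffer_space_avail() to bound how much is pulled from a hardware FIFO
 * before queueing it, so the flip-buffer memory limit is respected. The
 * helper name and arguments are made up for this example.
 */
static inline int example_limited_rx(struct tty_port *port,
                                     const unsigned char *fifo, int avail)
{
        int room = tty_buffer_space_avail(port);
        int count = min(avail, room);

        /* room is only a hint; the insert below may still queue less */
        return tty_insert_flip_string(port, fifo, count);
}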

static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
        p->used = 0;
        p->size = size;
        p->next = NULL;
        p->commit = 0;
        p->read = 0;
        p->flags = 0;
}

/**
 *      tty_buffer_free_all             -       free buffers used by a tty
 *      @port: tty port to free from
 *
 *      Remove all the buffers pending on a tty whether queued with data
 *      or in the free ring. Must be called when the tty is no longer in use.
 */

void tty_buffer_free_all(struct tty_port *port)
{
        struct tty_bufhead *buf = &port->buf;
        struct tty_buffer *p, *next;
        struct llist_node *llist;

        while ((p = buf->head) != NULL) {
                buf->head = p->next;
                if (p->size > 0)
                        kfree(p);
        }
        llist = llist_del_all(&buf->free);
        llist_for_each_entry_safe(p, next, llist, free)
                kfree(p);

        tty_buffer_reset(&buf->sentinel, 0);
        buf->head = &buf->sentinel;
        buf->tail = &buf->sentinel;

        atomic_set(&buf->mem_used, 0);
}

/**
 *      tty_buffer_alloc        -       allocate a tty buffer
 *      @port: tty port
 *      @size: desired size (characters)
 *
 *      Allocate a new tty buffer to hold the desired number of characters.
 *      We round our buffers off in 256 character chunks to get better
 *      allocation behaviour.
 *      Return NULL if out of memory or the allocation would exceed the
 *      per device queue.
 */

static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
        struct llist_node *free;
        struct tty_buffer *p;

        /* Round the buffer size out */
        size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

        if (size <= MIN_TTYB_SIZE) {
                free = llist_del_first(&port->buf.free);
                if (free) {
                        p = llist_entry(free, struct tty_buffer, free);
                        goto found;
                }
        }

        /* Should possibly check if this fails for the largest buffer we
           have queued and recycle that? */
        if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
                return NULL;
        p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
        if (p == NULL)
                return NULL;

found:
        tty_buffer_reset(p, size);
        atomic_add(size, &port->buf.mem_used);
        return p;
}

/**
 *      tty_buffer_free         -       free a tty buffer
 *      @port: tty port owning the buffer
 *      @b: the buffer to free
 *
 *      Free a tty buffer, or add it to the free list according to our
 *      internal strategy.
 */

static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
        struct tty_bufhead *buf = &port->buf;

        /* Dumb strategy for now - should keep some stats */
        WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);

        if (b->size > MIN_TTYB_SIZE)
                kfree(b);
        else if (b->size > 0)
                llist_add(&b->free, &buf->free);
}

/**
 *      tty_buffer_flush                -       flush full tty buffers
 *      @tty: tty to flush
 *      @ld:  optional ldisc ptr (must be referenced)
 *
 *      Flush all the buffers containing receive data. If ld != NULL,
 *      flush the ldisc input buffer.
 *
 *      Locking: takes buffer lock to ensure single-threaded flip buffer
 *               'consumer'
 */

void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
        struct tty_port *port = tty->port;
        struct tty_bufhead *buf = &port->buf;
        struct tty_buffer *next;

        atomic_inc(&buf->priority);

        mutex_lock(&buf->lock);
        /* paired w/ release in __tty_buffer_request_room; ensures there are
         * no pending memory accesses to the freed buffer
         */
        while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
                tty_buffer_free(port, buf->head);
                buf->head = next;
        }
        buf->head->read = buf->head->commit;

        if (ld && ld->ops->flush_buffer)
                ld->ops->flush_buffer(tty);

        atomic_dec(&buf->priority);
        mutex_unlock(&buf->lock);
}

/**
 *      tty_buffer_request_room         -       grow tty buffer if needed
 *      @port: tty port
 *      @size: size desired
 *      @flags: buffer flags if new buffer allocated (default = 0)
 *
 *      Make at least size bytes of linear space available for the tty
 *      buffer. If we fail return the size we managed to find.
 *
 *      Will change over to a new buffer if the current buffer is encoded as
 *      TTY_NORMAL (so has no flags buffer) and the new buffer requires
 *      a flags buffer.
 */
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
                                     int flags)
{
        struct tty_bufhead *buf = &port->buf;
        struct tty_buffer *b, *n;
        int left, change;

        b = buf->tail;
        if (b->flags & TTYB_NORMAL)
                left = 2 * b->size - b->used;
        else
                left = b->size - b->used;

        change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
        if (change || left < size) {
                /* This is the slow path - looking for new buffers to use */
                n = tty_buffer_alloc(port, size);
                if (n != NULL) {
                        n->flags = flags;
                        buf->tail = n;
                        /* paired w/ acquire in flush_to_ldisc(); ensures
                         * flush_to_ldisc() sees buffer data.
                         */
                        smp_store_release(&b->commit, b->used);
                        /* paired w/ acquire in flush_to_ldisc(); ensures the
                         * latest commit value can be read before the head is
                         * advanced to the next buffer
                         */
                        smp_store_release(&b->next, n);
                } else if (change)
                        size = 0;
                else
                        size = left;
        }
        return size;
}

int tty_buffer_request_room(struct tty_port *port, size_t size)
{
        return __tty_buffer_request_room(port, size, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
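
/*
 * Illustrative sketch (hypothetical): pre-grow the buffer once and then queue
 * characters one at a time with tty_insert_flip_char(), a pattern common in
 * simple polled or PIO receive paths. The helper below is not part of this
 * file.
 */
static inline void example_pio_rx(struct tty_port *port,
                                  const unsigned char *data, size_t len)
{
        size_t i;

        /* One slow-path grow up front instead of one per character */
        tty_buffer_request_room(port, len);

        for (i = 0; i < len; i++)
                tty_insert_flip_char(port, data[i], TTY_NORMAL);

        tty_flip_buffer_push(port);
}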

/**
 *      tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 *      @port: tty port
 *      @chars: characters
 *      @flag: flag value for each character
 *      @size: size
 *
 *      Queue a series of bytes to the tty buffering. All the characters
 *      passed are marked with the supplied flag. Returns the number added.
 */

int tty_insert_flip_string_fixed_flag(struct tty_port *port,
                const unsigned char *chars, char flag, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
                int space = __tty_buffer_request_room(port, goal, flags);
                struct tty_buffer *tb = port->buf.tail;
                if (unlikely(space == 0))
                        break;
                memcpy(char_buf_ptr(tb, tb->used), chars, space);
                if (~tb->flags & TTYB_NORMAL)
                        memset(flag_buf_ptr(tb, tb->used), flag, space);
                tb->used += space;
                copied += space;
                chars += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
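
/*
 * Illustrative sketch (hypothetical): a receive path queueing a block of
 * characters that all share one flag value, for example data seen after a
 * detected break condition, then kicking the flip-buffer worker.
 */
static inline void example_rx_after_break(struct tty_port *port,
                                          const unsigned char *data, size_t len)
{
        /* Every byte carries TTY_BREAK; returns how many were queued */
        int copied = tty_insert_flip_string_fixed_flag(port, data,
                                                       TTY_BREAK, len);

        if (copied)
                tty_flip_buffer_push(port);
}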

/**
 *      tty_insert_flip_string_flags    -       Add characters to the tty buffer
 *      @port: tty port
 *      @chars: characters
 *      @flags: flag bytes
 *      @size: size
 *
 *      Queue a series of bytes to the tty buffering. For each character
 *      the flags array indicates the status of the character. Returns the
 *      number added.
 */

int tty_insert_flip_string_flags(struct tty_port *port,
                const unsigned char *chars, const char *flags, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int space = tty_buffer_request_room(port, goal);
                struct tty_buffer *tb = port->buf.tail;
                if (unlikely(space == 0))
                        break;
                memcpy(char_buf_ptr(tb, tb->used), chars, space);
                memcpy(flag_buf_ptr(tb, tb->used), flags, space);
                tb->used += space;
                copied += space;
                chars += space;
                flags += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

/**
 *      tty_schedule_flip       -       push characters to ldisc
 *      @port: tty port to push from
 *
 *      Takes any pending buffers and transfers their ownership to the
 *      ldisc side of the queue. It then schedules those characters for
 *      processing by the line discipline.
 */

void tty_schedule_flip(struct tty_port *port)
{
        struct tty_bufhead *buf = &port->buf;

        /* paired w/ acquire in flush_to_ldisc(); ensures
         * flush_to_ldisc() sees buffer data.
         */
        smp_store_release(&buf->tail->commit, buf->tail->used);
        queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);

/**
 *      tty_prepare_flip_string         -       make room for characters
 *      @port: tty port
 *      @chars: return pointer for character write area
 *      @size: desired size
 *
 *      Prepare a block of space in the buffer for data. Returns the length
 *      available and buffer pointer to the space which is now allocated and
 *      accounted for as ready for normal characters. This is used for drivers
 *      that need their own block copy routines into the buffer. There is no
 *      guarantee the buffer is a DMA target!
 */

int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
                size_t size)
{
        int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);
        if (likely(space)) {
                struct tty_buffer *tb = port->buf.tail;
                *chars = char_buf_ptr(tb, tb->used);
                if (~tb->flags & TTYB_NORMAL)
                        memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
                tb->used += space;
        }
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
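
/*
 * Illustrative sketch (hypothetical): a driver that copies directly into the
 * flip buffer with its own block-copy routine, which is what
 * tty_prepare_flip_string() exists for. memcpy() stands in for whatever copy
 * the hardware actually needs; remember the buffer is not guaranteed to be a
 * DMA target.
 */
static inline int example_block_rx(struct tty_port *port,
                                   const unsigned char *hw_buf, size_t len)
{
        unsigned char *dst;
        int space = tty_prepare_flip_string(port, &dst, len);

        if (space) {
                /* The prepared area is already accounted as TTY_NORMAL data */
                memcpy(dst, hw_buf, space);
                tty_flip_buffer_push(port);
        }
        return space;
}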

/**
 *      tty_ldisc_receive_buf           -       forward data to line discipline
 *      @ld:    line discipline to process input
 *      @p:     char buffer
 *      @f:     TTY_* flags buffer
 *      @count: number of bytes to process
 *
 *      Callers other than flush_to_ldisc() need to exclude the kworker
 *      from concurrent use of the line discipline, see paste_selection().
 *
 *      Returns the number of bytes processed
 */
int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
                          char *f, int count)
{
        if (ld->ops->receive_buf2)
                count = ld->ops->receive_buf2(ld->tty, p, f, count);
        else {
                count = min_t(int, count, ld->tty->receive_room);
                if (count && ld->ops->receive_buf)
                        ld->ops->receive_buf(ld->tty, p, f, count);
        }
        return count;
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);

static int
receive_buf(struct tty_ldisc *ld, struct tty_buffer *head, int count)
{
        unsigned char *p = char_buf_ptr(head, head->read);
        char          *f = NULL;

        if (~head->flags & TTYB_NORMAL)
                f = flag_buf_ptr(head, head->read);

        return tty_ldisc_receive_buf(ld, p, f, count);
}

/**
 *      flush_to_ldisc
 *      @work: buffer work embedded in the tty port (port->buf.work)
 *
 *      This routine is called from the workqueue to flush data
 *      from the buffer chain to the line discipline.
 *
 *      The receive_buf method is single threaded for each tty instance.
 *
 *      Locking: takes buffer lock to ensure single-threaded flip buffer
 *               'consumer'
 */

static void flush_to_ldisc(struct work_struct *work)
{
        struct tty_port *port = container_of(work, struct tty_port, buf.work);
        struct tty_bufhead *buf = &port->buf;
        struct tty_struct *tty;
        struct tty_ldisc *disc;

        tty = READ_ONCE(port->itty);
        if (tty == NULL)
                return;

        disc = tty_ldisc_ref(tty);
        if (disc == NULL)
                return;

        mutex_lock(&buf->lock);

        while (1) {
                struct tty_buffer *head = buf->head;
                struct tty_buffer *next;
                int count;

                /* Ldisc or user is trying to gain exclusive access */
                if (atomic_read(&buf->priority))
                        break;

                /* paired w/ release in __tty_buffer_request_room();
                 * ensures commit value read is not stale if the head
                 * is advancing to the next buffer
                 */
                next = smp_load_acquire(&head->next);
                /* paired w/ release in __tty_buffer_request_room() or in
                 * tty_buffer_flush(); ensures we see the committed buffer data
                 */
                count = smp_load_acquire(&head->commit) - head->read;
                if (!count) {
                        if (next == NULL)
                                break;
                        buf->head = next;
                        tty_buffer_free(port, head);
                        continue;
                }

                count = receive_buf(disc, head, count);
                if (!count)
                        break;
                head->read += count;
        }

        mutex_unlock(&buf->lock);

        tty_ldisc_deref(disc);
}

/**
 *      tty_flip_buffer_push    -       push terminal flip buffers
 *      @port: tty port to push
 *
 *      Queue a push of the terminal flip buffers to the line discipline.
 *      Can be called from IRQ/atomic context.
 *
 *      In the event of the queue being busy for flipping the work will be
 *      held off and retried later.
 */

void tty_flip_buffer_push(struct tty_port *port)
{
        tty_schedule_flip(port);
}
EXPORT_SYMBOL(tty_flip_buffer_push);

/**
 *      tty_buffer_init         -       prepare a tty buffer structure
 *      @port: tty port to initialise
 *
 *      Set up the initial state of the buffer management for a tty device.
 *      Must be called before the other tty buffer functions are used.
 */

void tty_buffer_init(struct tty_port *port)
{
        struct tty_bufhead *buf = &port->buf;

        mutex_init(&buf->lock);
        tty_buffer_reset(&buf->sentinel, 0);
        buf->head = &buf->sentinel;
        buf->tail = &buf->sentinel;
        init_llist_head(&buf->free);
        atomic_set(&buf->mem_used, 0);
        atomic_set(&buf->priority, 0);
        INIT_WORK(&buf->work, flush_to_ldisc);
        buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
}

/**
 *      tty_buffer_set_limit    -       change the tty buffer memory limit
 *      @port: tty port to change
 *      @limit: new memory limit in bytes
 *
 *      Change the tty buffer memory limit.
 *      Must be called before the other tty buffer functions are used.
 */

int tty_buffer_set_limit(struct tty_port *port, int limit)
{
        if (limit < MIN_TTYB_SIZE)
                return -EINVAL;
        port->buf.mem_limit = limit;
        return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
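
/*
 * Illustrative sketch (hypothetical): a driver for fast hardware raising the
 * flip-buffer memory limit at port setup time, before any data flows.
 */
static inline int example_port_setup(struct tty_port *port)
{
        tty_port_init(port);

        /* 1 MiB instead of the 64 KiB default; must be >= MIN_TTYB_SIZE */
        return tty_buffer_set_limit(port, 1024 * 1024);
}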

/* slave ptys can claim nested buffer lock when handling BRK and INTR */
void tty_buffer_set_lock_subclass(struct tty_port *port)
{
        lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
}

bool tty_buffer_restart_work(struct tty_port *port)
{
        return queue_work(system_unbound_wq, &port->buf.work);
}

bool tty_buffer_cancel_work(struct tty_port *port)
{
        return cancel_work_sync(&port->buf.work);
}

void tty_buffer_flush_work(struct tty_port *port)
{
        flush_work(&port->buf.work);
}